rpact/0000755000176200001440000000000014450555553011376 5ustar liggesusersrpact/NAMESPACE0000644000176200001440000000735714447544614012633 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(as.data.frame,AnalysisResults) S3method(as.data.frame,ParameterSet) S3method(as.data.frame,PowerAndAverageSampleNumberResult) S3method(as.data.frame,StageResults) S3method(as.data.frame,TrialDesign) S3method(as.data.frame,TrialDesignCharacteristics) S3method(as.data.frame,TrialDesignPlan) S3method(as.data.frame,TrialDesignSet) S3method(as.matrix,FieldSet) S3method(knit_print,ParameterSet) S3method(length,TrialDesignSet) S3method(names,AnalysisResults) S3method(names,FieldSet) S3method(names,SimulationResults) S3method(names,StageResults) S3method(names,TrialDesignSet) S3method(plot,AnalysisResults) S3method(plot,Dataset) S3method(plot,EventProbabilities) S3method(plot,NumberOfSubjects) S3method(plot,ParameterSet) S3method(plot,SimulationResults) S3method(plot,StageResults) S3method(plot,SummaryFactory) S3method(plot,TrialDesign) S3method(plot,TrialDesignCharacteristics) S3method(plot,TrialDesignPlan) S3method(plot,TrialDesignSet) S3method(print,Dataset) S3method(print,FieldSet) S3method(print,ParameterSet) S3method(print,SimulationResults) S3method(print,SummaryFactory) S3method(print,TrialDesignCharacteristics) S3method(summary,AnalysisResults) S3method(summary,Dataset) S3method(summary,ParameterSet) S3method(summary,TrialDesignSet) export(getAccrualTime) export(getAnalysisResults) export(getAvailablePlotTypes) export(getClosedCombinationTestResults) export(getClosedConditionalDunnettTestResults) export(getConditionalPower) export(getConditionalRejectionProbabilities) export(getData) export(getData.SimulationResults) export(getDataSet) export(getDataset) export(getDesignCharacteristics) export(getDesignConditionalDunnett) export(getDesignFisher) export(getDesignGroupSequential) export(getDesignInverseNormal) export(getDesignSet) 
export(getEventProbabilities) export(getFinalConfidenceInterval) export(getFinalPValue) export(getGroupSequentialProbabilities) export(getHazardRatioByPi) export(getLambdaByMedian) export(getLambdaByPi) export(getLambdaStepFunction) export(getLogLevel) export(getLongFormat) export(getMedianByLambda) export(getMedianByPi) export(getNumberOfSubjects) export(getObjectRCode) export(getObservedInformationRates) export(getOutputFormat) export(getParameterCaption) export(getParameterName) export(getPerformanceScore) export(getPiByLambda) export(getPiByMedian) export(getPiecewiseExponentialDistribution) export(getPiecewiseExponentialQuantile) export(getPiecewiseExponentialRandomNumbers) export(getPiecewiseSurvivalTime) export(getPlotSettings) export(getPowerAndAverageSampleNumber) export(getPowerMeans) export(getPowerRates) export(getPowerSurvival) export(getRawData) export(getRepeatedConfidenceIntervals) export(getRepeatedPValues) export(getSampleSizeMeans) export(getSampleSizeRates) export(getSampleSizeSurvival) export(getSimulationEnrichmentMeans) export(getSimulationEnrichmentRates) export(getSimulationEnrichmentSurvival) export(getSimulationMeans) export(getSimulationMultiArmMeans) export(getSimulationMultiArmRates) export(getSimulationMultiArmSurvival) export(getSimulationRates) export(getSimulationSurvival) export(getStageResults) export(getTestActions) export(getWideFormat) export(kable) export(kable.ParameterSet) export(plotTypes) export(ppwexp) export(printCitation) export(qpwexp) export(rcmd) export(readDataset) export(readDatasets) export(resetLogLevel) export(rpwexp) export(setLogLevel) export(setOutputFormat) export(testPackage) export(test_plan_section) export(writeDataset) export(writeDatasets) exportMethods("[") exportMethods(t) import(graphics) import(methods) import(stats) import(tools) import(utils) importFrom(Rcpp,evalCpp) importFrom(knitr,kable) importFrom(knitr,knit_print) importFrom(methods,new) importFrom(rlang,.data) useDynLib(rpact, .registration 
= TRUE) rpact/README.md0000644000176200001440000001067114450544102012646 0ustar liggesusers [![CRAN Status](https://www.r-pkg.org/badges/version/rpact)](https://cran.r-project.org/package=rpact) [![R-CMD-check](https://github.com/rpact-com/rpact/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/rpact-com/rpact/actions/workflows/R-CMD-check.yaml) [![Total downloads](https://cranlogs.r-pkg.org/badges/grand-total/rpact?color=blue)](https://CRAN.R-project.org/package=rpact) [![Monthly downloads](https://cranlogs.r-pkg.org/badges/rpact?color=blue)](https://CRAN.R-project.org/package=rpact) [![Lifecycle: stable](https://img.shields.io/badge/lifecycle-stable-brightgreen.svg)](https://lifecycle.r-lib.org/articles/stages.html#stable) [![License: LGPL v3](https://img.shields.io/badge/License-LGPL_v3-blue.svg)](https://www.gnu.org/licenses/lgpl-3.0) [![shinyapps.io](https://img.shields.io/badge/Shiny-shinyapps.io-blue?style=flat&labelColor=white&logo=RStudio&logoColor=blue)](https://rpact.shinyapps.io/public/) # rpact Confirmatory Adaptive Clinical Trial Design, Simulation, and Analysis. 
## Functional Range - Sample size and power calculation for - means (continuous endpoint) - rates (binary endpoint) - survival trials with - piecewise accrual time and intensity - piecewise exponential survival time - survival times that follow a Weibull distribution - Fixed sample design and designs with interim analysis stages - Simulation tool for means, rates, and survival data - Assessment of adaptive sample size/event number recalculations based on conditional power - Assessment of treatment selection strategies in multi-arm trials - Adaptive analysis of means, rates, and survival data - Adaptive designs and analysis for multi-arm trials - Adaptive analysis and simulation tools for enrichment design testing means, rates, and hazard ratios - Automatic boundary recalculations during the trial for analysis with alpha spending approach, including under- and over-running ## Installation Install the latest CRAN release via ``` r install.packages("rpact") ``` ## Documentation The documentation is hosted at ## Vignettes The vignettes are hosted at [www.rpact.org/vignettes](https://www.rpact.org/vignettes/) ## The rpact user group The *rpact project* has an active user group consisting of decision-makers and users from the pharmaceutical industry and CROs, who meet regularly and, e.g., discuss best practices. We invite you to be part of the *rpact user group*: benefit from know-how, shape open source development in Pharma! ## Use on corporate computer systems Please [contact](https://www.rpact.com/contact) us to learn how to use `rpact` on FDA/GxP-compliant validated corporate computer systems and how to get a copy of the formal validation documentation that is customized and licensed for exclusive use by your company, e.g., to fulfill regulatory requirements. The validation documentation contains the personal access data for performing the installation qualification with `testPackage()`. 
> [www.rpact.com/contact](https://www.rpact.com/contact) # About - **rpact** is a comprehensive validated[^1] R package for clinical research which - enables the design and analysis of confirmatory adaptive group sequential designs - is a powerful sample size calculator - is a free of charge open-source software licensed under [LGPL-3](https://cran.r-project.org/web/licenses/LGPL-3) - particularly, implements the methods described in the recent monograph by [Wassmer and Brannath (2016)](https://doi.org/10.1007%2F978-3-319-32562-0) > For more information please visit > [www.rpact.org](https://www.rpact.org) - **RPACT** is a company which offers - enterprise R/Shiny software development services - technical support for the [rpact](https://cran.r-project.org/package=rpact) package - consultancy and user training for scientists using R - validated software solutions and R package development for clinical research > For more information please visit > [www.rpact.com](https://www.rpact.com) [^1]: The rpact validation documentation is available exclusively for our customers and supporting members. For more information visit [www.rpact.com/services/sla](https://www.rpact.com/services/sla) rpact/data/0000755000176200001440000000000014155607334012304 5ustar liggesusersrpact/data/dataRates.RData0000644000176200001440000000035014263462164015127 0ustar liggesusers r0b```f`adf`f2؜ LXvPa8m`N*.FKC7J{6`7O JCk^b.S `OAيKӁwqbnANjpf\-,5/K-J P q(G Ӣr=dݘX s#L #ziE@@? 
rpact/data/dataSurvival.RData0000644000176200001440000000043614263462164015671 0ustar liggesusers r0b```f`adf`f2؜ LX HIm( ͰP(6u#;p0u?m- x ճJ}b9qN9675}<U;&_w֥k:.|n@M~erd|3u_|Ϫ9AsxK}V!=F1EnF\R;̋l FDӒ`/9}U |*b P=[Ly̢()˔*ÔoHY䱦RPD0HI,y&N"~ sߠrpact/data/dataEnrichmentMeans.RData0000644000176200001440000000104514263462164017133 0ustar liggesusers r0b```f`adf`f2؜ LXO#ydm&| c-0 0j| q.sO;AO'!I(:S"fs S龃+!HmCyLjgo~}E$= &[ s(׹!vNN_ ě~Eַ#G}Bև蟫,"ɦ`%Gs淌&]c$| ?})RF|8[Е=[ИRBbT}-xܖUib&[!xk#nrpact/data/rawDataTwoArmNormal.RData0000644000176200001440000002323714155607324017115 0ustar liggesusersTTٺ (̴T$TE&I2#@Ha@1!"ڨAZ1P>޽qN˽g3XcmjWX{9Keq5rRQQiFU:sk%{J$^o:b='@$>ɏRBT%?D\H~w&jɿDc"%"'$")4","-9HK."RO'TDorw"97QL%Rݓ)rʥr) R.K9RNS.K9RO\)rʥr)\)rʥh/TPcd$:4 ,Z?(2<& ڶ_F/TZGhu:ZGhu:3|kՄX݈ԫ+6&< ^)=7@%ef|Yw;G΃9n\-+ ʲm܇29[dQY3 ʳlմĮzˡ4LMpv?[xgVGT@>C}`pXhb$\g, kأ]ǎ͒w<jcq2|^5VN:ә*p߳'Vs7N=l{N?,'j0ӿzyV6'j/rCrhmxLY[Sz ԙuT+rJR): Ss2ؑI ڷWj\=klwXM2'qa&I2׀7ӹ%'gþ,|>WMY3\ʴ>̇8{sŸA5p[Ӵ7<ND_cܾ2th;~c ܥiCThD ڄ pOiOL\u U>#t:6$fD8}j &.}YJOf7`+88 e :']fhm˙\ _I@V`>sn whǔW ÿU<\ 2o?(cqpކ֤.J73]$'<(nlp:)̴dt>w >tz:$'6vso*^U'!BbhG OsfvpOx 'oo? {5K֔Ԃ{cn L^6"N) #KO 2{-θY}6qSB5rbXk{{kVMm)ٌ2YM ONe5Ċs_ᡢaVܛ_}eL.;~^I_,SovoHk\T8R.M}[(nᡙsP~,4arflDz4ݲ]k|x[¦n!L.,ԫ)"чvG9?3fӂŇfcI*__5vE.3M~Jk׹f|?ꁏ'${W3 ~\w؉$VfԶB&-fg㰳*vrGXy xހHeZo;g}x^vcJ3%DlֺnO߉rJ")20ܰ"*jpnM1/~ʌ܉jV{6vzQ1ZmRd'$EikL.h2XU^8,3pM !zwG`WYgH(ʮW.MOkpaNKl S9M4.b^o':ëc@2u߶7D9~->=Akc]F'A{H><L)Z%@vgUt;pM<\fF.8b!\$zAp5E *(w%^ vz*:ܯ7ޅ〹xslFLwqwl;Ayںk4!D3.2rvh8bSG\z?}g /zTo9"8m:.lŅL42=-j!&ߥ}7p;qbMpypM_qnyO,]wN3жd6w{ i<;;k\-\:L̓۔ۆAX̋}p-*}˩W-|n?~Fb7<_̘bl<Tl 򘍶f7x-d7ǚD>1݁=#V@t7^L?𮣐 {ZN`J5$Ӧ7~Ӣ x.Q'U!HnܑoѲij?K ;\ȂϹuJL_jtr7|La~<c׾*y#_L]q  o+]NfaߡL?rkEkICNK|Ke>Z&8yyf3Fv鹏J;S |MݾeHk{y^z̵m~U`NYz̊xp G m1k f5uy3wѯMT%o?{>c1pm~~dwR{hrx\ۖW_'?>dQp>)nǟޚwI1j< ic,`8?y_)0Tr#zCgå`zsZ x]rd6 tjeDMdKr}FS`*>|#c;S Wt0'wyp:9?@{.E,x! ;b_۳}V~_Vtݫiґ>xwbt|U=b!͐l\U {pQ< f'+vf]{n'  MS75Y˥|l&i`xܰXb> 9iv^48@s$! 
VS} RӮ&qq}f=ʂxh)L_c>34'/D_s%ӑi9R9^;~e-\2aqid86>56DwB(7p6?*ìPL͗/DmՋ_MtNԓ{zv)yϑNq?${} Qt;.4G@hlA7Y.Gz_-%2udPחO?t)>o\oN=O޹f#N?'OϭOWk &w*g?,M?YOПܒZ| hX6wnso?WZ|Ixɀx4r5\~Z<-xAk'~l`Zc-DK'Kf[jmq[ʴ-ܖ4U/W8Uf3x0gD*.6ܘ)L΀e<`9/3=-|f{pv rt|y *M+gdpxtxYն߄ŃsׯU;t9GxcVvZUc9g&/הGgug=sӺ_27t2pGl7p`>;ڤ qYм =#DNٽ6'߸ Xki5zo ud.Ϫ+|יL1Gg˯Oͨ"C9aۦ>.`uR:܁uC>z_b/^CuSǃ? b?~{,o Ϊ㛺qTWM9y:UN#!X4/퉀Y_pmG9=r4gKeFncŹװZ;r{b?.:fE] 4L[1U+>՝i! }@&g(q%#a9`{P]a63 3OA{=\`eUhzMT:t\k=' `)Ribhf{ 3A1/AV;mѣ' h۩hH&r@ ƥ\3YX^o-u!7OHgZәB+YeGOW0뿭gb9DLB4AlM|~KNDɼvK`eaն))`ձ>4:y4_6 g^wHK]ֻL.]ę'I.~3A޺fݒj?B;}©Y=ـ$ѣQ KwSGEƅ>!tސ0o>9R^c*fJ 3)oǬmk!twc5l }` ?rm38qx[[Ҋ6{O6o*p jxjȴyxj/y2&Ko?"j{׿^l_ ¤:,d 8MslSWߐN;OxO!x5c4gD6];QI<秶G]6xgr_h|S)#ꙺyx5X{wj N%,nF_;uߗLLWz'T3fӹr>&ztU|M(Qamv:[h*EG!:es 8 iAC}. 2V c9-~[,ۖYgqU+A;8&rZgݪ®@Z~SH7OqЯ !zxwa,XIqFENW8 wAuvJS fny2}چ['tX`znz]lL3ط:7M\t~̙:9o Sv{nԓMQ2DT!o8n>n^=D6`{sLpaLF@Vp?&l(.77urI.SvvV@jAhkַW^:puJ9΍;pv Si'uditu-ڥk vF:3?\bfwn Z;8v&MKk0XŗLhVL}"'bVEYr\>oX]Й3"- 0x,Mr'꫼Ta݃r ֪+lDe1 cxgejQF2ܢcc~!]*a: d4 PtcpJuܻV` n-G5gWpSgR[M5ѾH9X2}ڶlDy.I&rcnX&61zudX$(*rX>_oKL;g %9!QXϝ!L߬su]rgAj9FL/]fk"6N'@DY^}+V~jڄ=mi:81˥ = C5D ^]0kpDWOctS-:.(,xO2| ub?&[ӇI%H%*tF3hvrpact/data/dataMultiArmSurvival.RData0000644000176200001440000000050614263462164017342 0ustar liggesusers r0b```f`adf`f2؜ LX\t(=\Txl^?"qW )P:pQ^k 7/џ!t;qqjxۆDpa4A^o4?(- P]Pq7Phz4L+[4̀&@z4?(-y (KzCõkq|B**>ZU8{Sj'7vVM1c-EcP[j*<|{,1\ )hr܈U f.OX_{W4Cr {g #>d^4\4( `ּbHÓ*V\؋KSK`\܂̪bC,b[.1m3YZraeE99aJ"["S3΢r=XX:L @/49'0A.P7C/[>j rpact/data/dataMultiArmMeans.RData0000644000176200001440000000070714263462164016575 0ustar liggesusers r0b```f`adf`f2؜ LX 1 was specified, a grid plot will be returned if the number of plots is <= specified \code{grid} value; a list of \code{ggplot} objects will be returned otherwise. If \code{grid = 0} is specified, all plots will be created using \code{\link[base]{print}} command and a list of \code{ggplot} objects will be returned invisible. 
Note that one of the following packages must be installed to create a grid plot: 'ggpubr', 'gridExtra', or 'cowplot'.} } \description{ Parameter Description: Grid (Output Specification Of Multiple Plots) } \keyword{internal} rpact/man/param_pi2_rates.Rd0000644000176200001440000000067114335631010015475 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_pi2_rates} \alias{param_pi2_rates} \title{Parameter Description: Pi (2) for Rates} \arguments{ \item{pi2}{A numeric value that represents the assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}.} } \description{ Parameter Description: Pi (2) for Rates } \keyword{internal} rpact/man/as.data.frame.StageResults.Rd0000644000176200001440000000221714335631006017460 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \name{as.data.frame.StageResults} \alias{as.data.frame.StageResults} \title{Coerce Stage Results to a Data Frame} \usage{ \method{as.data.frame}{StageResults}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, type = 1, ... ) } \arguments{ \item{x}{A \code{\link{StageResults}} object.} \item{niceColumnNamesEnabled}{Logical. If \code{TRUE}, nice looking column names will be used; syntactic names (variable names) otherwise (see \code{\link[base]{make.names}}).} \item{includeAllParameters}{Logical. If \code{TRUE}, all available parameters will be included in the data frame; a meaningful parameter selection otherwise, default is \code{FALSE}.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} } \value{ Returns a \code{\link[base]{data.frame}}. } \description{ Returns the \code{StageResults} as data frame. } \details{ Coerces the stage results to a data frame. 
} \keyword{internal} rpact/man/param_design.Rd0000644000176200001440000000044614232463333015065 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_design} \alias{param_design} \title{Parameter Description: Design} \arguments{ \item{design}{The trial design.} } \description{ Parameter Description: Design } \keyword{internal} rpact/man/param_three_dots_plot.Rd0000644000176200001440000000075414335631010017005 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_three_dots_plot} \alias{param_three_dots_plot} \title{Parameter Description: "..." (optional plot arguments)} \arguments{ \item{...}{Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} } \description{ Parameter Description: "..." (optional plot arguments) } \keyword{internal} rpact/man/ConditionalPowerResultsEnrichmentMeans.Rd0000644000176200001440000000345014450467342022303 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{ConditionalPowerResultsEnrichmentMeans} \alias{ConditionalPowerResultsEnrichmentMeans} \title{Conditional Power Results Enrichment Means} \description{ Class for conditional power calculations of enrichment means data } \details{ This object cannot be created directly; use \code{\link{getConditionalPower}} with suitable arguments to create the results of a group sequential or a combination test design. } \section{Fields}{ \describe{ \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. 
For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{simulated}}{Describes if the power for Fisher's combination test has been simulated. Only applicable when using Fisher designs. Is a logical vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDevs}}{Assumed standard deviations to calculate conditional power in multi-arm trials or enrichment designs. Is a numeric vector.} }} \keyword{internal} rpact/man/SimulationResultsMultiArmMeans.Rd0000644000176200001440000001536614450467343020617 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsMultiArmMeans} \alias{SimulationResultsMultiArmMeans} \title{Class for Simulation Results Multi-Arm Means} \description{ A class for simulation results means in multi-arm designs. } \details{ Use \code{\link[=getSimulationMultiArmMeans]{getSimulationMultiArmMeans()}} to create an object of this type. } \section{Fields}{ \describe{ \item{\code{maxNumberOfIterations}}{The number of simulation iterations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. 
For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{stDev}}{The standard deviation used for sample size and power calculation. Is a numeric vector of length 1.} \item{\code{plannedSubjects}}{Determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, refers to the number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{minNumberOfSubjectsPerStage}}{Determines the minimum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the minimum number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{maxNumberOfSubjectsPerStage}}{Determines the maximum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the minimum number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. 
Is a numeric vector.} \item{\code{stDevH1}}{The standard deviation under which the conditional power or sample size recalculation is performed. Is a numeric vector of length 1.} \item{\code{calcSubjectsFunction}}{An optional function that can be entered to define how sample size is recalculated. By default, recalculation is performed with conditional power with specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage}.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{activeArms}}{The number of active treatment arms to be compared with control. Is a numeric vector of length 1 containing a whole number.} \item{\code{effectMatrix}}{The matrix of effect sizes with \code{activeArms} columns and number of rows reflecting the different situations to consider.} \item{\code{typeOfShape}}{The shape of the dose-response relationship over the treatment groups. Is a character vector of length 1.} \item{\code{muMaxVector}}{The range of effect sizes for the treatment group with highest response for \code{"linear"} and \code{"sigmoidEmax"} model. Is a numeric vector.} \item{\code{gED50}}{The ED50 of the sigmoid Emax model. Only necessary if \code{typeOfShape = "sigmoidEmax"} has been specified. Is a numeric vector of length 1.} \item{\code{slope}}{The slope of the sigmoid Emax model, if \code{typeOfShape = "sigmoidEmax"} Is a numeric vector of length 1.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{adaptations}}{Indicates whether or not an adaptation takes place at interim k. Is a logical vector of length \code{kMax} minus 1.} \item{\code{typeOfSelection}}{The way the treatment arms or populations are selected at interim. 
Is a character vector of length 1.} \item{\code{effectMeasure}}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (\code{"effectEstimate"}). Is a character vector of length 1.} \item{\code{successCriterion}}{Defines when the study is stopped for efficacy at interim. \code{"all"} stops the trial if the efficacy criterion has been fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim. Is a character vector of length 1.} \item{\code{epsilonValue}}{Needs to be specified if \code{typeOfSelection = "epsilon"}. Is a numeric vector of length 1.} \item{\code{rValue}}{Needs to be specified if \code{typeOfSelection = "rBest"}. Is a numeric vector of length 1.} \item{\code{threshold}}{The selection criterion: treatment arm/population is only selected if \code{effectMeasure} exceeds \code{threshold}. Either a single numeric value or a numeric vector of length \code{activeArms} referring to a separate threshold condition for each treatment arm.} \item{\code{selectArmsFunction}}{An optional function that can be entered to define how treatment arms are selected.} \item{\code{earlyStop}}{The probability to stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{selectedArms}}{The selected arms in multi-armed designs.} \item{\code{numberOfActiveArms}}{The number of active arms in a multi-armed design. Is a numeric matrix.} \item{\code{rejectAtLeastOne}}{The probability to reject at least one of the (multiple) hypotheses. Is a numeric vector.} \item{\code{rejectedArmsPerStage}}{The simulated number of rejected arms per stage.} \item{\code{successPerStage}}{The simulated success probabilities per stage where success is defined by user. Is a numeric matrix.} \item{\code{sampleSizes}}{The sample sizes for each group and stage. 
Is a numeric vector of length number of stages times number of groups containing whole numbers.} \item{\code{conditionalPowerAchieved}}{The calculated conditional power, under the assumption of observed or assumed effect sizes. Is a numeric matrix.} }} \keyword{internal} rpact/man/getDesignCharacteristics.Rd0000644000176200001440000000522414445307236017405 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_group_sequential.R \name{getDesignCharacteristics} \alias{getDesignCharacteristics} \title{Get Design Characteristics} \usage{ getDesignCharacteristics(design = NULL, ...) } \arguments{ \item{design}{The trial design.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} } \value{ Returns a \code{\link{TrialDesignCharacteristics}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesignCharacteristics]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Calculates the characteristics of a design and returns it. } \details{ Calculates the inflation factor (IF), the expected reduction in sample size under H1, under H0, and under a value in between H0 and H1. Furthermore, absolute information values are calculated under the prototype case testing H0: mu = 0 against H1: mu = 1. 
} \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ # Calculate design characteristics for a three-stage O'Brien & Fleming # design at power 90\% and compare it with Pocock's design. getDesignCharacteristics(getDesignGroupSequential(beta = 0.1)) getDesignCharacteristics(getDesignGroupSequential(beta = 0.1, typeOfDesign = "P")) } \seealso{ Other design functions: \code{\link{getDesignConditionalDunnett}()}, \code{\link{getDesignFisher}()}, \code{\link{getDesignGroupSequential}()}, \code{\link{getDesignInverseNormal}()}, \code{\link{getGroupSequentialProbabilities}()}, \code{\link{getPowerAndAverageSampleNumber}()} } \concept{design functions} rpact/man/getEventProbabilities.Rd0000644000176200001440000001367414372411347016740 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getEventProbabilities} \alias{getEventProbabilities} \title{Get Event Probabilities} \usage{ getEventProbabilities( time, ..., accrualTime = c(0, 12), accrualIntensity = 0.1, accrualIntensityType = c("auto", "absolute", "relative"), kappa = 1, piecewiseSurvivalTime = NA_real_, lambda2 = NA_real_, lambda1 = NA_real_, allocationRatioPlanned = 1, hazardRatio = NA_real_, dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, maxNumberOfSubjects = NA_real_ ) } \arguments{ \item{time}{A numeric vector with time values.} \item{...}{Ensures that all arguments (starting from the "...") 
are to be named and that a warning will be displayed if unknown arguments are passed.} \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0, 12)} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensity}{A numeric vector of accrual intensities, default is the relative intensity \code{0.1} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensityType}{A character value specifying the accrual intensity input type. Must be one of \code{"auto"}, \code{"absolute"}, or \code{"relative"}; default is \code{"auto"}, i.e., if all values are < 1 the type is \code{"relative"}, otherwise it is \code{"absolute"}.} \item{kappa}{A numeric value > 0. A \code{kappa != 1} will be used for the specification of the shape of the Weibull distribution. Default is \code{1}, i.e., the exponential survival distribution is used instead of the Weibull distribution. Note that the Weibull distribution cannot be used for the piecewise definition of the survival time distribution, i.e., only \code{piecewiselambda} (as a single value) and \code{kappa} can be specified. This function is equivalent to \code{pweibull(t, shape = kappa, scale = 1 / lambda)} of the \code{stats} package, i.e., the scale parameter is \code{1 / 'hazard rate'}.\cr For example, \code{getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2)} and \code{pweibull(q = 130, shape = 4.2, scale = 1 / 0.01)} provide the sample result.} \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function \cr (for details see \code{\link[=getPiecewiseSurvivalTime]{getPiecewiseSurvivalTime()}}).} \item{lambda2}{The assumed hazard rate in the reference group, there is no default. \code{lambda2} can also be used to define piecewise exponentially distributed survival times (see details). 
Must be a positive numeric of length 1.} \item{lambda1}{The assumed hazard rate in the treatment group, there is no default. \code{lambda1} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the smallest overall sample size is determined.} \item{hazardRatio}{The vector of hazard ratios under consideration. If the event or hazard rates in both treatment groups are defined, the hazard ratio needs not to be specified as it is calculated, there is no default. Must be a positive numeric of length 1.} \item{dropoutRate1}{The assumed drop-out rate in the treatment group, default is \code{0}.} \item{dropoutRate2}{The assumed drop-out rate in the control group, default is \code{0}.} \item{dropoutTime}{The assumed time for drop-out rates in the control and the treatment group, default is \code{12}.} \item{maxNumberOfSubjects}{If \code{maxNumberOfSubjects > 0} is specified, the end of accrual at specified \code{accrualIntensity} for the specified number of subjects is determined or \code{accrualIntensity} is calculated at fixed end of accrual.} } \value{ Returns a \code{\link{EventProbabilities}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.EventProbabilities]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the event probabilities for specified parameters at given time vector. } \details{ The function computes the overall event probabilities in a two treatment groups design. For details of the parameters see \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}}. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ # Calculate event probabilities for staggered subjects' entry, piecewisely defined # survival time and hazards, and plot it. 
timeVector <- seq(0, 100, 1) y <- getEventProbabilities(timeVector, accrualTime = c(0, 20, 60), accrualIntensity = c(5, 20), piecewiseSurvivalTime = c(0, 20, 80), lambda2 = c(0.02, 0.06, 0.1), hazardRatio = 2 ) \dontrun{ plot(timeVector, y$cumulativeEventProbabilities, type = 'l') } } rpact/man/param_stDev.Rd0000644000176200001440000000105314335631010014665 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_stDev} \alias{param_stDev} \title{Parameter Description: Standard Deviation} \arguments{ \item{stDev}{The standard deviation under which the sample size or power calculation is performed, default is \code{1}. If \code{meanRatio = TRUE} is specified, \code{stDev} defines the coefficient of variation \code{sigma / mu2}. Must be a positive numeric of length 1.} } \description{ Parameter Description: Standard Deviation } \keyword{internal} rpact/man/getGroupSequentialProbabilities.Rd0000644000176200001440000000726414402556624021006 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_group_sequential.R \name{getGroupSequentialProbabilities} \alias{getGroupSequentialProbabilities} \title{Get Group Sequential Probabilities} \usage{ getGroupSequentialProbabilities(decisionMatrix, informationRates) } \arguments{ \item{decisionMatrix}{A matrix with either 2 or 4 rows and kMax = length(informationRates) columns, see details.} \item{informationRates}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}.} } \value{ Returns a numeric matrix containing the probabilities described in the details section. } \description{ Calculates probabilities in the group sequential setting. 
} \details{ Given a sequence of information rates (fixing the correlation structure), and decisionMatrix with either 2 or 4 rows and kMax = length(informationRates) columns, this function calculates a probability matrix containing, for two rows, the probabilities:\cr P(Z_1 < l_1), P(l_1 <= Z_1 < u_1, Z_2 < l_2),..., P(l_kMax-1 <= Z_kMax-1 < u_kMax-1, Z_kMax < l_kMax)\cr P(Z_1 < u_1), P(l_1 <= Z_1 < u_1, Z_2 < u_2),..., P(l_kMax-1 <= Z_kMax-1 < u_kMax-1, Z_kMax < u_kMax)\cr P(Z_1 < Inf), P(l_1 <= Z_1 < u_1, Z_2 < Inf),..., P(l_kMax-1 <= Z_kMax-1 < u_kMax-1, Z_kMax < Inf)\cr with continuation matrix\cr l_1,...,l_kMax\cr u_1,...,u_kMax\cr For 4 rows, the continuation region consists of two regions and the probability matrix is obtained analogously (cf., Wassmer and Brannath, 2016). } \examples{ # Calculate Type I error rates in the two-sided group sequential setting when # performing kMax interim stages with constant critical boundaries at level alpha: alpha <- 0.05 kMax <- 10 decisionMatrix <- matrix(c( rep(-qnorm(1 - alpha / 2), kMax), rep(qnorm(1 - alpha / 2), kMax) ), nrow = 2, byrow = TRUE) informationRates <- (1:kMax) / kMax probs <- getGroupSequentialProbabilities(decisionMatrix, informationRates) cumsum(probs[3, ] - probs[2, ] + probs[1, ]) # Do the same for a one-sided design without futility boundaries: decisionMatrix <- matrix(c( rep(-Inf, kMax), rep(qnorm(1 - alpha), kMax) ), nrow = 2, byrow = TRUE) informationRates <- (1:kMax) / kMax probs <- getGroupSequentialProbabilities(decisionMatrix, informationRates) cumsum(probs[3, ] - probs[2, ]) # Check that two-sided Pampallona and Tsiatis boundaries with binding # futility bounds obtain Type I error probabilities equal to alpha: x <- getDesignGroupSequential( alpha = 0.05, beta = 0.1, kMax = 3, typeOfDesign = "PT", deltaPT0 = 0, deltaPT1 = 0.4, sided = 2, bindingFutility = TRUE ) dm <- matrix(c( -x$criticalValues, -x$futilityBounds, 0, x$futilityBounds, 0, x$criticalValues ), nrow = 4, byrow = TRUE) 
dm[is.na(dm)] <- 0 probs <- getGroupSequentialProbabilities( decisionMatrix = dm, informationRates = (1:3) / 3 ) sum(probs[5, ] - probs[4, ] + probs[1, ]) # Check the Type I error rate decrease when using non-binding futility bounds: x <- getDesignGroupSequential( alpha = 0.05, beta = 0.1, kMax = 3, typeOfDesign = "PT", deltaPT0 = 0, deltaPT1 = 0.4, sided = 2, bindingFutility = FALSE ) dm <- matrix(c( -x$criticalValues, -x$futilityBounds, 0, x$futilityBounds, 0, x$criticalValues ), nrow = 4, byrow = TRUE) dm[is.na(dm)] <- 0 probs <- getGroupSequentialProbabilities( decisionMatrix = dm, informationRates = (1:3) / 3 ) sum(probs[5, ] - probs[4, ] + probs[1, ]) } \seealso{ Other design functions: \code{\link{getDesignCharacteristics}()}, \code{\link{getDesignConditionalDunnett}()}, \code{\link{getDesignFisher}()}, \code{\link{getDesignGroupSequential}()}, \code{\link{getDesignInverseNormal}()}, \code{\link{getPowerAndAverageSampleNumber}()} } \concept{design functions} rpact/man/getRepeatedConfidenceIntervals.Rd0000644000176200001440000001111014427374265020534 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getRepeatedConfidenceIntervals} \alias{getRepeatedConfidenceIntervals} \title{Get Repeated Confidence Intervals} \usage{ getRepeatedConfidenceIntervals( design, dataInput, ..., directionUpper = TRUE, tolerance = 1e-06, stage = NA_integer_ ) } \arguments{ \item{design}{The trial design.} \item{dataInput}{The summary data used for calculating the test results. This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival} and should be created with the function \code{\link[=getDataset]{getDataset()}}. For more information see \code{\link[=getDataset]{getDataset()}}.} \item{...}{Further arguments to be passed to methods (cf., separate functions in "See Also" below), e.g., \describe{ \item{\code{normalApproximation}}{The type of computation of the p-values. 
Default is \code{FALSE} for testing means (i.e., the t test is used) and \code{TRUE} for testing rates and the hazard ratio. For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values. In the survival setting, \code{normalApproximation = FALSE} has no effect.} \item{\code{equalVariances}}{The type of t test. For testing means in two treatment groups, either the t test assuming that the variances are equal or the t test without assuming this, i.e., the test of Welch-Satterthwaite is calculated, default is \code{TRUE}.} \item{\code{intersectionTest}}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses when testing multiple hypotheses. Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}. Four options are available in population enrichment designs: \code{"SpiessensDebois"} (one subset only), \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple treatment arms (> 2) or population enrichment designs for testing means. For multiple arms, three options are available: \code{"overallPooled"}, \code{"pairwisePooled"}, and \code{"notPooled"}, default is \code{"overallPooled"}. For enrichment designs, the options are: \code{"pooled"}, \code{"pooledFromFull"} (one subset only), and \code{"notPooled"}, default is \code{"pooled"}.} \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. For testing means and rates, also a non-stratified analysis based on overall data can be performed. For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.} }} \item{directionUpper}{Logical. 
Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{tolerance}{The numerical tolerance, default is \code{1e-06}. Must be a positive numeric of length 1.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} } \value{ Returns a \code{\link[base]{matrix}} with \code{2} rows and \code{kMax} columns containing the lower RCI limits in the first row and the upper RCI limits in the second row, where each column represents a stage. } \description{ Calculates and returns the lower and upper limit of the repeated confidence intervals of the trial. } \details{ The repeated confidence interval at a given stage of the trial contains the parameter values that are not rejected using the specified sequential design. It can be calculated at each stage of the trial and can thus be used as a monitoring tool. The repeated confidence intervals are provided up to the specified stage. 
} \examples{ \dontrun{ design <- getDesignInverseNormal(kMax = 2) data <- getDataset( n = c( 20, 30), means = c( 50, 51), stDevs = c(130, 140) ) getRepeatedConfidenceIntervals(design, dataInput = data) } } \seealso{ Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedCombinationTestResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getStageResults}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/param_design_with_default.Rd0000644000176200001440000000111114335631010017603 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_design_with_default} \alias{param_design_with_default} \title{Parameter Description: Design with Default} \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. 
In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} } \description{ Parameter Description: Design with Default } \keyword{internal} rpact/man/getDesignInverseNormal.Rd0000644000176200001440000002073414427374266017070 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_group_sequential.R \name{getDesignInverseNormal} \alias{getDesignInverseNormal} \title{Get Design Inverse Normal} \usage{ getDesignInverseNormal( ..., kMax = NA_integer_, alpha = NA_real_, beta = NA_real_, sided = 1L, informationRates = NA_real_, futilityBounds = NA_real_, typeOfDesign = c("OF", "P", "WT", "PT", "HP", "WToptimum", "asP", "asOF", "asKD", "asHSD", "asUser", "noEarlyEfficacy"), deltaWT = NA_real_, deltaPT1 = NA_real_, deltaPT0 = NA_real_, optimizationCriterion = c("ASNH1", "ASNIFH1", "ASNsum"), gammaA = NA_real_, typeBetaSpending = c("none", "bsP", "bsOF", "bsKD", "bsHSD", "bsUser"), userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = NA_real_, bindingFutility = NA, betaAdjustment = NA, constantBoundsHP = 3, twoSidedPower = NA, tolerance = 1e-08 ) } \arguments{ \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{kMax}{The maximum number of stages \code{K}. Must be a positive integer of length 1 (default value is \code{3}). The maximum selectable \code{kMax} is \code{20} for group sequential or inverse normal and \code{6} for Fisher combination test designs.} \item{alpha}{The significance level alpha, default is \code{0.025}. Must be a positive numeric of length 1.} \item{beta}{Type II error rate, necessary for providing sample size calculations (e.g., \code{\link[=getSampleSizeMeans]{getSampleSizeMeans()}}), beta spending function designs, or optimum designs, default is \code{0.20}. 
Must be a positive numeric of length 1.} \item{sided}{Is the alternative one-sided (\code{1}) or two-sided (\code{2}), default is \code{1}. Must be a positive integer of length 1.} \item{informationRates}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}.} \item{futilityBounds}{The futility bounds, defined on the test statistic z scale (numeric vector of length \code{kMax - 1}).} \item{typeOfDesign}{The type of design. Type of design is one of the following: O'Brien & Fleming (\code{"OF"}), Pocock (\code{"P"}), Wang & Tsiatis Delta class (\code{"WT"}), Pampallona & Tsiatis (\code{"PT"}), Haybittle & Peto ("HP"), Optimum design within Wang & Tsiatis class (\code{"WToptimum"}), O'Brien & Fleming type alpha spending (\code{"asOF"}), Pocock type alpha spending (\code{"asP"}), Kim & DeMets alpha spending (\code{"asKD"}), Hwang, Shi & DeCani alpha spending (\code{"asHSD"}), user defined alpha spending (\code{"asUser"}), no early efficacy stop (\code{"noEarlyEfficacy"}), default is \code{"OF"}.} \item{deltaWT}{Delta for Wang & Tsiatis Delta class.} \item{deltaPT1}{Delta1 for Pampallona & Tsiatis class rejecting H0 boundaries.} \item{deltaPT0}{Delta0 for Pampallona & Tsiatis class rejecting H1 boundaries.} \item{optimizationCriterion}{Optimization criterion for optimum design within Wang & Tsiatis class (\code{"ASNH1"}, \code{"ASNIFH1"}, \code{"ASNsum"}), default is \code{"ASNH1"}, see details.} \item{gammaA}{Parameter for alpha spending function.} \item{typeBetaSpending}{Type of beta spending. Type of of beta spending is one of the following: O'Brien & Fleming type beta spending, Pocock type beta spending, Kim & DeMets beta spending, Hwang, Shi & DeCani beta spending, user defined beta spending (\code{"bsOF"}, \code{"bsP"}, \code{"bsKD"}, \code{"bsHSD"}, \code{"bsUser"}, default is \code{"none"}).} \item{userAlphaSpending}{The user defined alpha spending. 
Numeric vector of length \code{kMax} containing the cumulative alpha-spending (Type I error rate) up to each interim stage: \code{0 <= alpha_1 <= ... <= alpha_K <= alpha}.} \item{userBetaSpending}{The user defined beta spending. Vector of length \code{kMax} containing the cumulative beta-spending up to each interim stage.} \item{gammaB}{Parameter for beta spending function.} \item{bindingFutility}{Logical. If \code{bindingFutility = TRUE} is specified the calculation of the critical values is affected by the futility bounds and the futility threshold is binding in the sense that the study must be stopped if the futility condition was reached (default is \code{FALSE}).} \item{betaAdjustment}{For two-sided beta spending designs, if \code{betaAdjustement = TRUE} a linear adjustment of the beta spending values is performed if an overlapping of decision regions for futility stopping at earlier stages occurs, otherwise no adjustment is performed (default is \code{TRUE}).} \item{constantBoundsHP}{The constant bounds up to stage \code{kMax - 1} for the Haybittle & Peto design (default is \code{3}).} \item{twoSidedPower}{For two-sided testing, if \code{twoSidedPower = TRUE} is specified the sample size calculation is performed by considering both tails of the distribution. Default is \code{FALSE}, i.e., it is assumed that one tail probability is equal to 0 or the power should be directed to one part.} \item{tolerance}{The numerical tolerance, default is \code{1e-08}.} } \value{ Returns a \code{\link{TrialDesign}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesign]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesign]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Provides adjusted boundaries and defines a group sequential design for its use in the inverse normal combination test. } \details{ Depending on \code{typeOfDesign} some parameters are specified, others not. For example, only if \code{typeOfDesign} \code{"asHSD"} is selected, \code{gammaA} needs to be specified. If an alpha spending approach was specified (\code{"asOF"}, \code{"asP"}, \code{"asKD"}, \code{"asHSD"}, or \code{"asUser"}) additionally a beta spending function can be specified to produce futility bounds. For optimum designs, \code{"ASNH1"} minimizes the expected sample size under H1, \code{"ASNIFH1"} minimizes the sum of the maximum sample and the expected sample size under H1, and \code{"ASNsum"} minimizes the sum of the maximum sample size, the expected sample size under a value midway H0 and H1, and the expected sample size under H1. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. 
There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ # Calculate two-sided critical values for a four-stage # Wang & Tsiatis design with Delta = 0.25 at level alpha = 0.05 getDesignInverseNormal(kMax = 4, alpha = 0.05, sided = 2, typeOfDesign = "WT", deltaWT = 0.25) # Defines a two-stage design at one-sided alpha = 0.025 with provision of early stopping # if the one-sided p-value exceeds 0.5 at interim and no early stopping for efficacy. # The futility bound is non-binding. getDesignInverseNormal(kMax = 2, typeOfDesign = "noEarlyEfficacy", futilityBounds = 0) \dontrun{ # Calculate one-sided critical values and binding futility bounds for a three-stage # design with alpha- and beta-spending functions according to Kim & DeMets with gamma = 2.5 # (planned informationRates as specified, default alpha = 0.025 and beta = 0.2) getDesignInverseNormal(kMax = 3, informationRates = c(0.3, 0.75, 1), typeOfDesign = "asKD", gammaA = 2.5, typeBetaSpending = "bsKD", gammaB = 2.5, bindingFutility = TRUE) } } \seealso{ \code{\link[=getDesignSet]{getDesignSet()}} for creating a set of designs to compare different designs. 
Other design functions: \code{\link{getDesignCharacteristics}()}, \code{\link{getDesignConditionalDunnett}()}, \code{\link{getDesignFisher}()}, \code{\link{getDesignGroupSequential}()}, \code{\link{getGroupSequentialProbabilities}()}, \code{\link{getPowerAndAverageSampleNumber}()} } \concept{design functions} rpact/man/param_effectList.Rd0000644000176200001440000000067114335631011015676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_effectList} \alias{param_effectList} \title{Parameter Description: Effect List} \arguments{ \item{effectList}{List of subsets, prevalences, and effect sizes with columns and number of rows reflecting the different situations to consider (see examples).} } \description{ Parameter Description: Effect List } \keyword{internal} rpact/man/getStageResults.Rd0000644000176200001440000001342514411251744015562 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getStageResults} \alias{getStageResults} \title{Get Stage Results} \usage{ getStageResults(design, dataInput, ..., stage = NA_integer_) } \arguments{ \item{design}{The trial design.} \item{dataInput}{The summary data used for calculating the test results. This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival} and should be created with the function \code{\link[=getDataset]{getDataset()}}. For more information see \code{\link[=getDataset]{getDataset()}}.} \item{...}{Further (optional) arguments to be passed: \describe{ \item{\code{thetaH0}}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. 
That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{\code{normalApproximation}}{The type of computation of the p-values. Default is \code{FALSE} for testing means (i.e., the t test is used) and \code{TRUE} for testing rates and the hazard ratio. For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values. In the survival setting, \code{normalApproximation = FALSE} has no effect.} \item{\code{equalVariances}}{The type of t test. For testing means in two treatment groups, either the t test assuming that the variances are equal or the t test without assuming this, i.e., the test of Welch-Satterthwaite is calculated, default is \code{TRUE}.} \item{\code{directionUpper}}{The direction of one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{\code{intersectionTest}}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses when testing multiple hypotheses. Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}. 
Four options are available in population enrichment designs: \code{"SpiessensDebois"} (one subset only), \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple treatment arms (> 2) or population enrichment designs for testing means. For multiple arms, three options are available: \code{"overallPooled"}, \code{"pairwisePooled"}, and \code{"notPooled"}, default is \code{"overallPooled"}. For enrichment designs, the options are: \code{"pooled"}, \code{"pooledFromFull"} (one subset only), and \code{"notPooled"}, default is \code{"pooled"}.} \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. For testing means and rates, also a non-stratified analysis based on overall data can be performed. For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.} }} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} } \value{ Returns a \code{\link{StageResults}} object. \itemize{ \item \code{\link[=names.StageResults]{names}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.StageResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.StageResults]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns summary statistics and p-values for a given data set and a given design. } \details{ Calculates and returns the stage results of the specified design and data input at the specified stage. 
} \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ design <- getDesignInverseNormal() dataRates <- getDataset( n1 = c(10, 10), n2 = c(20, 20), events1 = c( 8, 10), events2 = c(10, 16)) getStageResults(design, dataRates) } \seealso{ Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedCombinationTestResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/param_accrualIntensityType.Rd0000644000176200001440000000113414232463334017773 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_accrualIntensityType} \alias{param_accrualIntensityType} \title{Parameter Description: Accrual Intensity Type} \arguments{ \item{accrualIntensityType}{A character value specifying the accrual intensity input type. 
Must be one of \code{"auto"}, \code{"absolute"}, or \code{"relative"}; default is \code{"auto"}, i.e., if all values are < 1 the type is \code{"relative"}, otherwise it is \code{"absolute"}.} } \description{ Parameter Description: Accrual Intensity Type } \keyword{internal} rpact/man/SimulationResultsSurvival.Rd0000644000176200001440000001570714450501306017677 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsSurvival} \alias{SimulationResultsSurvival} \title{Class for Simulation Results Survival} \description{ A class for simulation results survival. } \details{ Use \code{\link[=getSimulationSurvival]{getSimulationSurvival()}} to create an object of this type. \code{SimulationResultsSurvival} is the basic class for \itemize{ \item \code{\link{SimulationResultsSurvival}}, \item \code{\link{SimulationResultsMultiArmSurvival}}, and \item \code{\link{SimulationResultsEnrichmentSurvival}}. } } \section{Fields}{ \describe{ \item{\code{maxNumberOfIterations}}{The number of simulation iterations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. 
Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{plannedEvents}}{Determines the number of cumulated (overall) events in survival designs when the interim stages are planned. For two treatment arms, is the number of events for both treatment arms. For multi-arm designs, refers to the overall number of events for the selected arms plus control. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{minNumberOfEventsPerStage}}{Determines the minimum number of events per stage for data-driven sample size recalculation. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{maxNumberOfEventsPerStage}}{Determines the maximum number of events per stage for data-driven sample size recalculation. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{calcEventsFunction}}{An optional function that can be entered to define how event size is recalculated. By default, recalculation is performed with conditional power with specified \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage}.} \item{\code{expectedNumberOfEvents}}{The expected number of events under specified alternative. Is a numeric vector.} \item{\code{pi1}}{The assumed event rate in the treatment group. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{pi2}}{The assumed event rate in the control group. 
Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{median1}}{The assumed median survival time in the treatment group. Is a numeric vector.} \item{\code{median2}}{The assumed median survival time in the reference group. Is a numeric vector of length 1.} \item{\code{maxNumberOfSubjects}}{The maximum number of subjects for power calculations. Is a numeric vector.} \item{\code{accrualTime}}{The assumed accrual time intervals for the study. Is a numeric vector.} \item{\code{accrualIntensity}}{The absolute accrual intensities. Is a numeric vector of length \code{kMax}.} \item{\code{dropoutRate1}}{The assumed drop-out rate in the treatment group. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{dropoutRate2}}{The assumed drop-out rate in the control group. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{dropoutTime}}{The assumed time for drop-out rates in the control and treatment group. Is a numeric vector of length 1.} \item{\code{eventTime}}{The assumed time under which the event rates are calculated. Is a numeric vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{allocation1}}{The number of subjects to be assigned to treatment 1 in subsequent order. Is a numeric vector of length 1 containing a whole number.} \item{\code{allocation2}}{The number of subjects to be assigned to treatment 2 in subsequent order. Is a numeric vector of length 1 containing a whole number.} \item{\code{kappa}}{The shape of the Weibull distribution if \code{kappa!=1}. Is a numeric vector of length 1.} \item{\code{piecewiseSurvivalTime}}{The time intervals for the piecewise definition of the exponential survival time cumulative distribution function. Is a numeric vector.} \item{\code{lambda1}}{The assumed hazard rate in the treatment group. 
Is a numeric vector of length \code{kMax}.} \item{\code{lambda2}}{The assumed hazard rate in the reference group. Is a numeric vector of length 1.} \item{\code{earlyStop}}{The probability to stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{hazardRatio}}{The hazard ratios under consideration. Is a numeric vector of length \code{kMax}.} \item{\code{studyDuration}}{The study duration for specified effect size. Is a positive numeric vector.} \item{\code{eventsNotAchieved}}{The simulated number of cases how often the number of events was not reached. Is a numeric matrix.} \item{\code{numberOfSubjects}}{In simulation results data set: The number of subjects under consideration when the interim analysis takes place.} \item{\code{numberOfSubjects1}}{In simulation results data set: The number of subjects under consideration in treatment arm 1 when the interim analysis takes place.} \item{\code{numberOfSubjects2}}{In simulation results data set: The number of subjects under consideration in treatment arm 2 when the interim analysis takes place.} \item{\code{eventsPerStage}}{The number of events per stage. Is a numeric matrix.} \item{\code{overallEventsPerStage}}{The cumulative events over stages. Is a numeric matrix.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{rejectPerStage}}{The probability to reject a hypothesis per stage of the trial. Is a numeric matrix.} \item{\code{overallReject}}{The overall rejection probability. Is a numeric vector.} \item{\code{conditionalPowerAchieved}}{The calculated conditional power, under the assumption of observed or assumed effect sizes. 
Is a numeric matrix.} }} \keyword{internal} rpact/man/getSimulationMultiArmMeans.Rd0000644000176200001440000003631014445304766017730 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_multiarm_means.R \name{getSimulationMultiArmMeans} \alias{getSimulationMultiArmMeans} \title{Get Simulation Multi-Arm Means} \usage{ getSimulationMultiArmMeans( design = NULL, ..., activeArms = 3L, effectMatrix = NULL, typeOfShape = c("linear", "sigmoidEmax", "userDefined"), muMaxVector = seq(0, 1, 0.2), gED50 = NA_real_, slope = 1, intersectionTest = c("Dunnett", "Bonferroni", "Simes", "Sidak", "Hierarchical"), stDev = 1, adaptations = NA, typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), effectMeasure = c("effectEstimate", "testStatistic"), successCriterion = c("all", "atLeastOne"), epsilonValue = NA_real_, rValue = NA_real_, threshold = -Inf, plannedSubjects = NA_integer_, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, stDevH1 = NA_real_, maxNumberOfIterations = 1000L, seed = NA_real_, calcSubjectsFunction = NULL, selectArmsFunction = NULL, showStatistics = FALSE ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. 
In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{activeArms}{The number of active treatment arms to be compared with control, default is \code{3}.} \item{effectMatrix}{Matrix of effect sizes with \code{activeArms} columns and number of rows reflecting the different situations to consider.} \item{typeOfShape}{The shape of the dose-response relationship over the treatment groups. This can be either \code{"linear"}, \code{"sigmoidEmax"}, or \code{"userDefined"}, default is \code{"linear"}.\cr For \code{"linear"}, \code{"muMaxVector"} specifies the range of effect sizes for the treatment group with highest response. If \code{"sigmoidEmax"} is selected, \code{"gED50"} and \code{"slope"} have to be entered to specify the ED50 and the slope of the sigmoid Emax model. For \code{"sigmoidEmax"}, \code{"muMaxVector"} specifies the range of effect sizes for the treatment group with response according to infinite dose. If \code{"userDefined"} is selected, \code{"effectMatrix"} has to be entered.} \item{muMaxVector}{Range of effect sizes for the treatment group with highest response for \code{"linear"} and \code{"sigmoidEmax"} model, default is \code{seq(0, 1, 0.2)}.} \item{gED50}{If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"gED50"} has to be entered to specify the ED50 of the sigmoid Emax model.} \item{slope}{If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"slope"} can be entered to specify the slope of the sigmoid Emax model, default is 1.} \item{intersectionTest}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses. 
Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}.} \item{stDev}{The standard deviation under which the data is simulated, default is \code{1}. If \code{meanRatio = TRUE} is specified, \code{stDev} defines the coefficient of variation \code{sigma / mu2}. Must be a positive numeric of length 1.} \item{adaptations}{A logical vector of length \code{kMax - 1} indicating whether or not an adaptation takes place at interim k, default is \code{rep(TRUE, kMax - 1)}.} \item{typeOfSelection}{The way the treatment arms or populations are selected at interim. Five options are available: \code{"best"}, \code{"rBest"}, \code{"epsilon"}, \code{"all"}, and \code{"userDefined"}, default is \code{"best"}.\cr For \code{"rBest"} (select the \code{rValue} best treatment arms/populations), the parameter \code{rValue} has to be specified, for \code{"epsilon"} (select treatment arm/population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. If \code{"userDefined"} is selected, \code{"selectArmsFunction"} or \code{"selectPopulationsFunction"} has to be specified.} \item{effectMeasure}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (difference for means and rates or ratio for survival) (\code{"effectEstimate"}), default is \code{"effectEstimate"}.} \item{successCriterion}{Defines when the study is stopped for efficacy at interim. 
Two options are available: \code{"all"} stops the trial if the efficacy criterion is fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim, default is \code{"all"}.} \item{epsilonValue}{For \code{typeOfSelection = "epsilon"} (select treatment arm / population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. Must be a numeric of length 1.} \item{rValue}{For \code{typeOfSelection = "rbest"} (select the \code{rValue} best treatment arms / populations), the parameter \code{rValue} has to be specified.} \item{threshold}{Selection criterion: treatment arm / population is selected only if \code{effectMeasure} exceeds \code{threshold}, default is \code{-Inf}. \code{threshold} can also be a vector of length \code{activeArms} referring to a separate threshold condition over the treatment arms.} \item{plannedSubjects}{\code{plannedSubjects} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs, \code{plannedSubjects} refers to the number of subjects per selected active arm.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. 
In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{minNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfSubjectsPerStage} with length kMax determines the minimum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{minNumberOfSubjectsPerStage} refers to the minimum number of subjects per selected active arm.} \item{maxNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfSubjectsPerStage} with length kMax determines the maximum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{maxNumberOfSubjectsPerStage} refers to the maximum number of subjects per selected active arm.} \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} \item{thetaH1}{If specified, the value of the alternative under which the conditional power or sample size recalculation calculation is performed. 
Must be a numeric of length 1.} \item{stDevH1}{If specified, the value of the standard deviation under which the conditional power or sample size recalculation calculation is performed, default is the value of \code{stDev}. Must be a positive numeric of length 1.} \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcSubjectsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, sample size recalculation is performed with conditional power and specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples).} \item{selectArmsFunction}{Optionally, a function can be entered that defines the way of how treatment arms are selected. This function is allowed to depend on \code{effectVector} with length \code{activeArms} and \code{stage} (see examples).} \item{showStatistics}{Logical. If \code{TRUE}, summary statistics of the simulated data are displayed for the \code{print} command, otherwise the output is suppressed, default is \code{FALSE}.} } \value{ Returns a \code{\link{SimulationResults}} object. The following generics (R generic functions) are available for this object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.SimulationResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. 
} } \description{ Returns the simulated power, stopping and selection probabilities, conditional power, and expected sample size for testing means in a multi-arm treatment groups testing situation. } \details{ At given design the function simulates the power, stopping probabilities, selection probabilities, and expected sample size at given number of subjects, parameter configuration, and treatment arm selection rule in the multi-arm situation. An allocation ratio can be specified referring to the ratio of number of subjects in the active treatment groups as compared to the control group. The definition of \code{thetaH1} and/or \code{stDevH1} makes only sense if \code{kMax} > 1 and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined. \code{calcSubjectsFunction}\cr This function returns the number of subjects at given conditional power and conditional critical value for specified testing situation. The function might depend on the variables \code{stage}, \code{selectedArms}, \code{plannedSubjects}, \code{allocationRatioPlanned}, \code{minNumberOfSubjectsPerStage}, \code{maxNumberOfSubjectsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, \code{overallEffects}, and \code{stDevH1}. The function has to contain the three-dots argument '...' (see examples). } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \examples{ \dontrun{ # Assess a treatment-arm selection strategy with three active arms, # if the best of the arms is selected for the second stage, and # compare it with the no-selection case. # Assume a linear dose-response relationship maxNumberOfIterations <- 100 designIN <- getDesignInverseNormal(typeOfDesign = "OF", kMax = 2) sim <- getSimulationMultiArmMeans(design = designIN, activeArms = 3, typeOfShape = "linear", muMaxVector = seq(0,0.8,0.2), intersectionTest = "Simes", typeOfSelection = "best", plannedSubjects = c(30,60), maxNumberOfIterations = maxNumberOfIterations) sim0 <- getSimulationMultiArmMeans(design = designIN, activeArms = 3, typeOfShape = "linear", muMaxVector = seq(0,0.8,0.2), intersectionTest = "Simes", typeOfSelection = "all", plannedSubjects = c(30,60), maxNumberOfIterations = maxNumberOfIterations) sim$rejectAtLeastOne sim$expectedNumberOfSubjects sim0$rejectAtLeastOne sim0$expectedNumberOfSubjects # Compare the power of the conditional Dunnett test with the power of the # combination test using Dunnett's intersection tests if no treatment arm # selection takes place. Assume a linear dose-response relationship. 
maxNumberOfIterations <- 100 designIN <- getDesignInverseNormal(typeOfDesign = "asUser", userAlphaSpending = c(0, 0.025)) designCD <- getDesignConditionalDunnett(secondStageConditioning = TRUE) index <- 1 for (design in c(designIN, designCD)) { results <- getSimulationMultiArmMeans(design, activeArms = 3, muMaxVector = seq(0, 1, 0.2), typeOfShape = "linear", plannedSubjects = cumsum(rep(20, 2)), intersectionTest = "Dunnett", typeOfSelection = "all", maxNumberOfIterations = maxNumberOfIterations) if (index == 1) { drift <- results$effectMatrix[nrow(results$effectMatrix), ] plot(drift, results$rejectAtLeastOne, type = "l", lty = 1, lwd = 3, col = "black", ylab = "Power") } else { lines(drift,results$rejectAtLeastOne, type = "l", lty = index, lwd = 3, col = "red") } index <- index + 1 } legend("topleft", legend=c("Combination Dunnett", "Conditional Dunnett"), col=c("black", "red"), lty = (1:2), cex = 0.8) # Assess the design characteristics of a user defined selection # strategy in a two-stage design using the inverse normal method # with constant bounds. Stopping for futility due to # de-selection of all treatment arms. 
designIN <- getDesignInverseNormal(typeOfDesign = "P", kMax = 2) mySelection <- function(effectVector) { selectedArms <- (effectVector >= c(0, 0.1, 0.3)) return(selectedArms) } results <- getSimulationMultiArmMeans(designIN, activeArms = 3, muMaxVector = seq(0, 1, 0.2), typeOfShape = "linear", plannedSubjects = c(30,60), intersectionTest = "Dunnett", typeOfSelection = "userDefined", selectArmsFunction = mySelection, maxNumberOfIterations = 100) options(rpact.summary.output.size = "medium") summary(results) if (require(ggplot2)) plot(results, type = c(5,3,9), grid = 4) } } rpact/man/param_eventTime.Rd0000644000176200001440000000057014232463333015552 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_eventTime} \alias{param_eventTime} \title{Parameter Description: Event Time} \arguments{ \item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.} } \description{ Parameter Description: Event Time } \keyword{internal} rpact/man/length.TrialDesignSet.Rd0000644000176200001440000000131614343406410016566 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{length.TrialDesignSet} \alias{length.TrialDesignSet} \title{Length of Trial Design Set} \usage{ \method{length}{TrialDesignSet}(x) } \arguments{ \item{x}{A \code{\link{TrialDesignSet}} object.} } \value{ Returns a non-negative \code{\link[base]{integer}} of length 1 representing the number of design in the \code{TrialDesignSet}. } \description{ Returns the number of designs in a \code{TrialDesignSet}. } \details{ Is helpful for iteration over all designs in a design set. 
} \examples{ designSet <- getDesignSet(design = getDesignGroupSequential(), alpha = c(0.01, 0.05)) length(designSet) } \keyword{internal} rpact/man/StageResultsRates.Rd0000644000176200001440000000447514450467342016074 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsRates} \alias{StageResultsRates} \title{Stage Results of Rates} \description{ Class for stage results of rates. } \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of rates. } \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{overallTestStatistics}}{The overall, i.e., cumulated test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{pValues}}{The stage-wise p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{overallPValues}}{The overall, i.e., cumulated p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{direction}}{Specifies the direction of the alternative, is either "upper" or "lower". Only applicable for one-sided testing.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. 
Is a logical vector of length 1.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. Is a numeric vector of length \code{kMax}.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{...}}{Names of \code{dataInput}.} }} \keyword{internal} rpact/man/param_calcEventsFunction.Rd0000644000176200001440000000120114370155527017405 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_calcEventsFunction} \alias{param_calcEventsFunction} \title{Parameter Description: Calculate Events Function} \arguments{ \item{calcEventsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. 
By default, event number recalculation is performed with conditional power and specified \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} (see details and examples).} } \description{ Parameter Description: Calculate Events Function } \keyword{internal} rpact/man/param_accrualIntensity.Rd0000644000176200001440000000074114335631011017125 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_accrualIntensity} \alias{param_accrualIntensity} \title{Parameter Description: Accrual Intensity} \arguments{ \item{accrualIntensity}{A numeric vector of accrual intensities, default is the relative intensity \code{0.1} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} } \description{ Parameter Description: Accrual Intensity } \keyword{internal} rpact/man/getSimulationRates.Rd0000644000176200001440000003535514417202031016255 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_base_rates.R \name{getSimulationRates} \alias{getSimulationRates} \title{Get Simulation Rates} \usage{ getSimulationRates( design = NULL, ..., groups = 2L, normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = seq(0.2, 0.5, 0.1), pi2 = NA_real_, plannedSubjects = NA_real_, directionUpper = TRUE, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, pi1H1 = NA_real_, pi2H1 = NA_real_, maxNumberOfIterations = 1000L, seed = NA_real_, calcSubjectsFunction = NULL, showStatistics = FALSE ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. 
In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{normalApproximation}{The type of computation of the p-values. Default is \code{FALSE} for testing means (i.e., the t test is used) and \code{TRUE} for testing rates and the hazard ratio. For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values. In the survival setting \code{normalApproximation = FALSE} has no effect.} \item{riskRatio}{If \code{TRUE}, the design characteristics for one-sided testing of H0: \code{pi1 / pi2 = thetaH0} are simulated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. 
} For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{pi1}{A numeric value or vector that represents the assumed probability in the active treatment group if two treatment groups are considered, or the alternative probability for a one treatment group design, default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or \code{seq(0.4, 0.6, 0.1)} (sample size calculations).} \item{pi2}{A numeric value that represents the assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}.} \item{plannedSubjects}{\code{plannedSubjects} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs, \code{plannedSubjects} refers to the number of subjects per selected active arm.} \item{directionUpper}{Logical. Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. 
In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{minNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfSubjectsPerStage} with length kMax determines the minimum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{minNumberOfSubjectsPerStage} refers to the minimum number of subjects per selected active arm.} \item{maxNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfSubjectsPerStage} with length kMax determines the maximum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{maxNumberOfSubjectsPerStage} refers to the maximum number of subjects per selected active arm.} \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. 
Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} \item{pi1H1}{If specified, the assumed probability in the active treatment group if two treatment groups are considered, or the assumed probability for a one treatment group design, for which the conditional power was calculated.} \item{pi2H1}{If specified, the assumed probability in the reference group if two treatment groups are considered, for which the conditional power was calculated.} \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcSubjectsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, sample size recalculation is performed with conditional power and specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples).} \item{showStatistics}{Logical. If \code{TRUE}, summary statistics of the simulated data are displayed for the \code{print} command, otherwise the output is suppressed, default is \code{FALSE}.} } \value{ Returns a \code{\link{SimulationResults}} object. 
The following generics (R generic functions) are available for this object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.SimulationResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the simulated power, stopping probabilities, conditional power, and expected sample size for testing rates in a one or two treatment groups testing situation. } \details{ At given design the function simulates the power, stopping probabilities, conditional power, and expected sample size at given number of subjects and parameter configuration. Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number of subjects in the two treatment groups. The definition of \code{pi1H1} and/or \code{pi2H1} makes only sense if \code{kMax} > 1 and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined. \code{calcSubjectsFunction}\cr This function returns the number of subjects at given conditional power and conditional critical value for specified testing situation. The function might depend on variables \code{stage}, \code{riskRatio}, \code{thetaH0}, \code{groups}, \code{plannedSubjects}, \code{sampleSizesPerStage}, \code{directionUpper}, \code{allocationRatioPlanned}, \code{minNumberOfSubjectsPerStage}, \code{maxNumberOfSubjectsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, \code{overallRate}, \code{farringtonManningValue1}, and \code{farringtonManningValue2}. 
The function has to contain the three-dots argument '...' (see examples). } \section{Simulation Data}{ The summary statistics "Simulated data" contains the following parameters: median \link{range}; mean +/-sd\cr \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable the output of the aggregated simulated data.\cr Example 1: \cr \code{simulationResults <- getSimulationRates(plannedSubjects = 40)} \cr \code{simulationResults$show(showStatistics = FALSE)}\cr Example 2: \cr \code{simulationResults <- getSimulationRates(plannedSubjects = 40)} \cr \code{simulationResults$setShowStatistics(FALSE)}\cr \code{simulationResults}\cr \code{\link[=getData]{getData()}} can be used to get the aggregated simulated data from the object as \code{\link[base]{data.frame}}. The data frame contains the following columns: \enumerate{ \item \code{iterationNumber}: The number of the simulation iteration. \item \code{stageNumber}: The stage. \item \code{pi1}: The assumed or derived event rate in the treatment group (if available). \item \code{pi2}: The assumed or derived event rate in the control group (if available). \item \code{numberOfSubjects}: The number of subjects under consideration when the (interim) analysis takes place. \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise. \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise. \item \code{testStatistic}: The test statistic that is used for the test decision, depends on which design was chosen (group sequential, inverse normal, or Fisher combination test)' \item \code{testStatisticsPerStage}: The test statistic for each stage if only data from the considered stage is taken into account. \item \code{overallRate1}: The cumulative rate in treatment group 1. \item \code{overallRate2}: The cumulative rate in treatment group 2. \item \code{stagewiseRates1}: The stage-wise rate in treatment group 1. 
\item \code{stagewiseRates2}: The stage-wise rate in treatment group 2. \item \code{sampleSizesPerStage1}: The stage-wise sample size in treatment group 1. \item \code{sampleSizesPerStage2}: The stage-wise sample size in treatment group 2. \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise. \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for selected sample size and effect. The effect is either estimated from the data or can be user defined with \code{pi1H1} and \code{pi2H1}. } } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \examples{ # Fixed sample size design (two groups) with total sample # size 120, pi1 = (0.3,0.4,0.5,0.6) and pi2 = 0.3 getSimulationRates(pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, plannedSubjects = 120, maxNumberOfIterations = 10) \dontrun{ # Increase number of simulation iterations and compare results with power calculator getSimulationRates(pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, plannedSubjects = 120, maxNumberOfIterations = 50) getPowerRates(pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, maxNumberOfSubjects = 120) # Do the same for a two-stage Pocock inverse normal group sequential # design with non-binding futility stops designIN <- getDesignInverseNormal(typeOfDesign = "P", futilityBounds = c(0)) getSimulationRates(designIN, pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, plannedSubjects = c(40, 80), maxNumberOfIterations = 50) getPowerRates(designIN, pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, maxNumberOfSubjects = 80) # Assess power and average sample size if a sample size reassessment is # foreseen at conditional power 80\% for the subsequent stage (decrease and increase) # based on observed overall (cumulative) rates and specified minNumberOfSubjectsPerStage # and maxNumberOfSubjectsPerStage # Do the same under the assumption that a sample size increase only takes place # if the rate difference exceeds the value 0.1 at interim. 
For this, the sample # size recalculation method needs to be redefined: mySampleSizeCalculationFunction <- function(..., stage, plannedSubjects, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, conditionalCriticalValue, overallRate) { if (overallRate[1] - overallRate[2] < 0.1) { return(plannedSubjects[stage] - plannedSubjects[stage - 1]) } else { rateUnderH0 <- (overallRate[1] + overallRate[2]) / 2 stageSubjects <- 2 * (max(0, conditionalCriticalValue * sqrt(2 * rateUnderH0 * (1 - rateUnderH0)) + stats::qnorm(conditionalPower) * sqrt(overallRate[1] * (1 - overallRate[1]) + overallRate[2] * (1 - overallRate[2]))))^2 / (max(1e-12, (overallRate[1] - overallRate[2])))^2 stageSubjects <- ceiling(min(max( minNumberOfSubjectsPerStage[stage], stageSubjects), maxNumberOfSubjectsPerStage[stage])) return(stageSubjects) } } getSimulationRates(designIN, pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, plannedSubjects = c(40, 80), minNumberOfSubjectsPerStage = c(40, 20), maxNumberOfSubjectsPerStage = c(40, 160), conditionalPower = 0.8, calcSubjectsFunction = mySampleSizeCalculationFunction, maxNumberOfIterations = 50) } } rpact/man/print.SummaryFactory.Rd0000644000176200001440000000173714450462767016574 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_summary.R \name{print.SummaryFactory} \alias{print.SummaryFactory} \title{Summary Factory Printing} \usage{ \method{print}{SummaryFactory}(x, ..., markdown = FALSE, showSummary = FALSE, sep = "\\n-----\\n\\n") } \arguments{ \item{x}{The summary factory object.} \item{...}{Optional plot arguments. 
At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{markdown}{If \code{TRUE}, the object \code{x} will be printed using markdown syntax; normal representation will be used otherwise (default is \code{FALSE})} \item{showSummary}{Show the summary before creating the print output, default is \code{FALSE}.} \item{sep}{The separator line between the summary and the print output.} } \description{ Prints the result object stored inside a summary factory. } \details{ Generic function to print all kinds of summary factories. } rpact/man/param_seed.Rd0000644000176200001440000000051214232463334014527 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_seed} \alias{param_seed} \title{Parameter Description: Seed} \arguments{ \item{seed}{The seed to reproduce the simulation, default is a random seed.} } \description{ Parameter Description: Seed } \keyword{internal} rpact/man/param_tolerance.Rd0000644000176200001440000000057514312324046015567 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_tolerance} \alias{param_tolerance} \title{Parameter Description: Tolerance} \arguments{ \item{tolerance}{The numerical tolerance, default is \code{1e-06}. 
Must be a positive numeric of length 1.} } \description{ Parameter Description: Tolerance } \keyword{internal} rpact/man/param_conditionalPowerSimulation.Rd0000644000176200001440000000211614335631011021167 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_conditionalPowerSimulation} \alias{param_conditionalPowerSimulation} \title{Parameter Description: Conditional Power} \arguments{ \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} } \description{ Parameter Description: Conditional Power } \keyword{internal} rpact/man/Dataset.Rd0000644000176200001440000000140314450467342014021 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \docType{class} \name{Dataset} \alias{Dataset} \title{Dataset} \description{ Basic class for datasets. } \details{ \code{Dataset} is the basic class for \itemize{ \item \code{\link{DatasetMeans}}, \item \code{\link{DatasetRates}}, \item \code{\link{DatasetSurvival}}, and \item \code{\link{DatasetEnrichmentSurvival}}. } This basic class contains the fields \code{stages} and \code{groups} and several commonly used functions. 
} \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{groups}}{The group numbers. Is a numeric vector.} }} \keyword{internal} rpact/man/param_dropoutRate1.Rd0000644000176200001440000000060114232463333016176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_dropoutRate1} \alias{param_dropoutRate1} \title{Parameter Description: Dropout Rate (1)} \arguments{ \item{dropoutRate1}{The assumed drop-out rate in the treatment group, default is \code{0}.} } \description{ Parameter Description: Dropout Rate (1) } \keyword{internal} rpact/man/param_epsilonValue.Rd0000644000176200001440000000100214335631011016241 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_epsilonValue} \alias{param_epsilonValue} \title{Parameter Description: Epsilon Value} \arguments{ \item{epsilonValue}{For \code{typeOfSelection = "epsilon"} (select treatment arm / population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. Must be a numeric of length 1.} } \description{ Parameter Description: Epsilon Value } \keyword{internal} rpact/man/param_directionUpper.Rd0000644000176200001440000000077714335631010016610 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_directionUpper} \alias{param_directionUpper} \title{Parameter Description: Direction Upper} \arguments{ \item{directionUpper}{Logical. 
Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} } \description{ Parameter Description: Direction Upper } \keyword{internal} rpact/man/param_pi1_rates.Rd0000644000176200001440000000116714335631010015475 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_pi1_rates} \alias{param_pi1_rates} \title{Parameter Description: Pi (1) for Rates} \arguments{ \item{pi1}{A numeric value or vector that represents the assumed probability in the active treatment group if two treatment groups are considered, or the alternative probability for a one treatment group design, default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or \code{seq(0.4, 0.6, 0.1)} (sample size calculations).} } \description{ Parameter Description: Pi (1) for Rates } \keyword{internal} rpact/man/getConditionalRejectionProbabilities.Rd0000644000176200001440000000526714411251744021761 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getConditionalRejectionProbabilities} \alias{getConditionalRejectionProbabilities} \title{Get Conditional Rejection Probabilities} \usage{ getConditionalRejectionProbabilities(stageResults, ...) } \arguments{ \item{stageResults}{The results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}}.} \item{...}{Further (optional) arguments to be passed: \describe{ \item{\code{iterations}}{Iterations for simulating the conditional rejection probabilities for Fisher's combination test. For checking purposes, it can be estimated via simulation with specified \code{iterations}.} \item{\code{seed}}{Seed for simulating the conditional rejection probabilities for Fisher's combination test. 
See above, default is a random seed.} }} } \value{ Returns a \code{\link[base]{numeric}} vector of length \code{kMax} or in case of multi-arm stage results a \code{\link[base]{matrix}} (each column represents a stage, each row a comparison) containing the conditional rejection probabilities. } \description{ Calculates the conditional rejection probabilities (CRP) for given test results. } \details{ The conditional rejection probability is the probability, under H0, to reject H0 in one of the subsequent (remaining) stages. The probability is calculated using the specified design. For testing rates and the survival design, the normal approximation is used, i.e., it is calculated with the use of the prototype case testing a mean for normally distributed data with known variance. The conditional rejection probabilities are provided up to the specified stage. For Fisher's combination test, you can check the validity of the CRP calculation via simulation. } \examples{ \dontrun{ # Calculate CRP for a Fisher's combination test design with # two remaining stages and check the results by simulation. 
design <- getDesignFisher(kMax = 4, informationRates = c(0.1, 0.3, 0.8, 1), alpha = 0.01) data <- getDataset(n = c(40, 40), events = c(20, 22)) sr <- getStageResults(design, data, thetaH0 = 0.4) getConditionalRejectionProbabilities(sr) getConditionalRejectionProbabilities(sr, simulateCRP = TRUE, seed = 12345, iterations = 10000) } } \seealso{ Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedCombinationTestResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getStageResults}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/StageResultsMultiArmSurvival.Rd0000644000176200001440000000574414450467342020304 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsMultiArmSurvival} \alias{StageResultsMultiArmSurvival} \title{Stage Results Multi Arm Survival} \description{ Class for stage results of multi arm survival data } \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of multi arm survival. } \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{pValues}}{The stage-wise p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. 
Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. Is a numeric vector of length \code{kMax}.} \item{\code{overallTestStatistics}}{The overall, i.e., cumulated test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{overallPValues}}{The overall, i.e., cumulated p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{separatePValues}}{The p-values from the separate stages. Is a numeric matrix.} \item{\code{singleStepAdjustedPValues}}{The adjusted p-value for testing multiple hypotheses per stage of the trial.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. 
Is a logical vector of length 1.} }} \keyword{internal} rpact/man/TrialDesignConditionalDunnett.Rd0000644000176200001440000000554614450467342020403 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignConditionalDunnett} \alias{TrialDesignConditionalDunnett} \title{Conditional Dunnett Design} \description{ Trial design for conditional Dunnett tests. } \details{ This object should not be created directly; use \code{\link{getDesignConditionalDunnett}} with suitable arguments to create a conditional Dunnett test design. } \section{Fields}{ \describe{ \item{\code{kMax}}{The maximum number of stages \code{K}. Is a numeric vector of length 1 containing a whole number.} \item{\code{alpha}}{The significance level alpha, default is 0.025. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{informationRates}}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{userAlphaSpending}}{The user defined alpha spending. Contains the cumulative alpha-spending (type I error rate) up to each interim stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{criticalValues}}{The critical values for each stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{stageLevels}}{The adjusted significance levels to reach significance in a group sequential design. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{alphaSpent}}{The cumulative alpha spent at each stage. 
Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{bindingFutility}}{If \code{TRUE}, the calculation of the critical values is affected by the futility bounds and the futility threshold is binding in the sense that the study must be stopped if the futility condition was reached (default is \code{FALSE}). Is a logical vector of length 1.} \item{\code{tolerance}}{The numerical tolerance, default is \code{1e-06}. Is a numeric vector of length 1.} \item{\code{informationAtInterim}}{The information to be expected at interim, default is \code{informationAtInterim = 0.5}. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{secondStageConditioning}}{The way the second stage p-values are calculated within the closed system of hypotheses. If \code{FALSE}, the unconditional adjusted p-values are used, otherwise conditional adjusted p-values are calculated. Is a logical vector of length 1.} \item{\code{sided}}{Describes if the alternative is one-sided (\code{1}) or two-sided (\code{2}). Is a numeric vector of length 1 containing a whole number.} }} \seealso{ \code{\link{getDesignConditionalDunnett}} for creating a conditional Dunnett test design. 
} \keyword{internal} rpact/man/plot.SimulationResults.Rd0000644000176200001440000001220714402556624017122 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \name{plot.SimulationResults} \alias{plot.SimulationResults} \title{Simulation Results Plotting} \usage{ \method{plot}{SimulationResults}( x, y, ..., main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1L, palette = "Set1", theta = seq(-1, 1, 0.01), plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, grid = 1, plotSettings = NULL ) } \arguments{ \item{x}{The simulation results, obtained from \cr \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{type}{The plot type (default = \code{1}). 
The following plot types are available: \itemize{ \item \code{1}: creates an 'Overall Success' plot (multi-arm and enrichment only) \item \code{2}: creates a 'Success per Stage' plot (multi-arm and enrichment only) \item \code{3}: creates a 'Selected Arms per Stage' plot (multi-arm and enrichment only) \item \code{4}: creates a 'Reject per Stage' or 'Rejected Arms per Stage' plot \item \code{5}: creates an 'Overall Power and Early Stopping' plot \item \code{6}: creates an 'Expected Number of Subjects and Power / Early Stop' or 'Expected Number of Events and Power / Early Stop' plot \item \code{7}: creates an 'Overall Power' plot \item \code{8}: creates an 'Overall Early Stopping' plot \item \code{9}: creates an 'Expected Sample Size' or 'Expected Number of Events' plot \item \code{10}: creates a 'Study Duration' plot (non-multi-arm and non-enrichment survival only) \item \code{11}: creates an 'Expected Number of Subjects' plot (non-multi-arm and non-enrichment survival only) \item \code{12}: creates an 'Analysis Times' plot (non-multi-arm and non-enrichment survival only) \item \code{13}: creates a 'Cumulative Distribution Function' plot (non-multi-arm and non-enrichment survival only) \item \code{14}: creates a 'Survival Function' plot (non-multi-arm and non-enrichment survival only) \item \code{"all"}: creates all available plots and returns them as a grid plot or list }} \item{palette}{The palette, default is \code{"Set1"}.} \item{theta}{A vector of standardized effect sizes (theta values), default is a sequence from -1 to 1.} \item{plotPointsEnabled}{Logical. If \code{TRUE}, additional points will be plotted.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. 
Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{grid}{An integer value specifying the output of multiple plots. By default (\code{1}) a list of \code{ggplot} objects will be returned. If a \code{grid} value > 1 was specified, a grid plot will be returned if the number of plots is <= specified \code{grid} value; a list of \code{ggplot} objects will be returned otherwise. If \code{grid = 0} is specified, all plots will be created using \code{\link[base]{print}} command and a list of \code{ggplot} objects will be returned invisible. 
Note that one of the following packages must be installed to create a grid plot: 'ggpubr', 'gridExtra', or 'cowplot'.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSettings()}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots simulation results. } \details{ Generic function to plot all kinds of simulation results. } \examples{ \dontrun{ results <- getSimulationMeans( alternative = 0:4, stDev = 5, plannedSubjects = 40, maxNumberOfIterations = 1000 ) plot(results, type = 5) } } rpact/man/getDataset.Rd0000644000176200001440000002616214372411346014526 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{getDataset} \alias{getDataset} \alias{getDataSet} \title{Get Dataset} \usage{ getDataset(..., floatingPointNumbersEnabled = FALSE) getDataSet(..., floatingPointNumbersEnabled = FALSE) } \arguments{ \item{...}{A \code{data.frame} or some data vectors defining the dataset.} \item{floatingPointNumbersEnabled}{If \code{TRUE}, sample sizes and event numbers can be specified as floating-point numbers (this makes sense, e.g., for theoretical comparisons); \cr by default \code{floatingPointNumbersEnabled = FALSE}, i.e., sample sizes and event numbers defined as floating-point numbers will be truncated.} } \value{ Returns a \code{\link{Dataset}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.Dataset]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Creates a dataset object and returns it. } \details{ The different dataset types \code{DatasetMeans}, \code{DatasetRates}, or \code{DatasetSurvival} can be created as follows: \itemize{ \item An element of \code{\link{DatasetMeans}} for one sample is created by \cr \code{getDataset(sampleSizes =, means =, stDevs =)} where \cr \code{sampleSizes}, \code{means}, \code{stDevs} are vectors with stage-wise sample sizes, means and standard deviations of length given by the number of available stages. \item An element of \code{\link{DatasetMeans}} for two samples is created by \cr \code{getDataset(sampleSizes1 =, sampleSizes2 =, means1 =, means2 =, } \cr \code{stDevs1 =, stDevs2 =)} where \code{sampleSizes1}, \code{sampleSizes2}, \code{means1}, \code{means2}, \code{stDevs1}, \code{stDevs2} are vectors with stage-wise sample sizes, means and standard deviations for the two treatment groups of length given by the number of available stages. \item An element of \code{\link{DatasetRates}} for one sample is created by \cr \code{getDataset(sampleSizes =, events =)} where \code{sampleSizes}, \code{events} are vectors with stage-wise sample sizes and events of length given by the number of available stages. 
\item An element of \code{\link{DatasetRates}} for two samples is created by \cr \code{getDataset(sampleSizes1 =, sampleSizes2 =, events1 =, events2 =)} where \code{sampleSizes1}, \code{sampleSizes2}, \code{events1}, \code{events2} are vectors with stage-wise sample sizes and events for the two treatment groups of length given by the number of available stages. \item An element of \code{\link{DatasetSurvival}} is created by \cr \code{getDataset(events =, logRanks =, allocationRatios =)} where \code{events}, \code{logRanks}, and \code{allocationRatios} are the stage-wise events, (one-sided) logrank statistics, and allocation ratios. \item An element of \code{\link{DatasetMeans}}, \code{\link{DatasetRates}}, and \code{\link{DatasetSurvival}} for more than one comparison is created by adding subsequent digits to the variable names. The system can analyze these data in a multi-arm many-to-one comparison setting where the group with the highest index represents the control group. } Prefix \code{overall[Capital case of first letter of variable name]...} for the variable names enables entering the overall (cumulative) results and calculates stage-wise statistics. Since rpact version 3.2, the prefix \code{cumulative[Capital case of first letter of variable name]...} or \code{cum[Capital case of first letter of variable name]...} can alternatively be used for this. \code{n} can be used in place of \code{sampleSizes}. Note that in survival design usually the overall (cumulative) events and logrank test statistics are provided in the output, so \cr \code{getDataset(cumulativeEvents=, cumulativeLogRanks =, cumulativeAllocationRatios =)} \cr is the usual command for entering survival data. Note also that for \code{cumulativeLogRanks} also the z scores from a Cox regression can be used. For multi-arm designs, the index refers to the considered comparison. 
For example,\cr \code{ getDataset(events1=c(13, 33), logRanks1 = c(1.23, 1.55), events2 = c(16, NA), logRanks2 = c(1.55, NA)) } \cr refers to the case where one active arm (1) is considered at both stages whereas active arm 2 was dropped at interim. Number of events and logrank statistics are entered for the corresponding comparison to control (see Examples). For enrichment designs, the comparison of two samples is provided for an unstratified (sub-population wise) or stratified data input.\cr For unstratified (sub-population wise) data input the data sets are defined for the sub-populations S1, S2, ..., F, where F refers to the full populations. Use of \code{getDataset(S1 = , S2, ..., F = )} defines the data set to be used in \code{\link[=getAnalysisResults]{getAnalysisResults()}} (see examples)\cr For stratified data input the data sets are defined for the strata S1, S12, S2, ..., R, where R refers to the remainder of the strata such that the union of all sets is the full population. Use of \code{getDataset(S1 = , S12 = , S2, ..., R = )} defines the data set to be used in \code{\link[=getAnalysisResults]{getAnalysisResults()}} (see examples)\cr For survival data, for enrichment designs the log-rank statistics should be entered as stratified log-rank statistics in order to provide strong control of Type I error rate. For stratified data input, the variables to be specified in \code{getDataset()} are \code{events}, \code{expectedEvents}, \code{varianceEvents}, and \code{allocationRatios} or \code{overallEvents}, \code{overallExpectedEvents}, \code{overallVarianceEvents}, and \code{overallAllocationRatios}. From this, (stratified) log-rank tests are calculated. 
} \examples{ # Create a Dataset of Means (one group): datasetOfMeans <- getDataset( n = c(22, 11, 22, 11), means = c(1, 1.1, 1, 1), stDevs = c(1, 2, 2, 1.3) ) datasetOfMeans datasetOfMeans$show(showType = 2) \dontrun{ datasetOfMeans <- getDataset( cumulativeSampleSizes = c(22, 33, 55, 66), cumulativeMeans = c(1.000, 1.033, 1.020, 1.017), cumulativeStDevs = c(1.00, 1.38, 1.64, 1.58) ) datasetOfMeans datasetOfMeans$show(showType = 2) as.data.frame(datasetOfMeans) # Create a Dataset of Means (two groups): datasetOfMeans <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) datasetOfMeans datasetOfMeans <- getDataset( cumulativeSampleSizes1 = c(22, 33, 55, 66), cumulativeSampleSizes2 = c(22, 35, 57, 70), cumulativeMeans1 = c(1, 1.033, 1.020, 1.017), cumulativeMeans2 = c(1.4, 1.437, 2.040, 2.126), cumulativeStDevs1 = c(1, 1.38, 1.64, 1.58), cumulativeStDevs2 = c(1, 1.43, 1.82, 1.74) ) datasetOfMeans df <- data.frame( stages = 1:4, n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) datasetOfMeans <- getDataset(df) datasetOfMeans # Create a Dataset of Means (three groups) where the comparison of # treatment arm 1 to control is dropped at the second interim stage: datasetOfMeans <- getDataset( cumN1 = c(22, 33, NA), cumN2 = c(20, 34, 56), cumN3 = c(22, 31, 52), cumMeans1 = c(1.64, 1.54, NA), cumMeans2 = c(1.7, 1.5, 1.77), cumMeans3 = c(2.5, 2.06, 2.99), cumStDevs1 = c(1.5, 1.9, NA), cumStDevs2 = c(1.3, 1.3, 1.1), cumStDevs3 = c(1, 1.3, 1.8)) datasetOfMeans # Create a Dataset of Rates (one group): datasetOfRates <- getDataset( n = c(8, 10, 9, 11), events = c(4, 5, 5, 6) ) datasetOfRates # Create a Dataset of Rates (two groups): datasetOfRates <- getDataset( n2 = c(8, 10, 9, 11), n1 = c(11, 13, 12, 13), events2 = c(3, 5, 5, 6), events1 = c(10, 10, 12, 
12) ) datasetOfRates # Create a Dataset of Rates (three groups) where the comparison of # treatment arm 2 to control is dropped at the first interim stage: datasetOfRates <- getDataset( cumN1 = c(22, 33, 44), cumN2 = c(20, NA, NA), cumN3 = c(20, 34, 44), cumEvents1 = c(11, 14, 22), cumEvents2 = c(17, NA, NA), cumEvents3 = c(17, 19, 33)) datasetOfRates # Create a Survival Dataset datasetSurvival <- getDataset( cumEvents = c(8, 15, 19, 31), cumAllocationRatios = c(1, 1, 1, 2), cumLogRanks = c(1.52, 1.98, 1.99, 2.11) ) datasetSurvival # Create a Survival Dataset with four comparisons where treatment # arm 2 was dropped at the first interim stage, and treatment arm 4 # at the second. datasetSurvival <- getDataset( cumEvents1 = c(18, 45, 56), cumEvents2 = c(22, NA, NA), cumEvents3 = c(12, 41, 56), cumEvents4 = c(27, 56, NA), cumLogRanks1 = c(1.52, 1.98, 1.99), cumLogRanks2 = c(3.43, NA, NA), cumLogRanks3 = c(1.45, 1.67, 1.87), cumLogRanks4 = c(1.12, 1.33, NA) ) datasetSurvival # Enrichment: Stratified and unstratified data input # The following data are from one study. Only the first # (stratified) data input enables a stratified analysis. 
# Stratified data input S1 <- getDataset( sampleSize1 = c(18, 17), sampleSize2 = c(12, 33), mean1 = c(125.6, 111.1), mean2 = c(107.7, 77.7), stDev1 = c(120.1, 145.6), stDev2 = c(128.5, 133.3)) S2 <- getDataset( sampleSize1 = c(11, NA), sampleSize2 = c(14, NA), mean1 = c(100.1, NA), mean2 = c( 68.3, NA), stDev1 = c(116.8, NA), stDev2 = c(124.0, NA)) S12 <- getDataset( sampleSize1 = c(21, 17), sampleSize2 = c(21, 12), mean1 = c(135.9, 117.7), mean2 = c(84.9, 107.7), stDev1 = c(185.0, 92.3), stDev2 = c(139.5, 107.7)) R <- getDataset( sampleSize1 = c(19, NA), sampleSize2 = c(33, NA), mean1 = c(142.4, NA), mean2 = c(77.1, NA), stDev1 = c(120.6, NA), stDev2 = c(163.5, NA)) dataEnrichment <- getDataset(S1 = S1, S2 = S2, S12 = S12, R = R) dataEnrichment # Unstratified data input S1N <- getDataset( sampleSize1 = c(39, 34), sampleSize2 = c(33, 45), stDev1 = c(156.503, 120.084), stDev2 = c(134.025, 126.502), mean1 = c(131.146, 114.4), mean2 = c(93.191, 85.7)) S2N <- getDataset( sampleSize1 = c(32, NA), sampleSize2 = c(35, NA), stDev1 = c(163.645, NA), stDev2 = c(131.888, NA), mean1 = c(123.594, NA), mean2 = c(78.26, NA)) F <- getDataset( sampleSize1 = c(69, NA), sampleSize2 = c(80, NA), stDev1 = c(165.468, NA), stDev2 = c(143.979, NA), mean1 = c(129.296, NA), mean2 = c(82.187, NA)) dataEnrichmentN <- getDataset(S1 = S1N, S2 = S2N, F = F) dataEnrichmentN } } rpact/man/test_plan_section.Rd0000644000176200001440000000070614335631010016141 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_quality_assurance.R \name{test_plan_section} \alias{test_plan_section} \title{Test Plan Section} \usage{ test_plan_section(section) } \arguments{ \item{section}{The section title or description.} } \description{ The section title or description will be used in the formal validation documentation. 
For more information visit \url{https://www.rpact.com} } \keyword{internal} rpact/man/names.TrialDesignSet.Rd0000644000176200001440000000132714261025267016420 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{names.TrialDesignSet} \alias{names.TrialDesignSet} \title{Names of a Trial Design Set Object} \usage{ \method{names}{TrialDesignSet}(x) } \arguments{ \item{x}{A \code{\link{TrialDesignSet}} object.} } \value{ Returns a \code{\link[base]{character}} vector containing the names of the \code{\link{AnalysisResults}} object. } \description{ Function to get the names of a \code{\link{TrialDesignSet}} object. } \details{ Returns the names of a design set that can be accessed by the user. } \examples{ designSet <- getDesignSet(design = getDesignGroupSequential(), alpha = c(0.01, 0.05)) names(designSet) } \keyword{internal} rpact/man/param_alternative_simulation.Rd0000644000176200001440000000100714335631010020361 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_alternative_simulation} \alias{param_alternative_simulation} \title{Parameter Description: Alternative for Simulation} \arguments{ \item{alternative}{The alternative hypothesis value for testing means under which the data is simulated. This can be a vector of assumed alternatives, default is \code{seq(0, 1, 0.2)}.} } \description{ Parameter Description: Alternative for Simulation } \keyword{internal} rpact/man/TrialDesignGroupSequential.Rd0000644000176200001440000001262414450467342017720 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignGroupSequential} \alias{TrialDesignGroupSequential} \title{Group Sequential Design} \description{ Trial design for group sequential design. 
} \details{ This object should not be created directly; use \code{\link[=getDesignGroupSequential]{getDesignGroupSequential()}} with suitable arguments to create a group sequential design. } \section{Fields}{ \describe{ \item{\code{kMax}}{The maximum number of stages \code{K}. Is a numeric vector of length 1 containing a whole number.} \item{\code{alpha}}{The significance level alpha, default is 0.025. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{informationRates}}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{userAlphaSpending}}{The user defined alpha spending. Contains the cumulative alpha-spending (type I error rate) up to each interim stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{criticalValues}}{The critical values for each stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{stageLevels}}{The adjusted significance levels to reach significance in a group sequential design. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{alphaSpent}}{The cumulative alpha spent at each stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{bindingFutility}}{If \code{TRUE}, the calculation of the critical values is affected by the futility bounds and the futility threshold is binding in the sense that the study must be stopped if the futility condition was reached (default is \code{FALSE}) Is a logical vector of length 1.} \item{\code{tolerance}}{The numerical tolerance, default is \code{1e-06}. Is a numeric vector of length 1.} \item{\code{typeOfDesign}}{The type of design. 
Is a character vector of length 1.} \item{\code{beta}}{The Type II error rate necessary for providing sample size calculations (e.g., in \code{getSampleSizeMeans}), beta spending function designs, or optimum designs, default is \code{0.20}. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{deltaWT}}{Delta for Wang & Tsiatis Delta class. Is a numeric vector of length 1.} \item{\code{deltaPT1}}{Delta1 for Pampallona & Tsiatis class rejecting H0 boundaries. Is a numeric vector of length 1.} \item{\code{deltaPT0}}{Delta0 for Pampallona & Tsiatis class rejecting H1 (accepting H0) boundaries. Is a numeric vector of length 1.} \item{\code{futilityBounds}}{The futility bounds for each stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{gammaA}}{The parameter for the alpha spending function. Is a numeric vector of length 1.} \item{\code{gammaB}}{The parameter for the beta spending function. Is a numeric vector of length 1.} \item{\code{optimizationCriterion}}{The optimization criterion for optimum design within the Wang & Tsiatis class (\code{"ASNH1"}, \code{"ASNIFH1"}, \code{"ASNsum"}), default is \code{"ASNH1"}.} \item{\code{sided}}{Describes if the alternative is one-sided (\code{1}) or two-sided (\code{2}). Is a numeric vector of length 1 containing a whole number.} \item{\code{betaSpent}}{The cumulative beta level spent at each stage of the trial. Only applicable for beta-spending designs. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{typeBetaSpending}}{The type of beta spending. Is a character vector of length 1.} \item{\code{userBetaSpending}}{The user defined beta spending. Contains the cumulative beta-spending up to each interim stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{power}}{The one-sided power at each stage of the trial. 
Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{twoSidedPower}}{Specifies if power is defined two-sided at each stage of the trial. Is a logical vector of length 1.} \item{\code{constantBoundsHP}}{The constant bounds up to stage kMax - 1 for the Haybittle & Peto design (default is 3). Is a numeric vector of length 1.} \item{\code{betaAdjustment}}{If \code{TRUE}, beta spending values are linearly adjusted if an overlapping of decision regions for futility stopping at earlier stages occurs. Only applicable for two-sided beta-spending designs. Is a logical vector of length 1.} \item{\code{delayedInformation}}{Delay of information for delayed response designs. Is a numeric vector of length \code{kMax} minus 1 containing values between 0 and 1.} \item{\code{decisionCriticalValues}}{The decision critical values for each stage of the trial in a delayed response design. Is a numeric vector of length \code{kMax}.} \item{\code{reversalProbabilities}}{The probability to switch from stopping the trial for success (or futility) and reaching non-rejection (or rejection) in a delayed response design. Is a numeric vector of length \code{kMax} minus 1 containing values between 0 and 1.} }} \seealso{ \code{\link[=getDesignGroupSequential]{getDesignGroupSequential()}} for creating a group sequential design. 
} \keyword{internal} rpact/man/as.matrix.FieldSet.Rd0000644000176200001440000000171614335631006016037 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{as.matrix.FieldSet} \alias{as.matrix.FieldSet} \title{Coerce Field Set to a Matrix} \usage{ \method{as.matrix}{FieldSet}(x, ..., enforceRowNames = TRUE, niceColumnNamesEnabled = TRUE) } \arguments{ \item{x}{A \code{\link{FieldSet}} object.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{enforceRowNames}{If \code{TRUE}, row names will be created depending on the object type, default is \code{TRUE}.} \item{niceColumnNamesEnabled}{Logical. If \code{TRUE}, nice looking column names will be used; syntactic names (variable names) otherwise (see \code{\link[base]{make.names}}).} } \value{ Returns a \code{\link[base]{matrix}}. } \description{ Returns the \code{FrameSet} as matrix. } \details{ Coerces the frame set to a matrix. } \keyword{internal} rpact/man/param_kappa.Rd0000644000176200001440000000205214335631010014674 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_kappa} \alias{param_kappa} \title{Parameter Description: Kappa} \arguments{ \item{kappa}{A numeric value > 0. A \code{kappa != 1} will be used for the specification of the shape of the Weibull distribution. Default is \code{1}, i.e., the exponential survival distribution is used instead of the Weibull distribution. Note that the Weibull distribution cannot be used for the piecewise definition of the survival time distribution, i.e., only \code{piecewiselambda} (as a single value) and \code{kappa} can be specified. 
This function is equivalent to \code{pweibull(t, shape = kappa, scale = 1 / lambda)} of the \code{stats} package, i.e., the scale parameter is \code{1 / 'hazard rate'}.\cr For example, \code{getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2)} and \code{pweibull(q = 130, shape = 4.2, scale = 1 / 0.01)} provide the sample result.} } \description{ Parameter Description: Kappa } \keyword{internal} rpact/man/TrialDesignPlanMeans.Rd0000644000176200001440000001255314450501306016435 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \docType{class} \name{TrialDesignPlanMeans} \alias{TrialDesignPlanMeans} \title{Trial Design Plan Means} \description{ Trial design plan for means. } \details{ This object cannot be created directly; use \code{\link[=getSampleSizeMeans]{getSampleSizeMeans()}} with suitable arguments to create a design plan for a dataset of means. } \section{Fields}{ \describe{ \item{\code{meanRatio}}{Specifies if the sample size for one-sided testing of H0: \code{mu1/mu2 = thetaH0} has been calculated. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{alternative}}{The alternative hypothesis value(s) for testing means. Is a numeric vector.} \item{\code{stDev}}{The standard deviation used for sample size and power calculation. Is a numeric vector of length 1.} \item{\code{groups}}{The group numbers. Is a numeric vector.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. 
Is a positive numeric vector of length 1.} \item{\code{optimumAllocationRatio}}{The allocation ratio that is optimum with respect to the overall sample size at given power. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{effect}}{The effect for randomly creating normally distributed responses. Is a numeric vector of length \code{kMax}.} \item{\code{overallReject}}{The overall rejection probability. Is a numeric vector.} \item{\code{rejectPerStage}}{The probability to reject a hypothesis per stage of the trial. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{earlyStop}}{The probability to stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{nFixed}}{The sample size in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{nFixed1}}{The sample size in treatment arm 1 in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{nFixed2}}{The sample size in treatment arm 2 in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{informationRates}}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{maxNumberOfSubjects}}{The maximum number of subjects for power calculations. Is a numeric vector.} \item{\code{maxNumberOfSubjects1}}{The maximum number of subjects in treatment arm 1. 
Is a numeric vector.} \item{\code{maxNumberOfSubjects2}}{The maximum number of subjects in treatment arm 2. Is a numeric vector.} \item{\code{numberOfSubjects}}{In simulation results data set: The number of subjects under consideration when the interim analysis takes place.} \item{\code{numberOfSubjects1}}{In simulation results data set: The number of subjects under consideration in treatment arm 1 when the interim analysis takes place.} \item{\code{numberOfSubjects2}}{In simulation results data set: The number of subjects under consideration in treatment arm 2 when the interim analysis takes place.} \item{\code{expectedNumberOfSubjectsH0}}{The expected number of subjects under H0. Is a numeric vector.} \item{\code{expectedNumberOfSubjectsH01}}{The expected number of subjects under a value between H0 and H1. Is a numeric vector.} \item{\code{expectedNumberOfSubjectsH1}}{The expected number of subjects under H1. Is a numeric vector.} \item{\code{criticalValuesEffectScale}}{The critical values for each stage of the trial on the effect size scale.} \item{\code{criticalValuesEffectScaleLower}}{The lower critical values for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{criticalValuesEffectScaleUpper}}{The upper critical values for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{criticalValuesPValueScale}}{The critical values for each stage of the trial on the p-value scale.} \item{\code{futilityBoundsEffectScale}}{The futility bounds for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{futilityBoundsEffectScaleLower}}{The lower futility bounds for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{futilityBoundsEffectScaleUpper}}{The upper futility bounds for each stage of the trial on the effect size scale. 
Is a numeric matrix.} \item{\code{futilityBoundsPValueScale}}{The futility bounds for each stage of the trial on the p-value scale. Is a numeric matrix.} }} \keyword{internal} rpact/man/SimulationResultsMultiArmRates.Rd0000644000176200001440000001505314450467343020623 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsMultiArmRates} \alias{SimulationResultsMultiArmRates} \title{Class for Simulation Results Multi-Arm Rates} \description{ A class for simulation results rates in multi-arm designs. } \details{ Use \code{\link[=getSimulationMultiArmRates]{getSimulationMultiArmRates()}} to create an object of this type. } \section{Fields}{ \describe{ \item{\code{maxNumberOfIterations}}{The number of simulation iterations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. 
Is a logical vector of length 1.} \item{\code{plannedSubjects}}{Determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, refers to the number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{maxNumberOfSubjects}}{The maximum number of subjects for power calculations. Is a numeric vector.} \item{\code{calcSubjectsFunction}}{An optional function that can be entered to define how sample size is recalculated. By default, recalculation is performed with conditional power with specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage}.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{activeArms}}{The number of active treatment arms to be compared with control. Is a numeric vector of length 1 containing a whole number.} \item{\code{effectMatrix}}{The matrix of effect sizes with \code{activeArms} columns and number of rows reflecting the different situations to consider.} \item{\code{typeOfShape}}{The shape of the dose-response relationship over the treatment groups. Is a character vector of length 1.} \item{\code{piMaxVector}}{The range of assumed probabilities for the treatment group with highest response for \code{"linear"} and \code{"sigmoidEmax"} model.} \item{\code{piControl}}{The assumed probability in the control arm for simulation and under which the sample size recalculation is performed. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{piH1}}{The assumed probability in the active treatment arm(s) under which the sample size recalculation is performed. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{piControlH1}}{The assumed probability in the reference group, for which the conditional power was calculated. 
Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{gED50}}{The ED50 of the sigmoid Emax model. Only necessary if \code{typeOfShape = "sigmoidEmax"} has been specified. Is a numeric vector of length 1.} \item{\code{slope}}{The slope of the sigmoid Emax model, if \code{typeOfShape = "sigmoidEmax"} Is a numeric vector of length 1.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{adaptations}}{Indicates whether or not an adaptation takes place at interim k. Is a logical vector of length \code{kMax} minus 1.} \item{\code{typeOfSelection}}{The way the treatment arms or populations are selected at interim. Is a character vector of length 1.} \item{\code{effectMeasure}}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (\code{"effectEstimate"}). Is a character vector of length 1.} \item{\code{successCriterion}}{Defines when the study is stopped for efficacy at interim. \code{"all"} stops the trial if the efficacy criterion has been fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim. Is a character vector of length 1.} \item{\code{epsilonValue}}{Needs to be specified if \code{typeOfSelection = "epsilon"}. Is a numeric vector of length 1.} \item{\code{rValue}}{Needs to be specified if \code{typeOfSelection = "rBest"}. Is a numeric vector of length 1.} \item{\code{threshold}}{The selection criterion: treatment arm/population is only selected if \code{effectMeasure} exceeds \code{threshold}. 
Either a single numeric value or a numeric vector of length \code{activeArms} referring to a separate threshold condition for each treatment arm.} \item{\code{selectArmsFunction}}{An optional function that can be entered to define how treatment arms are selected.} \item{\code{earlyStop}}{The probability to stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{selectedArms}}{The selected arms in multi-armed designs.} \item{\code{numberOfActiveArms}}{The number of active arms in a multi-armed design. Is a numeric matrix.} \item{\code{rejectAtLeastOne}}{The probability to reject at least one of the (multiple) hypotheses. Is a numeric vector.} \item{\code{rejectedArmsPerStage}}{The simulated number of rejected arms per stage.} \item{\code{successPerStage}}{The simulated success probabilities per stage where success is defined by user. Is a numeric matrix.} \item{\code{sampleSizes}}{The sample sizes for each group and stage. Is a numeric vector of length number of stages times number of groups containing whole numbers.} \item{\code{conditionalPowerAchieved}}{The calculated conditional power, under the assumption of observed or assumed effect sizes. Is a numeric matrix.} }} \keyword{internal} rpact/man/TrialDesignPlanSurvival.Rd0000644000176200001440000002002514450501306017176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \docType{class} \name{TrialDesignPlanSurvival} \alias{TrialDesignPlanSurvival} \title{Trial Design Plan Survival} \description{ Trial design plan for survival data. } \details{ This object cannot be created directly; use \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}} with suitable arguments to create a design plan for a dataset of survival data. } \section{Fields}{ \describe{ \item{\code{thetaH0}}{The difference or assumed effect under H0. 
Is a numeric vector of length 1.} \item{\code{typeOfComputation}}{The type of computation used, either \code{"Schoenfeld", "Freedman"}, or \code{"HsiehFreedman"}.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{pi1}}{The assumed event rate in the treatment group. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{pi2}}{The assumed event rate in the control group. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{median1}}{The assumed median survival time in the treatment group. Is a numeric vector.} \item{\code{median2}}{The assumed median survival time in the reference group. Is a numeric vector of length 1.} \item{\code{lambda1}}{The assumed hazard rate in the treatment group. Is a numeric vector of length \code{kMax}.} \item{\code{lambda2}}{The assumed hazard rate in the reference group. Is a numeric vector of length 1.} \item{\code{hazardRatio}}{The hazard ratios under consideration. Is a numeric vector of length \code{kMax}.} \item{\code{maxNumberOfSubjects}}{The maximum number of subjects for power calculations. Is a numeric vector.} \item{\code{maxNumberOfSubjects1}}{The maximum number of subjects in treatment arm 1. Is a numeric vector.} \item{\code{maxNumberOfSubjects2}}{The maximum number of subjects in treatment arm 2. Is a numeric vector.} \item{\code{maxNumberOfEvents}}{The maximum number of events for power calculations. Is a positive numeric vector of length \code{kMax}.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. 
Is a positive numeric vector of length 1.} \item{\code{optimumAllocationRatio}}{The allocation ratio that is optimum with respect to the overall sample size at given power. Is a logical vector of length 1.} \item{\code{accountForObservationTimes}}{If \code{FALSE}, only the event rates are used for the calculation of the maximum number of subjects. Is a logical vector of length 1.} \item{\code{eventTime}}{The assumed time under which the event rates are calculated. Is a numeric vector of length 1.} \item{\code{accrualTime}}{The assumed accrual time intervals for the study. Is a numeric vector.} \item{\code{totalAccrualTime}}{The total accrual time, i.e., the maximum of \code{accrualTime}. Is a positive numeric vector of length 1.} \item{\code{accrualIntensity}}{The absolute accrual intensities. Is a numeric vector of length \code{kMax}.} \item{\code{accrualIntensityRelative}}{The relative accrual intensities.} \item{\code{kappa}}{The shape of the Weibull distribution if \code{kappa!=1}. Is a numeric vector of length 1.} \item{\code{piecewiseSurvivalTime}}{The time intervals for the piecewise definition of the exponential survival time cumulative distribution function. Is a numeric vector.} \item{\code{followUpTime}}{The assumed follow-up time for the study. Is a numeric vector of length 1.} \item{\code{dropoutRate1}}{The assumed drop-out rate in the treatment group. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{dropoutRate2}}{The assumed drop-out rate in the control group. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{dropoutTime}}{The assumed time for drop-out rates in the control and treatment group. Is a numeric vector of length 1.} \item{\code{chi}}{The calculated event probability at end of trial. Is a numeric vector.} \item{\code{expectedNumberOfEvents}}{The expected number of events under specified alternative. 
Is a numeric vector.} \item{\code{eventsFixed}}{The number of events in a fixed sample size design. Is a numeric vector.} \item{\code{nFixed}}{The sample size in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{nFixed1}}{The sample size in treatment arm 1 in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{nFixed2}}{The sample size in treatment arm 2 in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{overallReject}}{The overall rejection probability. Is a numeric vector.} \item{\code{rejectPerStage}}{The probability to reject a hypothesis per stage of the trial. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{earlyStop}}{The probability to stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{informationRates}}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{analysisTime}}{The estimated time of analysis. Is a numeric matrix.} \item{\code{studyDurationH1}}{The study duration under the alternative hypothesis. Is a positive numeric vector.} \item{\code{studyDuration}}{The study duration for specified effect size. Is a positive numeric vector.} \item{\code{maxStudyDuration}}{The maximum study duration in survival designs. Is a numeric vector.} \item{\code{eventsPerStage}}{The number of events per stage. Is a numeric matrix.} \item{\code{expectedEventsH0}}{The expected number of events under H0. Is a numeric vector.} \item{\code{expectedEventsH01}}{The expected number of events under a value between H0 and H1. Is a numeric vector.} \item{\code{expectedEventsH1}}{The expected number of events under H1. 
Is a numeric vector.} \item{\code{numberOfSubjects}}{In simulation results data set: The number of subjects under consideration when the interim analysis takes place.} \item{\code{numberOfSubjects1}}{In simulation results data set: The number of subjects under consideration in treatment arm 1 when the interim analysis takes place.} \item{\code{numberOfSubjects2}}{In simulation results data set: The number of subjects under consideration in treatment arm 2 when the interim analysis takes place.} \item{\code{expectedNumberOfSubjectsH1}}{The expected number of subjects under H1. Is a numeric vector.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{criticalValuesEffectScale}}{The critical values for each stage of the trial on the effect size scale.} \item{\code{criticalValuesEffectScaleLower}}{The lower critical values for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{criticalValuesEffectScaleUpper}}{The upper critical values for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{criticalValuesPValueScale}}{The critical values for each stage of the trial on the p-value scale.} \item{\code{futilityBoundsEffectScale}}{The futility bounds for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{futilityBoundsEffectScaleLower}}{The lower futility bounds for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{futilityBoundsEffectScaleUpper}}{The upper futility bounds for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{futilityBoundsPValueScale}}{The futility bounds for each stage of the trial on the p-value scale. 
Is a numeric matrix.} }} \keyword{internal} rpact/man/ConditionalPowerResults.Rd0000644000176200001440000000330714450467342017303 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{ConditionalPowerResults} \alias{ConditionalPowerResults} \title{Conditional Power Results} \description{ Class for conditional power calculations } \details{ This object cannot be created directly; use \code{\link[=getConditionalPower]{getConditionalPower()}} with suitable arguments to create the results of a group sequential or a combination test design. } \section{Fields}{ \describe{ \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{simulated}}{Describes if the power for Fisher's combination test has been simulated. Only applicable when using Fisher designs. Is a logical vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDev}}{The assumed standard deviation(s) for means analysis. 
Is a numeric vector.} }} \keyword{internal} rpact/man/getPlotSettings.Rd0000644000176200001440000000177014232463333015574 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_plot_settings.R \name{getPlotSettings} \alias{getPlotSettings} \title{Get Plot Settings} \usage{ getPlotSettings( lineSize = 0.8, pointSize = 3, pointColor = NA_character_, mainTitleFontSize = 14, axesTextFontSize = 10, legendFontSize = 11, scalingFactor = 1 ) } \arguments{ \item{lineSize}{The line size, default is \code{0.8}.} \item{pointSize}{The point size, default is \code{3}.} \item{pointColor}{The point color (character), default is \code{NA_character_}.} \item{mainTitleFontSize}{The main title font size, default is \code{14}.} \item{axesTextFontSize}{The axes text font size, default is \code{10}.} \item{legendFontSize}{The legend font size, default is \code{11}.} \item{scalingFactor}{The scaling factor, default is \code{1}.} } \description{ Returns a plot settings object. } \details{ Returns an object of class \code{PlotSettings} that collects typical plot settings. } \keyword{internal} rpact/man/param_niceColumnNamesEnabled.Rd0000644000176200001440000000077414335631011020145 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_niceColumnNamesEnabled} \alias{param_niceColumnNamesEnabled} \title{Parameter Description: Nice Column Names Enabled} \arguments{ \item{niceColumnNamesEnabled}{Logical. 
If \code{TRUE}, nice looking column names will be used; syntactic names (variable names) otherwise (see \code{\link[base]{make.names}}).} } \description{ Parameter Description: Nice Column Names Enabled } \keyword{internal} rpact/man/param_slope.Rd0000644000176200001440000000063614335631011014731 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_slope} \alias{param_slope} \title{Parameter Description: Slope} \arguments{ \item{slope}{If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"slope"} can be entered to specify the slope of the sigmoid Emax model, default is 1.} } \description{ Parameter Description: Slope } \keyword{internal} rpact/man/getDesignSet.Rd0000644000176200001440000000614214372411347015023 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{getDesignSet} \alias{getDesignSet} \title{Get Design Set} \usage{ getDesignSet(...) } \arguments{ \item{...}{\code{designs} or \code{design} and one or more design parameters, e.g., \code{deltaWT = c(0.1, 0.3, 0.4)}. \itemize{ \item \code{design} The master design (optional, you need to specify an additional parameter that shall be varied). \item \code{designs} The designs to compare (optional, you need to specify the variable \code{variedParameters}). }} } \value{ Returns a \code{\link{TrialDesignSet}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.TrialDesignSet]{names}} to obtain the field names, \item \code{\link[=length.TrialDesignSet]{length}} to obtain the number of designs, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.TrialDesignSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesignSet]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesignSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Creates a trial design set object and returns it. } \details{ Specify a master design and one or more design parameters or a list of designs. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \examples{ # Example 1 design <- getDesignGroupSequential( alpha = 0.05, kMax = 6, sided = 2, typeOfDesign = "WT", deltaWT = 0.1 ) designSet <- getDesignSet() designSet$add(design = design, deltaWT = c(0.3, 0.4)) \dontrun{ if (require(ggplot2)) plot(designSet, type = 1) } # Example 2 (shorter script) design <- getDesignGroupSequential( alpha = 0.05, kMax = 6, sided = 2, typeOfDesign = "WT", deltaWT = 0.1 ) designSet <- getDesignSet(design = design, deltaWT = c(0.3, 0.4)) \dontrun{ if (require(ggplot2)) plot(designSet, type = 1) } # Example 3 (use of designs instead of design) d1 <- getDesignGroupSequential( alpha = 0.05, kMax = 2, sided = 1, beta = 0.2, typeOfDesign = "asHSD", gammaA = 0.5, typeBetaSpending = "bsHSD", gammaB = 0.5 ) d2 <- getDesignGroupSequential( alpha = 0.05, kMax = 4, sided = 1, beta = 0.2, typeOfDesign = "asP", typeBetaSpending = "bsP" ) designSet <- getDesignSet( designs = c(d1, d2), variedParameters = c("typeOfDesign", "kMax") ) \dontrun{ if (require(ggplot2)) plot(designSet, type = 8, nMax = 20) } } rpact/man/param_conditionalPower.Rd0000644000176200001440000000072714335631011017130 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_conditionalPower} \alias{param_conditionalPower} \title{Parameter Description: Conditional Power} \arguments{ \item{conditionalPower}{The conditional power for the subsequent stage under which the sample size recalculation is performed. 
Must be a positive numeric of length 1.} } \description{ Parameter Description: Conditional Power } \keyword{internal} rpact/man/ConditionalPowerResultsRates.Rd0000644000176200001440000000342714450467342020305 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{ConditionalPowerResultsRates} \alias{ConditionalPowerResultsRates} \title{Conditional Power Results Rates} \description{ Class for conditional power calculations of rates data } \details{ This object cannot be created directly; use \code{\link{getConditionalPower}} with suitable arguments to create the results of a group sequential or a combination test design. } \section{Fields}{ \describe{ \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{simulated}}{Describes if the power for Fisher's combination test has been simulated. Only applicable when using Fisher designs. Is a logical vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. 
Is a numeric vector of length 1 containing a value between 0 and 1.} }} \keyword{internal} rpact/man/param_accrualTime.Rd0000644000176200001440000000067714335631011016045 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_accrualTime} \alias{param_accrualTime} \title{Parameter Description: Accrual Time} \arguments{ \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0, 12)} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} } \description{ Parameter Description: Accrual Time } \keyword{internal} rpact/man/AnalysisResultsMultiHypotheses.Rd0000644000176200001440000000101314335631006020655 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsMultiHypotheses} \alias{AnalysisResultsMultiHypotheses} \title{Basic Class for Analysis Results Multi-Hypotheses} \description{ A basic class for multi-hypotheses analysis results. } \details{ \code{AnalysisResultsMultiHypotheses} is the basic class for \itemize{ \item \code{\link{AnalysisResultsMultiArm}} and \item \code{\link{AnalysisResultsEnrichment}}. } } \keyword{internal} rpact/man/param_median2.Rd0000644000176200001440000000062514312324046015126 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_median2} \alias{param_median2} \title{Parameter Description: Median (2)} \arguments{ \item{median2}{The assumed median survival time in the reference group, there is no default. 
Must be a positive numeric of length 1.} } \description{ Parameter Description: Median (2) } \keyword{internal} rpact/man/param_intersectionTest_MultiArm.Rd0000644000176200001440000000115014335631011020757 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_intersectionTest_MultiArm} \alias{param_intersectionTest_MultiArm} \title{Parameter Description: Intersection Test} \arguments{ \item{intersectionTest}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses. Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}.} } \description{ Parameter Description: Intersection Test } \keyword{internal} rpact/man/SimulationResultsEnrichmentRates.Rd0000644000176200001440000001525114450467343021165 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsEnrichmentRates} \alias{SimulationResultsEnrichmentRates} \title{Class for Simulation Results Enrichment Rates} \description{ A class for simulation results rates in enrichment designs. } \details{ Use \code{\link[=getSimulationEnrichmentRates]{getSimulationEnrichmentRates()}} to create an object of this type. } \section{Fields}{ \describe{ \item{\code{maxNumberOfIterations}}{The number of simulation iterations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. 
Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{plannedSubjects}}{Determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, refers to the number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{minNumberOfSubjectsPerStage}}{Determines the minimum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the minimum number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{maxNumberOfSubjectsPerStage}}{Determines the maximum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the maximum number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{calcSubjectsFunction}}{An optional function that can be entered to define how sample size is recalculated. 
By default, recalculation is performed with conditional power with specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage}.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{populations}}{The number of populations in an enrichment design. Is a numeric vector of length 1 containing a whole number.} \item{\code{effectList}}{The list of subsets, prevalences and effect sizes with columns and number of rows reflecting the different situations to be considered.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. When testing means and rates, a non-stratified analysis can be performed on overall data. For survival data, only a stratified analysis is possible. Is a logical vector of length 1.} \item{\code{adaptations}}{Indicates whether or not an adaptation takes place at interim k. Is a logical vector of length \code{kMax} minus 1.} \item{\code{piTreatmentH1}}{The assumed probabilities in the active arm under which the sample size recalculation was performed and the conditional power was calculated.} \item{\code{piControlH1}}{The assumed probability in the reference group, for which the conditional power was calculated. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{typeOfSelection}}{The way the treatment arms or populations are selected at interim. Is a character vector of length 1.} \item{\code{effectMeasure}}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (\code{"effectEstimate"}). Is a character vector of length 1.} \item{\code{successCriterion}}{Defines when the study is stopped for efficacy at interim. 
\code{"all"} stops the trial if the efficacy criterion has been fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim. Is a character vector of length 1.} \item{\code{epsilonValue}}{Needs to be specified if \code{typeOfSelection = "epsilon"}. Is a numeric vector of length 1.} \item{\code{rValue}}{Needs to be specified if \code{typeOfSelection = "rBest"}. Is a numeric vector of length 1.} \item{\code{threshold}}{The selection criterion: treatment arm/population is only selected if \code{effectMeasure} exceeds \code{threshold}. Either a single numeric value or a numeric vector of length \code{activeArms} referring to a separate threshold condition for each treatment arm.} \item{\code{selectPopulationsFunction}}{An optional function that can be entered to define the way of how populations are selected.} \item{\code{earlyStop}}{The probability of stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{selectedPopulations}}{The selected populations in enrichment designs.} \item{\code{numberOfPopulations}}{The number of populations in an enrichment design. Is a numeric matrix.} \item{\code{rejectAtLeastOne}}{The probability to reject at least one of the (multiple) hypotheses. Is a numeric vector.} \item{\code{rejectedPopulationsPerStage}}{The simulated number of rejected populations per stage.} \item{\code{successPerStage}}{The simulated success probabilities per stage where success is defined by the user. Is a numeric matrix.} \item{\code{sampleSizes}}{The sample sizes for each group and stage. Is a numeric vector of length number of stages times number of groups containing whole numbers.} \item{\code{conditionalPowerAchieved}}{The calculated conditional power, under the assumption of observed or assumed effect sizes. 
Is a numeric matrix.} }} \keyword{internal} rpact/man/AccrualTime.Rd0000644000176200001440000000353014450467343014631 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_time.R \docType{class} \name{AccrualTime} \alias{AccrualTime} \title{Accrual Time} \description{ Class for the definition of accrual time and accrual intensity. } \details{ \code{AccrualTime} is a class for the definition of accrual time and accrual intensity. } \section{Fields}{ \describe{ \item{\code{endOfAccrualIsUserDefined}}{If \code{TRUE}, the end of accrual has to be defined by the user (i.e., the length of \code{accrualTime} is equal to the length of \code{accrualIntensity -1}). Is a logical vector of length 1.} \item{\code{followUpTimeMustBeUserDefined}}{Specifies whether follow up time needs to be defined or not. Is a logical vector of length 1.} \item{\code{maxNumberOfSubjectsIsUserDefined}}{If \code{TRUE}, the maximum number of subjects has been specified by the user, if \code{FALSE}, it was calculated.} \item{\code{maxNumberOfSubjectsCanBeCalculatedDirectly}}{If \code{TRUE}, the maximum number of subjects can directly be calculated. Is a logical vector of length 1.} \item{\code{absoluteAccrualIntensityEnabled}}{If \code{TRUE}, absolute accrual intensity is enabled. Is a logical vector of length 1.} \item{\code{accrualTime}}{The assumed accrual time intervals for the study. Is a numeric vector.} \item{\code{accrualIntensity}}{The absolute accrual intensities. Is a numeric vector of length \code{kMax}.} \item{\code{accrualIntensityRelative}}{The relative accrual intensities.} \item{\code{maxNumberOfSubjects}}{The maximum number of subjects for power calculations. Is a numeric vector.} \item{\code{remainingTime}}{In survival designs, the remaining time for observation. Is a numeric vector of length 1.} \item{\code{piecewiseAccrualEnabled}}{Indicates whether piecewise accrual is selected. 
Is a logical vector of length 1.} }} \keyword{internal} rpact/man/StageResults.Rd0000644000176200001440000000355514450467342015073 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResults} \alias{StageResults} \title{Basic Stage Results} \description{ Basic class for stage results. } \details{ \code{StageResults} is the basic class for \itemize{ \item \code{\link{StageResultsMeans}}, \item \code{\link{StageResultsRates}}, \item \code{\link{StageResultsSurvival}}, \item \code{\link{StageResultsMultiArmMeans}}, \item \code{\link{StageResultsMultiArmRates}}, \item \code{\link{StageResultsMultiArmSurvival}}, \item \code{\link{StageResultsEnrichmentMeans}}, \item \code{\link{StageResultsEnrichmentRates}}, and \item \code{\link{StageResultsEnrichmentSurvival}}. } } \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{pValues}}{The stage-wise p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. 
Is a numeric vector of length \code{kMax}.} }} \keyword{internal} rpact/man/PiecewiseSurvivalTime.Rd0000644000176200001440000000404514450501306016716 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_time.R \docType{class} \name{PiecewiseSurvivalTime} \alias{PiecewiseSurvivalTime} \title{Piecewise Exponential Survival Time} \description{ Class for the definition of piecewise survival times. } \details{ \code{PiecewiseSurvivalTime} is a class for the definition of piecewise survival times. } \section{Fields}{ \describe{ \item{\code{piecewiseSurvivalTime}}{The time intervals for the piecewise definition of the exponential survival time cumulative distribution function. Is a numeric vector.} \item{\code{lambda1}}{The assumed hazard rate in the treatment group. Is a numeric vector of length \code{kMax}.} \item{\code{lambda2}}{The assumed hazard rate in the reference group. Is a numeric vector of length 1.} \item{\code{hazardRatio}}{The hazard ratios under consideration. Is a numeric vector of length \code{kMax}.} \item{\code{pi1}}{The assumed event rate in the treatment group. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{pi2}}{The assumed event rate in the control group. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{median1}}{The assumed median survival time in the treatment group. Is a numeric vector.} \item{\code{median2}}{The assumed median survival time in the reference group. Is a numeric vector of length 1.} \item{\code{eventTime}}{The assumed time under which the event rates are calculated. Is a numeric vector of length 1.} \item{\code{kappa}}{The shape of the Weibull distribution if \code{kappa!=1}. Is a numeric vector of length 1.} \item{\code{piecewiseSurvivalEnabled}}{Indicates whether specification of piecewise definition of survival time is selected. 
Is a logical vector of length 1.} \item{\code{delayedResponseAllowed}}{If \code{TRUE}, delayed response is allowed, if \code{FALSE} the response is not delayed.} \item{\code{delayedResponseEnabled}}{If \code{TRUE}, delayed response is enabled, if \code{FALSE} delayed response is not enabled.} }} \keyword{internal} rpact/man/param_showSource.Rd0000644000176200001440000000216514335631011015747 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_showSource} \alias{param_showSource} \title{Parameter Description: Show Source} \arguments{ \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} } \description{ Parameter Description: Show Source } \keyword{internal} rpact/man/ParameterSet.Rd0000644000176200001440000000051614232463333015026 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \docType{class} \name{ParameterSet} \alias{ParameterSet} \title{Parameter Set} \description{ Basic class for parameter sets. } \details{ The parameter set implements basic functions for a set of parameters. 
} \keyword{internal} rpact/man/param_successCriterion.Rd0000644000176200001440000000125414335631011017133 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_successCriterion} \alias{param_successCriterion} \title{Parameter Description: Success Criterion} \arguments{ \item{successCriterion}{Defines when the study is stopped for efficacy at interim. Two options are available: \code{"all"} stops the trial if the efficacy criterion is fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim, default is \code{"all"}.} } \description{ Parameter Description: Success Criterion } \keyword{internal} rpact/man/param_digits.Rd0000644000176200001440000000056714312324046015077 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_digits} \alias{param_digits} \title{Parameter Description: Digits} \arguments{ \item{digits}{Defines how many digits are to be used for numeric values. 
Must be a positive integer of length 1.} } \description{ Parameter Description: Digits } \keyword{internal} rpact/man/as.data.frame.AnalysisResults.Rd0000644000176200001440000000201214370472345020201 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \name{as.data.frame.AnalysisResults} \alias{as.data.frame.AnalysisResults} \title{Coerce AnalysisResults to a Data Frame} \usage{ \method{as.data.frame}{AnalysisResults}( x, row.names = NULL, optional = FALSE, ..., niceColumnNamesEnabled = FALSE ) } \arguments{ \item{x}{An \code{\link{AnalysisResults}} object created by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{niceColumnNamesEnabled}{Logical. If \code{TRUE}, nice looking column names will be used; syntactic names (variable names) otherwise (see \code{\link[base]{make.names}}).} } \value{ Returns a \code{\link[base]{data.frame}}. } \description{ Returns the \code{\link{AnalysisResults}} object as data frame. } \details{ Coerces the analysis results to a data frame. } \keyword{internal} rpact/man/getParameterName.Rd0000644000176200001440000000155614313321256015655 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_utilities.R \name{getParameterName} \alias{getParameterName} \title{Get Parameter Name} \usage{ getParameterName(obj, parameterCaption) } \value{ Returns a \code{\link[base]{character}} of specifying the corresponding name of a given parameter caption. Returns \code{NULL} if the specified \code{parameterCaption} does not exist. } \description{ Returns the parameter name for a given object and parameter caption. } \details{ This function identifies and returns the parameter name for a given caption that will be used in print outputs of an rpact result object. 
} \examples{ getParameterName(getDesignInverseNormal(), "Maximum number of stages") } \seealso{ \code{\link[=getParameterCaption]{getParameterCaption()}} for getting the parameter caption for a given name. } \keyword{internal} rpact/man/param_thetaH1.Rd0000644000176200001440000000073614335631010015105 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_thetaH1} \alias{param_thetaH1} \title{Parameter Description: Effect Under Alternative} \arguments{ \item{thetaH1}{If specified, the value of the alternative under which the conditional power or sample size recalculation calculation is performed. Must be a numeric of length 1.} } \description{ Parameter Description: Effect Under Alternative } \keyword{internal} rpact/man/param_theta.Rd0000644000176200001440000000055114232463333014716 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_theta} \alias{param_theta} \title{Parameter Description: Theta} \arguments{ \item{theta}{A vector of standardized effect sizes (theta values), default is a sequence from -1 to 1.} } \description{ Parameter Description: Theta } \keyword{internal} rpact/man/AnalysisResults.Rd0000644000176200001440000000140314411263270015570 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResults} \alias{AnalysisResults} \title{Basic Class for Analysis Results} \description{ A basic class for analysis results. 
} \details{ \code{AnalysisResults} is the basic class for \itemize{ \item \code{\link{AnalysisResultsFisher}}, \item \code{\link{AnalysisResultsGroupSequential}}, \item \code{\link{AnalysisResultsInverseNormal}}, \item \code{\link{AnalysisResultsMultiArmFisher}}, \item \code{\link{AnalysisResultsMultiArmInverseNormal}}, \item \code{\link{AnalysisResultsConditionalDunnett}}, \item \code{\link{AnalysisResultsEnrichmentFisher}}, \item \code{\link{AnalysisResultsEnrichmentInverseNormal}}. } } \keyword{internal} rpact/man/rpact.Rd0000644000176200001440000000516114446320652013547 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pkgname.R \docType{package} \name{rpact} \alias{rpact} \alias{rpact-package} \title{rpact - Confirmatory Adaptive Clinical Trial Design and Analysis} \description{ rpact (R Package for Adaptive Clinical Trials) is a comprehensive package that enables the design, simulation, and analysis of confirmatory adaptive group sequential designs. Particularly, the methods described in the recent monograph by Wassmer and Brannath (published by Springer, 2016) are implemented. It also comprises advanced methods for sample size calculations for fixed sample size designs incl., e.g., sample size calculation for survival trials with piecewise exponentially distributed survival times and staggered patients entry. } \details{ rpact includes the classical group sequential designs (incl. user spending function approaches) where the sample sizes per stage (or the time points of interim analysis) cannot be changed in a data-driven way. Confirmatory adaptive designs explicitly allow for this under control of the Type I error rate. They are either based on the combination testing or the conditional rejection probability (CRP) principle. Both are available, for the former the inverse normal combination test and Fisher's combination test can be used. 
Specific techniques of the adaptive methodology are also available, e.g., overall confidence intervals, overall p-values, and conditional and predictive power assessments. Simulations can be performed to assess the design characteristics of a (user-defined) sample size recalculation strategy. Designs are available for trials with continuous, binary, and survival endpoint. For more information please visit \href{https://www.rpact.org}{www.rpact.org}. If you are interested in professional services round about the package or need a comprehensive validation documentation to fulfill regulatory requirements please visit \href{https://www.rpact.com}{www.rpact.com}. rpact is developed by \itemize{ \item Gernot Wassmer (\email{gernot.wassmer@rpact.com}) and \item Friedrich Pahlke (\email{friedrich.pahlke@rpact.com}). } } \references{ Wassmer, G., Brannath, W. (2016) Group Sequential and Confirmatory Adaptive Designs in Clinical Trials (Springer Series in Pharmaceutical Statistics; \doi{10.1007/978-3-319-32562-0}) } \seealso{ Useful links: \itemize{ \item \url{https://www.rpact.org} \item \url{https://www.rpact.com} \item \url{https://github.com/rpact-com/rpact} \item \url{https://rpact-com.github.io/rpact/} \item Report bugs at \url{https://github.com/rpact-com/rpact/issues} } } \author{ Gernot Wassmer, Friedrich Pahlke } rpact/man/getPiecewiseSurvivalTime.Rd0000644000176200001440000001421514372411347017426 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_time.R \name{getPiecewiseSurvivalTime} \alias{getPiecewiseSurvivalTime} \title{Get Piecewise Survival Time} \usage{ getPiecewiseSurvivalTime( piecewiseSurvivalTime = NA_real_, ..., lambda1 = NA_real_, lambda2 = NA_real_, hazardRatio = NA_real_, pi1 = NA_real_, pi2 = NA_real_, median1 = NA_real_, median2 = NA_real_, eventTime = 12, kappa = 1, delayedResponseAllowed = FALSE ) } \arguments{ \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the 
piecewise definition of the exponential survival time cumulative distribution function (see details).} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{lambda1}{The assumed hazard rate in the treatment group, there is no default. \code{lambda1} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} \item{lambda2}{The assumed hazard rate in the reference group, there is no default. \code{lambda2} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} \item{hazardRatio}{The vector of hazard ratios under consideration. If the event or hazard rates in both treatment groups are defined, the hazard ratio needs not to be specified as it is calculated, there is no default. Must be a positive numeric of length 1.} \item{pi1}{A numeric value or vector that represents the assumed event rate in the treatment group, default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or \code{seq(0.4, 0.6, 0.1)} (sample size calculations).} \item{pi2}{A numeric value that represents the assumed event rate in the control group, default is \code{0.2}.} \item{median1}{The assumed median survival time in the treatment group, there is no default.} \item{median2}{The assumed median survival time in the reference group, there is no default. Must be a positive numeric of length 1.} \item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.} \item{kappa}{A numeric value > 0. A \code{kappa != 1} will be used for the specification of the shape of the Weibull distribution. Default is \code{1}, i.e., the exponential survival distribution is used instead of the Weibull distribution. 
Note that the Weibull distribution cannot be used for the piecewise definition of the survival time distribution, i.e., only \code{piecewiselambda} (as a single value) and \code{kappa} can be specified. This function is equivalent to \code{pweibull(t, shape = kappa, scale = 1 / lambda)} of the \code{stats} package, i.e., the scale parameter is \code{1 / 'hazard rate'}.\cr For example, \code{getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2)} and \code{pweibull(q = 130, shape = 4.2, scale = 1 / 0.01)} provide the sample result.} \item{delayedResponseAllowed}{If \code{TRUE}, delayed response is allowed; otherwise it will be validated that the response is not delayed, default is \code{FALSE}.} } \value{ Returns a \code{\link{PiecewiseSurvivalTime}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns a \code{PiecewiseSurvivalTime} object that contains the all relevant parameters of an exponential survival time cumulative distribution function. Use \code{\link[base]{names}} to obtain the field names. } \section{Piecewise survival time}{ The first element of the vector \code{piecewiseSurvivalTime} must be equal to \code{0}. \code{piecewiseSurvivalTime} can also be a list that combines the definition of the time intervals and hazard rates in the reference group. 
The definition of the survival time in the treatment group is obtained by the specification of the hazard ratio (see examples for details). } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ getPiecewiseSurvivalTime(lambda2 = 0.5, hazardRatio = 0.8) getPiecewiseSurvivalTime(lambda2 = 0.5, lambda1 = 0.4) getPiecewiseSurvivalTime(pi2 = 0.5, hazardRatio = 0.8) getPiecewiseSurvivalTime(pi2 = 0.5, pi1 = 0.4) getPiecewiseSurvivalTime(pi1 = 0.3) getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4) getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), hazardRatio = 0.8) getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), lambda1 = c(0.025, 0.04, 0.015) * 0.8) pwst <- getPiecewiseSurvivalTime(list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007), hazardRatio = 0.75) pwst \dontrun{ # The object created by getPiecewiseSurvivalTime() can be used directly in # getSampleSizeSurvival(): getSampleSizeSurvival(piecewiseSurvivalTime = pwst) # The object created by getPiecewiseSurvivalTime() can be used directly in # getPowerSurvival(): getPowerSurvival(piecewiseSurvivalTime = pwst, maxNumberOfEvents = 40, maxNumberOfSubjects = 100) } } rpact/man/param_maxNumberOfSubjectsPerStage.Rd0000644000176200001440000000152014335631011021161 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit 
documentation in R/parameter_descriptions.R \name{param_maxNumberOfSubjectsPerStage} \alias{param_maxNumberOfSubjectsPerStage} \title{Parameter Description: Maximum Number Of Subjects Per Stage} \arguments{ \item{maxNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfSubjectsPerStage} with length kMax determines the maximum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{maxNumberOfSubjectsPerStage} refers to the maximum number of subjects per selected active arm.} } \description{ Parameter Description: Maximum Number Of Subjects Per Stage } \keyword{internal} rpact/man/as.data.frame.TrialDesignPlan.Rd0000644000176200001440000000230214335631007020047 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \name{as.data.frame.TrialDesignPlan} \alias{as.data.frame.TrialDesignPlan} \title{Coerce Trial Design Plan to a Data Frame} \usage{ \method{as.data.frame}{TrialDesignPlan}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \arguments{ \item{x}{A \code{\link{TrialDesignPlan}} object.} \item{niceColumnNamesEnabled}{Logical. If \code{TRUE}, nice looking column names will be used; syntactic names (variable names) otherwise (see \code{\link[base]{make.names}}).} \item{includeAllParameters}{Logical. If \code{TRUE}, all available parameters will be included in the data frame; a meaningful parameter selection otherwise, default is \code{FALSE}.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} } \value{ Returns a \code{\link[base]{data.frame}}. } \description{ Returns the \code{\link{TrialDesignPlan}} as data frame. 
} \details{ Coerces the design plan to a data frame. } \examples{ as.data.frame(getSampleSizeMeans()) } \keyword{internal} rpact/man/getRepeatedPValues.Rd0000644000176200001440000000472414427374265016203 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getRepeatedPValues} \alias{getRepeatedPValues} \title{Get Repeated P Values} \usage{ getRepeatedPValues(stageResults, ..., tolerance = 1e-06) } \arguments{ \item{stageResults}{The results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}}.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{tolerance}{The numerical tolerance, default is \code{1e-06}. Must be a positive numeric of length 1.} } \value{ Returns a \code{\link[base]{numeric}} vector of length \code{kMax} or in case of multi-arm stage results a \code{\link[base]{matrix}} (each column represents a stage, each row a comparison) containing the repeated p values. } \description{ Calculates the repeated p-values for a given test results. } \details{ The repeated p-value at a given stage of the trial is defined as the smallest significance level under which at given test design the test results obtain rejection of the null hypothesis. It can be calculated at each stage of the trial and can thus be used as a monitoring tool. The repeated p-values are provided up to the specified stage. In multi-arm trials, the repeated p-values are defined separately for each treatment comparison within the closed testing procedure. 
} \section{Note on the dependency of \code{mnormt}}{ If \code{intersectionTest = "Dunnett"} or \code{intersectionTest = "SpiessensDebois"}, or the design is a conditional Dunnett design and the dataset is a multi-arm or enrichment dataset, \code{rpact} uses the R package \href{https://cran.r-project.org/package=mnormt}{mnormt} to calculate the analysis results. } \examples{ \dontrun{ design <- getDesignInverseNormal(kMax = 2) data <- getDataset( n = c( 20, 30), means = c( 50, 51), stDevs = c(130, 140) ) getRepeatedPValues(getStageResults(design, dataInput = data)) } } \seealso{ Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedCombinationTestResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getStageResults}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/param_normalApproximation.Rd0000644000176200001440000000137514335631010017652 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_normalApproximation} \alias{param_normalApproximation} \title{Parameter Description: Normal Approximation} \arguments{ \item{normalApproximation}{The type of computation of the p-values. Default is \code{FALSE} for testing means (i.e., the t test is used) and \code{TRUE} for testing rates and the hazard ratio. For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values. 
In the survival setting \code{normalApproximation = FALSE} has no effect.} } \description{ Parameter Description: Normal Approximation } \keyword{internal} rpact/man/AnalysisResultsEnrichmentInverseNormal.Rd0000644000176200001440000001007214450467342022325 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsEnrichmentInverseNormal} \alias{AnalysisResultsEnrichmentInverseNormal} \title{Analysis Results Enrichment Inverse Normal} \description{ Class for enrichment analysis results based on a inverse normal design. } \details{ This object cannot be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the enrichment analysis results of an inverse normal design. } \section{Fields}{ \describe{ \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. 
For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDevs}}{Assumed standard deviations to calculate conditional power in multi-arm trials or enrichment designs. Is a numeric vector.} \item{\code{piTreatments}}{The assumed rates in the treatment groups for multi-arm and enrichment designs, i.e., designs with multiple subsets.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple (i.e., >2) treatment arms or population enrichment designs when testing means. Available options for multiple arms: \code{"overallPooled", "pairwisePooled", "notPooled"}. Available options for enrichment designs: \code{"pooled", "pooledFromFull", "notPooled"}.} \item{\code{conditionalRejectionProbabilities}}{The probabilities of rejecting the null hypothesis at each stage, given the stage has been reached. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{repeatedConfidenceIntervalLowerBounds}}{The lower bound of the confidence intervals that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedPValues}}{The p-values that are calculated at any stage of the trial. 
Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{piControls}}{The assumed rates in the control group for enrichment designs, i.e., designs with multiple subsets.} \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. When testing means and rates, a non-stratified analysis can be performed on overall data. For survival data, only a stratified analysis is possible. Is a logical vector of length 1.} }} \keyword{internal} rpact/man/dataEnrichmentMeans.Rd0000644000176200001440000000104114313321260016326 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataEnrichmentMeans} \alias{dataEnrichmentMeans} \title{Enrichment Dataset of Means} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataEnrichmentMeans } \description{ A dataset containing the sample sizes, means, and standard deviations of two groups. Use \code{getDataset(dataEnrichmentMeans)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. } \keyword{internal} rpact/man/names.FieldSet.Rd0000644000176200001440000000110414261025266015226 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{names.FieldSet} \alias{names.FieldSet} \title{Names of a Field Set Object} \usage{ \method{names}{FieldSet}(x) } \arguments{ \item{x}{A \code{\link{FieldSet}} object.} } \value{ Returns a \code{\link[base]{character}} vector containing the names of the \code{\link{AnalysisResults}} object. } \description{ Function to get the names of a \code{\link{FieldSet}} object. } \details{ Returns the names of a field set that can be accessed by the user. 
} \keyword{internal} rpact/man/getSampleSizeSurvival.Rd0000644000176200001440000003726414400317257016754 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getSampleSizeSurvival} \alias{getSampleSizeSurvival} \title{Get Sample Size Survival} \usage{ getSampleSizeSurvival( design = NULL, ..., typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"), thetaH0 = 1, pi1 = NA_real_, pi2 = NA_real_, lambda1 = NA_real_, lambda2 = NA_real_, median1 = NA_real_, median2 = NA_real_, kappa = 1, hazardRatio = NA_real_, piecewiseSurvivalTime = NA_real_, allocationRatioPlanned = NA_real_, eventTime = 12, accrualTime = c(0, 12), accrualIntensity = 0.1, accrualIntensityType = c("auto", "absolute", "relative"), followUpTime = NA_real_, maxNumberOfSubjects = NA_real_, dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12 ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{typeOfComputation}{Three options are available: \code{"Schoenfeld"}, \code{"Freedman"}, \code{"HsiehFreedman"}, the default is \code{"Schoenfeld"}. For details, see Hsieh (Statistics in Medicine, 1992). For non-inferiority testing (i.e., \code{thetaH0 != 1}), only Schoenfeld's formula can be used.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. 
That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{pi1}{A numeric value or vector that represents the assumed event rate in the treatment group, default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or \code{seq(0.4, 0.6, 0.1)} (sample size calculations).} \item{pi2}{A numeric value that represents the assumed event rate in the control group, default is \code{0.2}.} \item{lambda1}{The assumed hazard rate in the treatment group, there is no default. \code{lambda1} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} \item{lambda2}{The assumed hazard rate in the reference group, there is no default. \code{lambda2} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} \item{median1}{The assumed median survival time in the treatment group, there is no default.} \item{median2}{The assumed median survival time in the reference group, there is no default. Must be a positive numeric of length 1.} \item{kappa}{A numeric value > 0. A \code{kappa != 1} will be used for the specification of the shape of the Weibull distribution. Default is \code{1}, i.e., the exponential survival distribution is used instead of the Weibull distribution. 
Note that the Weibull distribution cannot be used for the piecewise definition of the survival time distribution, i.e., only \code{piecewiselambda} (as a single value) and \code{kappa} can be specified. This function is equivalent to \code{pweibull(t, shape = kappa, scale = 1 / lambda)} of the \code{stats} package, i.e., the scale parameter is \code{1 / 'hazard rate'}.\cr For example, \code{getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2)} and \code{pweibull(q = 130, shape = 4.2, scale = 1 / 0.01)} provide the sample result.} \item{hazardRatio}{The vector of hazard ratios under consideration. If the event or hazard rates in both treatment groups are defined, the hazard ratio needs not to be specified as it is calculated, there is no default. Must be a positive numeric of length 1.} \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function \cr (for details see \code{\link[=getPiecewiseSurvivalTime]{getPiecewiseSurvivalTime()}}).} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the smallest overall sample size is determined.} \item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.} \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0, 12)} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensity}{A numeric vector of accrual intensities, default is the relative intensity \code{0.1} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensityType}{A character value specifying the accrual intensity input type. 
Must be one of \code{"auto"}, \code{"absolute"}, or \code{"relative"}; default is \code{"auto"}, i.e., if all values are < 1 the type is \code{"relative"}, otherwise it is \code{"absolute"}.} \item{followUpTime}{The assumed (additional) follow-up time for the study, default is \code{6}. The total study duration is \code{accrualTime + followUpTime}.} \item{maxNumberOfSubjects}{If \code{maxNumberOfSubjects > 0} is specified, the follow-up time for the required number of events is determined.} \item{dropoutRate1}{The assumed drop-out rate in the treatment group, default is \code{0}.} \item{dropoutRate2}{The assumed drop-out rate in the control group, default is \code{0}.} \item{dropoutTime}{The assumed time for drop-out rates in the control and the treatment group, default is \code{12}.} } \value{ Returns a \code{\link{TrialDesignPlan}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.TrialDesignSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesignPlan]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesignPlan]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the sample size for testing the hazard ratio in a two treatment groups survival design. } \details{ At given design the function calculates the number of events and an estimate for the necessary number of subjects for testing the hazard ratio in a survival design. It also calculates the time when the required events are expected under the given assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times and constant or non-constant piecewise accrual). 
Additionally, an allocation ratio = \code{n1 / n2} can be specified where \code{n1} and \code{n2} are the number of subjects in the two treatment groups. Optional argument \code{accountForObservationTimes}: if \code{accountForObservationTimes = TRUE}, the number of subjects is calculated assuming specific accrual and follow-up time, default is \code{TRUE}. The formula of Kim & Tsiatis (Biometrics, 1990) is used to calculate the expected number of events under the alternative (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized to piecewise survival times and non-constant piecewise accrual over time.\cr Optional argument \code{accountForObservationTimes}: if \code{accountForObservationTimes = FALSE}, only the event rates are used for the calculation of the maximum number of subjects. } \section{Piecewise survival time}{ The first element of the vector \code{piecewiseSurvivalTime} must be equal to \code{0}. \code{piecewiseSurvivalTime} can also be a list that combines the definition of the time intervals and hazard rates in the reference group. The definition of the survival time in the treatment group is obtained by the specification of the hazard ratio (see examples for details). } \section{Staggered patient entry}{ \code{accrualTime} is the time period of subjects' accrual in a study. It can be a value that defines the end of accrual or a vector. In this case, \code{accrualTime} can be used to define a non-constant accrual over time. For this, \code{accrualTime} is a vector that defines the accrual intervals. The first element of \code{accrualTime} must be equal to \code{0} and, additionally, \code{accrualIntensity} needs to be specified. \code{accrualIntensity} itself is a value or a vector (depending on the length of \code{accrualTime}) that defines the intensity how subjects enter the trial in the intervals defined through \code{accrualTime}. 
\code{accrualTime} can also be a list that combines the definition of the accrual time and accrual intensity (see below and examples for details). If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to be specified and the end of accrual is calculated. In that case, \code{accrualIntensity} is the number of subjects per time unit, i.e., the absolute accrual intensity. If the length of \code{accrualTime} equals the length of \code{accrualIntensity - 1} (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated if the absolute accrual intensity is given. If all elements in \code{accrualIntensity} are smaller than 1, \code{accrualIntensity} defines the \emph{relative} intensity how subjects enter the trial. For example, \code{accrualIntensity = c(0.1, 0.2)} specifies that in the second accrual interval the intensity is doubled as compared to the first accrual interval. The actual (absolute) accrual intensity is calculated for the calculated or given \code{maxNumberOfSubjects}. Note that the default is \code{accrualIntensity = 0.1} meaning that the \emph{absolute} accrual intensity will be calculated. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ # Fixed sample size trial with median survival 20 vs. 
30 months in treatment and # reference group, respectively, alpha = 0.05 (two-sided), and power 1 - beta = 90\%. # 20 subjects will be recruited per month up to 400 subjects, i.e., accrual time # is 20 months. getSampleSizeSurvival(alpha = 0.05, sided = 2, beta = 0.1, lambda1 = log(2) / 20, lambda2 = log(2) / 30, accrualTime = c(0,20), accrualIntensity = 20) \dontrun{ # Fixed sample size with minimum required definitions, pi1 = c(0.4,0.5,0.6) and # pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default, # only alpha = 0.01 is specified getSampleSizeSurvival(alpha = 0.01) # Four stage O'Brien & Fleming group sequential design with minimum required # definitions, pi1 = c(0.4,0.5,0.6) and pi2 = 0.2 at event time 12, # accrual time 12 and follow-up time 6 as default getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 4)) # For fixed sample design, determine necessary accrual time if 200 subjects and # 30 subjects per time unit can be recruited getSampleSizeSurvival(accrualTime = c(0), accrualIntensity = c(30), maxNumberOfSubjects = 200) # Determine necessary accrual time if 200 subjects and if the first 6 time units # 20 subjects per time unit can be recruited, then 30 subjects per time unit getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200) # Determine maximum number of Subjects if the first 6 time units 20 subjects # per time unit can be recruited, and after 10 time units 30 subjects per time unit getSampleSizeSurvival(accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30)) # Specify accrual time as a list at <- list( "0 - <6" = 20, "6 - Inf" = 30) getSampleSizeSurvival(accrualTime = at, maxNumberOfSubjects = 200) # Specify accrual time as a list, if maximum number of subjects need to be calculated at <- list( "0 - <6" = 20, "6 - <=10" = 30) getSampleSizeSurvival(accrualTime = at) # Specify effect size for a two-stage group design with O'Brien & Fleming boundaries # Effect size is 
based on event rates at specified event time # needs to be specified because it should be shown that hazard ratio < 1 getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24) # Effect size is based on event rate at specified event # time for the reference group and hazard ratio getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24) # Effect size is based on hazard rate for the reference group and hazard ratio getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02) # Specification of piecewise exponential survival time and hazard ratios getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = c(1.5, 1.8, 2)) # Specification of piecewise exponential survival time as a list and hazard ratios pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) # Specification of piecewise exponential survival time for both treatment arms getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015, 0.03, 0.06)) # Specification of piecewise exponential survival time as a list pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) # Specify effect size based on median survival times getSampleSizeSurvival(median1 = 5, median2 = 3) # Specify effect size based on median survival times of Weibull distribtion with # kappa = 2 getSampleSizeSurvival(median1 = 5, median2 = 3, kappa = 2) # Identify minimal and maximal required subjects to # reach the required events in spite of dropouts 
getSampleSizeSurvival(accrualTime = c(0, 18), accrualIntensity = c(20, 30), lambda2 = 0.4, lambda1 = 0.3, followUpTime = Inf, dropoutRate1 = 0.001, dropoutRate2 = 0.005) getSampleSizeSurvival(accrualTime = c(0, 18), accrualIntensity = c(20, 30), lambda2 = 0.4, lambda1 = 0.3, followUpTime = 0, dropoutRate1 = 0.001, dropoutRate2 = 0.005) } } \seealso{ Other sample size functions: \code{\link{getSampleSizeMeans}()}, \code{\link{getSampleSizeRates}()} } \concept{sample size functions} rpact/man/getLongFormat.Rd0000644000176200001440000000145014313321256015175 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_utilities.R \name{getLongFormat} \alias{getLongFormat} \title{Get Long Format} \usage{ getLongFormat(dataInput) } \value{ A \code{\link[base]{data.frame}} will be returned. } \description{ Returns the specified dataset as a \code{\link[base]{data.frame}} in so-called long format. } \details{ In the long format (narrow, stacked), the data are presented with one column containing all the values and another column listing the context of the value, i.e., the data for the different groups are in one column and the dataset contains an additional "group" column. } \seealso{ \code{\link[=getWideFormat]{getWideFormat()}} for returning the dataset as a \code{\link[base]{data.frame}} in wide format. } \keyword{internal} rpact/man/print.TrialDesignCharacteristics.Rd0000644000176200001440000000166714445304766021050 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \name{print.TrialDesignCharacteristics} \alias{print.TrialDesignCharacteristics} \title{Trial Design Characteristics Printing} \usage{ \method{print}{TrialDesignCharacteristics}(x, ..., markdown = FALSE, showDesign = TRUE) } \arguments{ \item{x}{The trial design characteristics object.} \item{...}{Optional plot arguments. 
At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{markdown}{If \code{TRUE}, the object \code{x} will be printed using markdown syntax; normal representation will be used otherwise (default is \code{FALSE})} \item{showDesign}{Show the design print output above the design characteristics, default is \code{TRUE}.} } \description{ Prints the design characteristics object. } \details{ Generic function to print all kinds of design characteristics. } rpact/man/FieldSet.Rd0000644000176200001440000000046614232463333014135 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \docType{class} \name{FieldSet} \alias{FieldSet} \title{Field Set} \description{ Basic class for field sets. } \details{ The field set implements basic functions for a set of fields. } \keyword{internal} rpact/man/sub-TrialDesignSet-method.Rd0000644000176200001440000000114614427374266017374 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{[,TrialDesignSet-method} \alias{[,TrialDesignSet-method} \title{Access Trial Design by Index} \usage{ \S4method{[}{TrialDesignSet}(x, i, j, ..., drop = TRUE) } \description{ Function to access the \code{TrialDesign} at position \code{i} in a \code{TrialDesignSet} object. } \details{ Can be used to iterate over all designs in a design set. 
} \examples{ designSet <- getDesignSet(design = getDesignFisher(), alpha = c(0.01, 0.05)) for (i in 1:length(designSet)) { print(designSet[i]$alpha) } } \keyword{internal} rpact/man/readDataset.Rd0000644000176200001440000000740414372411346014660 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{readDataset} \alias{readDataset} \title{Read Dataset} \usage{ readDataset( file, ..., header = TRUE, sep = ",", quote = "\\"", dec = ".", fill = TRUE, comment.char = "", fileEncoding = "UTF-8" ) } \arguments{ \item{file}{A CSV file (see \code{\link[utils]{read.table}}).} \item{...}{Further arguments to be passed to \code{\link[utils]{read.table}}.} \item{header}{A logical value indicating whether the file contains the names of the variables as its first line.} \item{sep}{The field separator character. Values on each line of the file are separated by this character. If sep = "," (the default for \code{readDataset}) the separator is a comma.} \item{quote}{The set of quoting characters. To disable quoting altogether, use quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only considered for columns read as character, which is all of them unless \code{colClasses} is specified.} \item{dec}{The character used in the file for decimal points.} \item{fill}{logical. If \code{TRUE} then in case the rows have unequal length, blank fields are implicitly added.} \item{comment.char}{character: a character vector of length one containing a single character or an empty string. Use "" to turn off the interpretation of comments altogether.} \item{fileEncoding}{character string: if non-empty declares the encoding used on a file (not a connection) so the character data can be re-encoded. See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.} } \value{ Returns a \code{\link{Dataset}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.Dataset]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Reads a data file and returns it as dataset object. } \details{ \code{readDataset} is a wrapper function that uses \code{\link[utils]{read.table}} to read the CSV file into a data frame, transfers it from long to wide format with \code{\link[stats]{reshape}} and puts the data to \code{\link[=getDataset]{getDataset()}}. } \examples{ \dontrun{ dataFileRates <- system.file("extdata", "dataset_rates.csv", package = "rpact" ) if (dataFileRates != "") { datasetRates <- readDataset(dataFileRates) datasetRates } dataFileMeansMultiArm <- system.file("extdata", "dataset_means_multi-arm.csv", package = "rpact" ) if (dataFileMeansMultiArm != "") { datasetMeansMultiArm <- readDataset(dataFileMeansMultiArm) datasetMeansMultiArm } dataFileRatesMultiArm <- system.file("extdata", "dataset_rates_multi-arm.csv", package = "rpact" ) if (dataFileRatesMultiArm != "") { datasetRatesMultiArm <- readDataset(dataFileRatesMultiArm) datasetRatesMultiArm } dataFileSurvivalMultiArm <- system.file("extdata", "dataset_survival_multi-arm.csv", package = "rpact" ) if (dataFileSurvivalMultiArm != "") { datasetSurvivalMultiArm <- readDataset(dataFileSurvivalMultiArm) datasetSurvivalMultiArm } } } \seealso{ \itemize{ \item \code{\link[=readDatasets]{readDatasets()}} for reading multiple datasets, \item \code{\link[=writeDataset]{writeDataset()}} for writing a single dataset, \item 
\code{\link[=writeDatasets]{writeDatasets()}} for writing multiple datasets. } } rpact/man/param_typeOfShape.Rd0000644000176200001440000000174014445304766016053 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_typeOfShape} \alias{param_typeOfShape} \title{Parameter Description: Type Of Shape} \arguments{ \item{typeOfShape}{The shape of the dose-response relationship over the treatment groups. This can be either \code{"linear"}, \code{"sigmoidEmax"}, or \code{"userDefined"}, default is \code{"linear"}.\cr For \code{"linear"}, \code{"muMaxVector"} specifies the range of effect sizes for the treatment group with highest response. If \code{"sigmoidEmax"} is selected, \code{"gED50"} and \code{"slope"} has to be entered to specify the ED50 and the slope of the sigmoid Emax model. For \code{"sigmoidEmax"}, \code{"muMaxVector"} specifies the range of effect sizes for the treatment group with response according to infinite dose. If \code{"userDefined"} is selected, \code{"effectMatrix"} has to be entered.} } \description{ Parameter Description: Type Of Shape } \keyword{internal} rpact/man/DatasetRates.Rd0000644000176200001440000000232514450467342015024 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \docType{class} \name{DatasetRates} \alias{DatasetRates} \title{Dataset of Rates} \description{ Class for a dataset of rates. } \details{ This object cannot be created directly; better use \code{\link{getDataset}} with suitable arguments to create a dataset of rates. } \section{Fields}{ \describe{ \item{\code{groups}}{The group numbers. Is a numeric vector.} \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{sampleSizes}}{The sample sizes for each group and stage. 
Is a numeric vector of length number of stages times number of groups containing whole numbers.} \item{\code{overallSampleSizes}}{The overall, i.e., cumulative sample sizes. Is a numeric vector of length number of stages times number of groups.} \item{\code{events}}{The number of events in each group at each stage. Is a numeric vector of length number of stages times number of groups.} \item{\code{overallEvents}}{The overall, i.e., cumulative events. Is a numeric vector of length number of stages times number of groups containing whole numbers.} }} \keyword{internal} rpact/man/writeDataset.Rd0000644000176200001440000000567114335631006015077 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{writeDataset} \alias{writeDataset} \title{Write Dataset} \usage{ writeDataset( dataset, file, ..., append = FALSE, quote = TRUE, sep = ",", eol = "\\n", na = "NA", dec = ".", row.names = TRUE, col.names = NA, qmethod = "double", fileEncoding = "UTF-8" ) } \arguments{ \item{dataset}{A dataset.} \item{file}{The target CSV file.} \item{...}{Further arguments to be passed to \code{\link[utils]{write.table}}.} \item{append}{Logical. Only relevant if file is a character string. If \code{TRUE}, the output is appended to the file. If \code{FALSE}, any existing file of the name is destroyed.} \item{quote}{The set of quoting characters. To disable quoting altogether, use quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only considered for columns read as character, which is all of them unless \code{colClasses} is specified.} \item{sep}{The field separator character. Values on each line of the file are separated by this character. 
If sep = "," (the default for \code{writeDataset}) the separator is a comma.} \item{eol}{The character(s) to print at the end of each line (row).} \item{na}{The string to use for missing values in the data.} \item{dec}{The character used in the file for decimal points.} \item{row.names}{Either a logical value indicating whether the row names of \code{dataset} are to be written along with \code{dataset}, or a character vector of row names to be written.} \item{col.names}{Either a logical value indicating whether the column names of \code{dataset} are to be written along with \code{dataset}, or a character vector of column names to be written. See the section on 'CSV files' for the meaning of \code{col.names = NA}.} \item{qmethod}{A character string specifying how to deal with embedded double quote characters when quoting strings. Must be one of "double" (default in \code{writeDataset}) or "escape".} \item{fileEncoding}{Character string: if non-empty declares the encoding used on a file (not a connection) so the character data can be re-encoded. See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.} } \description{ Writes a dataset to a CSV file. } \details{ \code{\link[=writeDataset]{writeDataset()}} is a wrapper function that coerces the dataset to a data frame and uses \cr \code{\link[utils]{write.table}} to write it to a CSV file. } \examples{ \dontrun{ datasetOfRates <- getDataset( n1 = c(11, 13, 12, 13), n2 = c(8, 10, 9, 11), events1 = c(10, 10, 12, 12), events2 = c(3, 5, 5, 6) ) writeDataset(datasetOfRates, "dataset_rates.csv") } } \seealso{ \itemize{ \item \code{\link[=writeDatasets]{writeDatasets()}} for writing multiple datasets, \item \code{\link[=readDataset]{readDataset()}} for reading a single dataset, \item \code{\link[=readDatasets]{readDatasets()}} for reading multiple datasets. 
} } rpact/man/plot.Dataset.Rd0000644000176200001440000000520514372411346014776 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{plot.Dataset} \alias{plot.Dataset} \title{Dataset Plotting} \usage{ \method{plot}{Dataset}( x, y, ..., main = "Dataset", xlab = "Stage", ylab = NA_character_, legendTitle = "Group", palette = "Set1", showSource = FALSE, plotSettings = NULL ) } \arguments{ \item{x}{The \code{\link{Dataset}} object to plot.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{main}{The main title, default is \code{"Dataset"}.} \item{xlab}{The x-axis label, default is \code{"Stage"}.} \item{ylab}{The y-axis label.} \item{legendTitle}{The legend title, default is \code{"Group"}.} \item{palette}{The palette, default is \code{"Set1"}.} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. 
Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSettings()}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots a dataset. } \details{ Generic function to plot all kinds of datasets. } \examples{ # Plot a dataset of means dataExample <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) \dontrun{ if (require(ggplot2)) plot(dataExample, main = "Comparison of Means") } # Plot a dataset of rates dataExample <- getDataset( n1 = c(8, 10, 9, 11), n2 = c(11, 13, 12, 13), events1 = c(3, 5, 5, 6), events2 = c(8, 10, 12, 12) ) \dontrun{ if (require(ggplot2)) plot(dataExample, main = "Comparison of Rates") } } rpact/man/getSimulationMultiArmRates.Rd0000644000176200001440000003227714445304766017743 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_multiarm_rates.R \name{getSimulationMultiArmRates} \alias{getSimulationMultiArmRates} \title{Get Simulation Multi-Arm Rates} \usage{ getSimulationMultiArmRates( design = NULL, ..., activeArms = 3L, effectMatrix = NULL, typeOfShape = c("linear", "sigmoidEmax", "userDefined"), piMaxVector = seq(0.2, 0.5, 0.1), piControl = 0.2, gED50 = NA_real_, slope = 1, intersectionTest 
= c("Dunnett", "Bonferroni", "Simes", "Sidak", "Hierarchical"), directionUpper = TRUE, adaptations = NA, typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), effectMeasure = c("effectEstimate", "testStatistic"), successCriterion = c("all", "atLeastOne"), epsilonValue = NA_real_, rValue = NA_real_, threshold = -Inf, plannedSubjects = NA_real_, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, piTreatmentsH1 = NA_real_, piControlH1 = NA_real_, maxNumberOfIterations = 1000L, seed = NA_real_, calcSubjectsFunction = NULL, selectArmsFunction = NULL, showStatistics = FALSE ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{activeArms}{The number of active treatment arms to be compared with control, default is \code{3}.} \item{effectMatrix}{Matrix of effect sizes with \code{activeArms} columns and number of rows reflecting the different situations to consider.} \item{typeOfShape}{The shape of the dose-response relationship over the treatment groups. This can be either \code{"linear"}, \code{"sigmoidEmax"}, or \code{"userDefined"}, default is \code{"linear"}.\cr For \code{"linear"}, \code{"muMaxVector"} specifies the range of effect sizes for the treatment group with highest response. If \code{"sigmoidEmax"} is selected, \code{"gED50"} and \code{"slope"} has to be entered to specify the ED50 and the slope of the sigmoid Emax model. For \code{"sigmoidEmax"}, \code{"muMaxVector"} specifies the range of effect sizes for the treatment group with response according to infinite dose. 
If \code{"userDefined"} is selected, \code{"effectMatrix"} has to be entered.} \item{piMaxVector}{Range of assumed probabilities for the treatment group with highest response for \code{"linear"} and \code{"sigmoidEmax"} model, default is \code{seq(0, 1, 0.2)}.} \item{piControl}{If specified, the assumed probability in the control arm for simulation and under which the sample size recalculation is performed.} \item{gED50}{If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"gED50"} has to be entered to specify the ED50 of the sigmoid Emax model.} \item{slope}{If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"slope"} can be entered to specify the slope of the sigmoid Emax model, default is 1.} \item{intersectionTest}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses. Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}.} \item{directionUpper}{Logical. Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{adaptations}{A logical vector of length \code{kMax - 1} indicating whether or not an adaptation takes place at interim k, default is \code{rep(TRUE, kMax - 1)}.} \item{typeOfSelection}{The way the treatment arms or populations are selected at interim. Five options are available: \code{"best"}, \code{"rbest"}, \code{"epsilon"}, \code{"all"}, and \code{"userDefined"}, default is \code{"best"}.\cr For \code{"rbest"} (select the \code{rValue} best treatment arms/populations), the parameter \code{rValue} has to be specified, for \code{"epsilon"} (select treatment arm/population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. 
If \code{"userDefined"} is selected, \code{"selectArmsFunction"} or \code{"selectPopulationsFunction"} has to be specified.} \item{effectMeasure}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (difference for means and rates or ratio for survival) (\code{"effectEstimate"}), default is \code{"effectEstimate"}.} \item{successCriterion}{Defines when the study is stopped for efficacy at interim. Two options are available: \code{"all"} stops the trial if the efficacy criterion is fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim, default is \code{"all"}.} \item{epsilonValue}{For \code{typeOfSelection = "epsilon"} (select treatment arm / population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. Must be a numeric of length 1.} \item{rValue}{For \code{typeOfSelection = "rbest"} (select the \code{rValue} best treatment arms / populations), the parameter \code{rValue} has to be specified.} \item{threshold}{Selection criterion: treatment arm / population is selected only if \code{effectMeasure} exceeds \code{threshold}, default is \code{-Inf}. \code{threshold} can also be a vector of length \code{activeArms} referring to a separate threshold condition over the treatment arms.} \item{plannedSubjects}{\code{plannedSubjects} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs, \code{plannedSubjects} refers to the number of subjects per selected active arm.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. 
For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{minNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfSubjectsPerStage} with length kMax determines the minimum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{minNumberOfSubjectsPerStage} refers to the minimum number of subjects per selected active arm.} \item{maxNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfSubjectsPerStage} with length kMax determines the maximum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{maxNumberOfSubjectsPerStage} refers to the maximum number of subjects per selected active arm.} \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. 
Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} \item{piTreatmentsH1}{If specified, the assumed probability in the active treatment arm(s) under which the sample size recalculation is performed.} \item{piControlH1}{If specified, the assumed probability in the reference group (if different from \code{piControl}) for which the conditional power was calculated.} \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcSubjectsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, sample size recalculation is performed with conditional power and specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples).} \item{selectArmsFunction}{Optionally, a function can be entered that defines the way of how treatment arms are selected. This function is allowed to depend on \code{effectVector} with length \code{activeArms} and \code{stage} (see examples).} \item{showStatistics}{Logical. If \code{TRUE}, summary statistics of the simulated data are displayed for the \code{print} command, otherwise the output is suppressed, default is \code{FALSE}.} } \value{ Returns a \code{\link{SimulationResults}} object. 
The following generics (R generic functions) are available for this object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.SimulationResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the simulated power, stopping and selection probabilities, conditional power, and expected sample size for testing rates in a multi-arm treatment groups testing situation. } \details{ At given design the function simulates the power, stopping probabilities, selection probabilities, and expected sample size at given number of subjects, parameter configuration, and treatment arm selection rule in the multi-arm situation. An allocation ratio can be specified referring to the ratio of number of subjects in the active treatment groups as compared to the control group. The definition of \code{pi1H1} and/or \code{piControl} makes only sense if \code{kMax} > 1 and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined. \code{calcSubjectsFunction}\cr This function returns the number of subjects at given conditional power and conditional critical value for specified testing situation. 
The function might depend on the variables \code{stage}, \code{selectedArms}, \code{directionUpper}, \code{plannedSubjects}, \code{allocationRatioPlanned}, \code{minNumberOfSubjectsPerStage}, \code{maxNumberOfSubjectsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, \code{overallRates}, \code{overallRatesControl}, \code{piTreatmentsH1}, and \code{piControlH1}. The function has to contain the three-dots argument '...' (see examples). } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ \dontrun{ # Simulate the power of the combination test with two interim stages and # O'Brien & Fleming boundaries using Dunnett's intersection tests if the # best treatment arm is selected at first interim. Selection only take # place if a non-negative treatment effect is observed (threshold = 0); # 20 subjects per stage and treatment arm, simulation is performed for # four parameter configurations. 
design <- getDesignInverseNormal(typeOfDesign = "OF") effectMatrix <- matrix(c(0.2,0.2,0.2, 0.4,0.4,0.4, 0.4,0.5,0.5, 0.4,0.5,0.6), byrow = TRUE, nrow = 4, ncol = 3) x <- getSimulationMultiArmRates(design = design, typeOfShape = "userDefined", effectMatrix = effectMatrix , piControl = 0.2, typeOfSelection = "best", threshold = 0, intersectionTest = "Dunnett", plannedSubjects = c(20, 40, 60), maxNumberOfIterations = 50) summary(x) } } rpact/man/param_populations.Rd0000644000176200001440000000057014312324046016163 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_populations} \alias{param_populations} \title{Parameter Description: Populations} \arguments{ \item{populations}{The number of populations in a two-sample comparison, default is \code{3}.} } \description{ Parameter Description: Populations } \keyword{internal} rpact/man/getPerformanceScore.Rd0000644000176200001440000000447114447473104016400 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_performance_score.R \name{getPerformanceScore} \alias{getPerformanceScore} \title{Get Performance Score} \usage{ getPerformanceScore(simulationResult) } \arguments{ \item{simulationResult}{A simulation result.} } \description{ Calculates the conditional performance score, its sub-scores and components according to Herrmann et al. (2020) for a given simulation result from a two-stage design. Larger (sub-)score and component values refer to a better performance. } \details{ The conditional performance score consists of two sub-scores, one for the sample size (subscoreSampleSize) and one for the conditional power (subscoreConditionalPower). Each of those are composed of a location (locationSampleSize, locationConditionalPower) and variation component (variationSampleSize, variationConditionalPower). 
The term conditional refers to an evaluation perspective where the interim results suggest a trial continuation with a second stage. The score can take values between 0 and 1. More details on the performance score can be found in Herrmann et al. (2020). } \examples{ \dontrun{ # Example from Table 3 in "A new conditional performance score for # the evaluation of adaptive group sequential designs with samplesize # recalculation from Herrmann et al 2023", p.2097 for # Observed Conditional Power approach and Delta = 0.5 # Create two-stage Pocock design with binding futility boundary at 0 design <- getDesignGroupSequential( kMax = 2, typeOfDesign = "P", futilityBounds = 0, bindingFutility = TRUE) # Initialize sample sizes and effect; # Sample sizes are referring to overall stage-wise sample sizes n1 <- 100 n2 <- 100 nMax <- n1 + n2 alternative <- 0.5 # Perform Simulation; nMax*1.5 defines the maximum # sample size for the additional stage simulationResult <- getSimulationMeans( design = design, normalApproximation = TRUE, thetaH0 = 0, alternative = alternative, plannedSubjects = c(n1, nMax), minNumberOfSubjectsPerStage = c(NA_real_, 1), maxNumberOfSubjectsPerStage = c(NA_real_, nMax * 1.5), conditionalPower = 0.8, directionUpper = TRUE, maxNumberOfIterations = 1e05, seed = 140 ) # Calculate performance score getPerformanceScore(simulationResult) } } \author{ Stephen Schueuerhuis } rpact/man/PerformanceScore.Rd0000644000176200001440000000070314435574427015701 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_performance_score.R \docType{class} \name{PerformanceScore} \alias{PerformanceScore} \title{Performance Score} \description{ Contains the conditional performance score, its sub-scores and components according to Herrmann et al. (2020) for a given simulation result. } \details{ Use \link{getPerformanceScore} to calculate the performance score. 
} \keyword{internal} rpact/man/param_median1.Rd0000644000176200001440000000055514232463333015133 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_median1} \alias{param_median1} \title{Parameter Description: Median (1)} \arguments{ \item{median1}{The assumed median survival time in the treatment group, there is no default.} } \description{ Parameter Description: Median (1) } \keyword{internal} rpact/man/dataEnrichmentRatesStratified.Rd0000644000176200001440000000107714313321260020371 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataEnrichmentRatesStratified} \alias{dataEnrichmentRatesStratified} \title{Stratified Enrichment Dataset of Rates} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataEnrichmentRatesStratified } \description{ A dataset containing the sample sizes and events of two groups. Use \code{getDataset(dataEnrichmentRatesStratified)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. } \keyword{internal} rpact/man/param_includeAllParameters.Rd0000644000176200001440000000077114335631011017707 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_includeAllParameters} \alias{param_includeAllParameters} \title{Parameter Description: Include All Parameters} \arguments{ \item{includeAllParameters}{Logical. 
If \code{TRUE}, all available parameters will be included in the data frame; a meaningful parameter selection otherwise, default is \code{FALSE}.} } \description{ Parameter Description: Include All Parameters } \keyword{internal} rpact/man/dataEnrichmentSurvivalStratified.Rd0000644000176200001440000000115614313321260021124 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataEnrichmentSurvivalStratified} \alias{dataEnrichmentSurvivalStratified} \title{Stratified Enrichment Dataset of Survival Data} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataEnrichmentSurvivalStratified } \description{ A dataset containing the log-rank statistics, events, and allocation ratios of two groups. Use \code{getDataset(dataEnrichmentSurvivalStratified)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. } \keyword{internal} rpact/man/utilitiesForPiecewiseExponentialDistribution.Rd0000644000176200001440000001004414335631007023555 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_utilities.R \name{utilitiesForPiecewiseExponentialDistribution} \alias{utilitiesForPiecewiseExponentialDistribution} \alias{getPiecewiseExponentialDistribution} \alias{ppwexp} \alias{getPiecewiseExponentialQuantile} \alias{qpwexp} \alias{getPiecewiseExponentialRandomNumbers} \alias{rpwexp} \title{The Piecewise Exponential Distribution} \usage{ getPiecewiseExponentialDistribution( time, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1 ) ppwexp(t, ..., s = NA_real_, lambda = NA_real_, kappa = 1) getPiecewiseExponentialQuantile( quantile, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1 ) qpwexp(q, ..., s = NA_real_, lambda = NA_real_, kappa = 1) getPiecewiseExponentialRandomNumbers( n, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa 
= 1 ) rpwexp(n, ..., s = NA_real_, lambda = NA_real_, kappa = 1) } \arguments{ \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{kappa}{A numeric value > 0. A \code{kappa != 1} will be used for the specification of the shape of the Weibull distribution. Default is \code{1}, i.e., the exponential survival distribution is used instead of the Weibull distribution. Note that the Weibull distribution cannot be used for the piecewise definition of the survival time distribution, i.e., only \code{piecewiseLambda} (as a single value) and \code{kappa} can be specified. This function is equivalent to \code{pweibull(t, shape = kappa, scale = 1 / lambda)} of the \code{stats} package, i.e., the scale parameter is \code{1 / 'hazard rate'}.\cr For example, \code{getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2)} and \code{pweibull(q = 130, shape = 4.2, scale = 1 / 0.01)} provide the same result.} \item{t, time}{Vector of time values.} \item{s, piecewiseSurvivalTime}{Vector of start times defining the "time pieces".} \item{lambda, piecewiseLambda}{Vector of lambda values (hazard rates) corresponding to the start times.} \item{q, quantile}{Vector of quantiles.} \item{n}{Number of observations.} } \value{ A \code{\link[base]{numeric}} value or vector will be returned. } \description{ Distribution function, quantile function and random number generation for the piecewise exponential distribution. } \details{ \code{getPiecewiseExponentialDistribution()} (short: \code{ppwexp()}), \code{getPiecewiseExponentialQuantile()} (short: \code{qpwexp()}), and \code{getPiecewiseExponentialRandomNumbers()} (short: \code{rpwexp()}) provide probabilities, quantiles, and random numbers according to a piecewise exponential or a Weibull distribution.
The piecewise definition is performed through a vector of starting times (\code{piecewiseSurvivalTime}) and a vector of hazard rates (\code{piecewiseLambda}). You can also use a list that defines the starting times and piecewise lambdas together and define piecewiseSurvivalTime as this list. The list needs to have the form, e.g., piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, ">=15" = 0.007) . For the Weibull case, you can also specify a shape parameter kappa in order to calculate probabilities, quantiles, or random numbers. In this case, no piecewise definition is possible, i.e., only piecewiseLambda (as a single value) and kappa need to be specified. } \examples{ # Calculate probabilities for a range of time values for a # piecewise exponential distribution with hazard rates # 0.025, 0.04, 0.015, and 0.01 in the intervals # [0, 6), [6, 9), [9, 15), [15, Inf), respectively, # and re-return the time values: piecewiseSurvivalTime <- list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, ">=15" = 0.01 ) y <- getPiecewiseExponentialDistribution(seq(0, 150, 15), piecewiseSurvivalTime = piecewiseSurvivalTime ) getPiecewiseExponentialQuantile(y, piecewiseSurvivalTime = piecewiseSurvivalTime ) } rpact/man/param_intersectionTest_Enrichment.Rd0000644000176200001440000000113414335631011021325 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_intersectionTest_Enrichment} \alias{param_intersectionTest_Enrichment} \title{Parameter Description: Intersection Test} \arguments{ \item{intersectionTest}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses.
Four options are available in enrichment designs: \code{"SpiessensDebois"}, \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.} } \description{ Parameter Description: Intersection Test } \keyword{internal} rpact/man/writeDatasets.Rd0000644000176200001440000000601714335631006015255 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{writeDatasets} \alias{writeDatasets} \title{Write Multiple Datasets} \usage{ writeDatasets( datasets, file, ..., append = FALSE, quote = TRUE, sep = ",", eol = "\\n", na = "NA", dec = ".", row.names = TRUE, col.names = NA, qmethod = "double", fileEncoding = "UTF-8" ) } \arguments{ \item{datasets}{A list of datasets.} \item{file}{The target CSV file.} \item{...}{Further arguments to be passed to \code{\link[utils]{write.table}}.} \item{append}{Logical. Only relevant if file is a character string. If \code{TRUE}, the output is appended to the file. If FALSE, any existing file of the name is destroyed.} \item{quote}{The set of quoting characters. To disable quoting altogether, use quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only considered for columns read as character, which is all of them unless \code{colClasses} is specified.} \item{sep}{The field separator character. Values on each line of the file are separated by this character. 
If sep = "," (the default for \code{writeDatasets}) the separator is a comma.} \item{eol}{The character(s) to print at the end of each line (row).} \item{na}{The string to use for missing values in the data.} \item{dec}{The character used in the file for decimal points.} \item{row.names}{Either a logical value indicating whether the row names of \code{dataset} are to be written along with \code{dataset}, or a character vector of row names to be written.} \item{col.names}{Either a logical value indicating whether the column names of \code{dataset} are to be written along with \code{dataset}, or a character vector of column names to be written. See the section on 'CSV files' for the meaning of \code{col.names = NA}.} \item{qmethod}{A character string specifying how to deal with embedded double quote characters when quoting strings. Must be one of "double" (default in \code{writeDatasets}) or "escape".} \item{fileEncoding}{Character string: if non-empty declares the encoding used on a file (not a connection) so the character data can be re-encoded. See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.} } \description{ Writes a list of datasets to a CSV file. } \details{ The format of the CSV file is optimized for usage of \code{\link[=readDatasets]{readDatasets()}}. } \examples{ \dontrun{ d1 <- getDataset( n1 = c(11, 13, 12, 13), n2 = c(8, 10, 9, 11), events1 = c(10, 10, 12, 12), events2 = c(3, 5, 5, 6) ) d2 <- getDataset( n1 = c(9, 13, 12, 13), n2 = c(6, 10, 9, 11), events1 = c(10, 10, 12, 12), events2 = c(4, 5, 5, 6) ) datasets <- list(d1, d2) writeDatasets(datasets, "datasets_rates.csv") } } \seealso{ \itemize{ \item \code{\link[=writeDataset]{writeDataset()}} for writing a single dataset, \item \code{\link[=readDatasets]{readDatasets()}} for reading multiple datasets, \item \code{\link[=readDataset]{readDataset()}} for reading a single dataset. 
} } rpact/man/param_nMax.Rd0000644000176200001440000000051514312324046014510 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_nMax} \alias{param_nMax} \title{Parameter Description: N_max} \arguments{ \item{nMax}{The maximum sample size. Must be a positive integer of length 1.} } \description{ Parameter Description: N_max } \keyword{internal} rpact/man/roxygen/0000755000176200001440000000000014070776016013641 5ustar liggesusersrpact/man/roxygen/meta.R0000644000176200001440000000024714017174150014705 0ustar liggesusers rd_family_title <- list( design = "Design functions", analysis = "Analysis functions", "analysis functions" = "Analysis functions" ) list(r6 = FALSE) rpact/man/SimulationResultsEnrichmentMeans.Rd0000644000176200001440000001476014450467343021156 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsEnrichmentMeans} \alias{SimulationResultsEnrichmentMeans} \title{Class for Simulation Results Enrichment Means} \description{ A class for simulation results means in enrichment designs. } \details{ Use \code{\link[=getSimulationEnrichmentMeans]{getSimulationEnrichmentMeans()}} to create an object of this type. } \section{Fields}{ \describe{ \item{\code{maxNumberOfIterations}}{The number of simulation iterations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. 
Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{stDev}}{The standard deviation used for sample size and power calculation. Is a numeric vector of length 1.} \item{\code{plannedSubjects}}{Determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, refers to the number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{minNumberOfSubjectsPerStage}}{Determines the minimum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the minimum number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{maxNumberOfSubjectsPerStage}}{Determines the maximum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the maximum number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{stDevH1}}{The standard deviation under which the conditional power or sample size recalculation is performed.
Is a numeric vector of length 1.} \item{\code{calcSubjectsFunction}}{An optional function that can be entered to define how sample size is recalculated. By default, recalculation is performed with conditional power with specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage}.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{populations}}{The number of populations in an enrichment design. Is a numeric vector of length 1 containing a whole number.} \item{\code{effectList}}{The list of subsets, prevalences and effect sizes with columns and number of rows reflecting the different situations to be considered.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. When testing means and rates, a non-stratified analysis can be performed on overall data. For survival data, only a stratified analysis is possible. Is a logical vector of length 1.} \item{\code{adaptations}}{Indicates whether or not an adaptation takes place at interim k. Is a logical vector of length \code{kMax} minus 1.} \item{\code{typeOfSelection}}{The way the treatment arms or populations are selected at interim. Is a character vector of length 1.} \item{\code{effectMeasure}}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (\code{"effectEstimate"}). Is a character vector of length 1.} \item{\code{successCriterion}}{Defines when the study is stopped for efficacy at interim. \code{"all"} stops the trial if the efficacy criterion has been fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim. 
Is a character vector of length 1.} \item{\code{epsilonValue}}{Needs to be specified if \code{typeOfSelection = "epsilon"}. Is a numeric vector of length 1.} \item{\code{rValue}}{Needs to be specified if \code{typeOfSelection = "rBest"}. Is a numeric vector of length 1.} \item{\code{threshold}}{The selection criterion: treatment arm/population is only selected if \code{effectMeasure} exceeds \code{threshold}. Either a single numeric value or a numeric vector of length \code{activeArms} referring to a separate threshold condition for each treatment arm.} \item{\code{selectPopulationsFunction}}{An optional function that can be entered to define the way of how populations are selected.} \item{\code{earlyStop}}{The probability to stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{selectedPopulations}}{The selected populations in enrichment designs.} \item{\code{numberOfPopulations}}{The number of populations in an enrichment design. Is a numeric matrix.} \item{\code{rejectAtLeastOne}}{The probability to reject at least one of the (multiple) hypotheses. Is a numeric vector.} \item{\code{rejectedPopulationsPerStage}}{The simulated number of rejected populations per stage.} \item{\code{successPerStage}}{The simulated success probabilities per stage where success is defined by user. Is a numeric matrix.} \item{\code{sampleSizes}}{The sample sizes for each group and stage. Is a numeric vector of length number of stages times number of groups containing whole numbers.} \item{\code{conditionalPowerAchieved}}{The calculated conditional power, under the assumption of observed or assumed effect sizes. 
Is a numeric matrix.} }} \keyword{internal} rpact/man/TrialDesignPlanRates.Rd0000644000176200001440000001272114450501306016445 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \docType{class} \name{TrialDesignPlanRates} \alias{TrialDesignPlanRates} \title{Trial Design Plan Rates} \description{ Trial design plan for rates. } \details{ This object cannot be created directly; use \code{\link[=getSampleSizeRates]{getSampleSizeRates()}} with suitable arguments to create a design plan for a dataset of rates. } \section{Fields}{ \describe{ \item{\code{riskRatio}}{Specifies if the sample size for one-sided testing of H0: \code{pi1 / pi2 = thetaH0} has been calculated. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{groups}}{The group numbers. Is a numeric vector.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{optimumAllocationRatio}}{The allocation ratio that is optimum with respect to the overall sample size at given power. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. 
Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{effect}}{The effect for randomly creating normally distributed responses. Is a numeric vector of length \code{kMax}.} \item{\code{overallReject}}{The overall rejection probability. Is a numeric vector.} \item{\code{rejectPerStage}}{The probability to reject a hypothesis per stage of the trial. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{earlyStop}}{The probability to stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{nFixed}}{The sample size in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{nFixed1}}{The sample size in treatment arm 1 in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{nFixed2}}{The sample size in treatment arm 2 in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{informationRates}}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{maxNumberOfSubjects}}{The maximum number of subjects for power calculations. Is a numeric vector.} \item{\code{maxNumberOfSubjects1}}{The maximum number of subjects in treatment arm 1. Is a numeric vector.} \item{\code{maxNumberOfSubjects2}}{The maximum number of subjects in treatment arm 2. 
Is a numeric vector.} \item{\code{numberOfSubjects}}{In simulation results data set: The number of subjects under consideration when the interim analysis takes place.} \item{\code{numberOfSubjects1}}{In simulation results data set: The number of subjects under consideration in treatment arm 1 when the interim analysis takes place.} \item{\code{numberOfSubjects2}}{In simulation results data set: The number of subjects under consideration in treatment arm 2 when the interim analysis takes place.} \item{\code{expectedNumberOfSubjectsH0}}{The expected number of subjects under H0. Is a numeric vector.} \item{\code{expectedNumberOfSubjectsH01}}{The expected number of subjects under a value between H0 and H1. Is a numeric vector.} \item{\code{expectedNumberOfSubjectsH1}}{The expected number of subjects under H1. Is a numeric vector.} \item{\code{criticalValuesEffectScale}}{The critical values for each stage of the trial on the effect size scale.} \item{\code{criticalValuesEffectScaleLower}}{The lower critical values for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{criticalValuesEffectScaleUpper}}{The upper critical values for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{criticalValuesPValueScale}}{The critical values for each stage of the trial on the p-value scale.} \item{\code{futilityBoundsEffectScale}}{The futility bounds for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{futilityBoundsEffectScaleLower}}{The lower futility bounds for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{futilityBoundsEffectScaleUpper}}{The upper futility bounds for each stage of the trial on the effect size scale. Is a numeric matrix.} \item{\code{futilityBoundsPValueScale}}{The futility bounds for each stage of the trial on the p-value scale. 
Is a numeric matrix.} }} \keyword{internal} rpact/man/DatasetMeans.Rd0000644000176200001440000000264114450467342015012 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \docType{class} \name{DatasetMeans} \alias{DatasetMeans} \title{Dataset of Means} \description{ Class for a dataset of means. } \details{ This object cannot be created directly; better use \code{\link{getDataset}} with suitable arguments to create a dataset of means. } \section{Fields}{ \describe{ \item{\code{groups}}{The group numbers. Is a numeric vector.} \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{sampleSizes}}{The sample sizes for each group and stage. Is a numeric vector of length number of stages times number of groups containing whole numbers.} \item{\code{means}}{The means. Is a numeric vector of length number of stages times number of groups.} \item{\code{stDevs}}{The standard deviations. Is a numeric vector of length number of stages times number of groups.} \item{\code{overallSampleSizes}}{The overall, i.e., cumulative sample sizes. Is a numeric vector of length number of stages times number of groups.} \item{\code{overallMeans}}{The overall, i.e., cumulative means. Is a numeric vector of length number of stages times number of groups.} \item{\code{overallStDevs}}{The overall, i.e., cumulative standard deviations. 
Is a numeric vector of length number of stages times number of groups.} }} \keyword{internal} rpact/man/param_activeArms.Rd0000644000176200001440000000060014232463334015703 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_activeArms} \alias{param_activeArms} \title{Parameter Description: Active Arms} \arguments{ \item{activeArms}{The number of active treatment arms to be compared with control, default is \code{3}.} } \description{ Parameter Description: Active Arms } \keyword{internal} rpact/man/param_threshold.Rd0000644000176200001440000000106514335631011015600 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_threshold} \alias{param_threshold} \title{Parameter Description: Threshold} \arguments{ \item{threshold}{Selection criterion: treatment arm / population is selected only if \code{effectMeasure} exceeds \code{threshold}, default is \code{-Inf}. \code{threshold} can also be a vector of length \code{activeArms} referring to a separate threshold condition over the treatment arms.} } \description{ Parameter Description: Threshold } \keyword{internal} rpact/man/AnalysisResultsMultiArmFisher-class.Rd0000644000176200001440000001037614450467342021531 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsMultiArmFisher-class} \alias{AnalysisResultsMultiArmFisher-class} \alias{AnalysisResultsMultiArmFisher} \title{Analysis Results Multi-Arm Fisher} \description{ Class for multi-arm analysis results based on a Fisher combination test design. } \details{ This object cannot be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the multi-arm analysis results of a Fisher combination test design. 
} \section{Fields}{ \describe{ \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDevs}}{Assumed standard deviations to calculate conditional power in multi-arm trials or enrichment designs. Is a numeric vector.} \item{\code{piTreatments}}{The assumed rates in the treatment groups for multi-arm and enrichment designs, i.e., designs with multiple subsets.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. 
Is a character vector of length 1.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple (i.e., >2) treatment arms or population enrichment designs when testing means. Available options for multiple arms: \code{"overallPooled", "pairwisePooled", "notPooled"}. Available options for enrichment designs: \code{"pooled", "pooledFromFull", "notPooled"}.} \item{\code{conditionalRejectionProbabilities}}{The probabilities of rejecting the null hypothesis at each stage, given the stage has been reached. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{repeatedConfidenceIntervalLowerBounds}}{The lower bound of the confidence intervals that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedPValues}}{The p-values that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{piControl}}{The assumed probability in the control arm for simulation and under which the sample size recalculation is performed. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{conditionalPowerSimulated}}{The simulated conditional power, under the assumption of observed or assumed effect sizes.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. 
Is a numeric vector of length 1.} }} \keyword{internal} rpact/man/plot.TrialDesignSet.Rd0000644000176200001440000001125714372411347016277 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{plot.TrialDesignSet} \alias{plot.TrialDesignSet} \title{Trial Design Set Plotting} \usage{ \method{plot}{TrialDesignSet}( x, y, ..., type = 1L, main = NA_character_, xlab = NA_character_, ylab = NA_character_, palette = "Set1", theta = seq(-1, 1, 0.02), nMax = NA_integer_, plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, grid = 1, plotSettings = NULL ) } \arguments{ \item{x}{The trial design set, obtained from \code{\link[=getDesignSet]{getDesignSet()}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{type}{The plot type (default = \code{1}). The following plot types are available: \itemize{ \item \code{1}: creates a 'Boundaries' plot \item \code{3}: creates a 'Stage Levels' plot \item \code{4}: creates a 'Error Spending' plot \item \code{5}: creates a 'Power and Early Stopping' plot \item \code{6}: creates an 'Average Sample Size and Power / Early Stop' plot \item \code{7}: creates an 'Power' plot \item \code{8}: creates an 'Early Stopping' plot \item \code{9}: creates an 'Average Sample Size' plot \item \code{"all"}: creates all available plots and returns it as a grid plot or list }} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{palette}{The palette, default is \code{"Set1"}.} \item{theta}{A vector of standardized effect sizes (theta values), default is a sequence from -1 to 1.} \item{nMax}{The maximum sample size. Must be a positive integer of length 1.} \item{plotPointsEnabled}{Logical. 
If \code{TRUE}, additional points will be plotted.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{grid}{An integer value specifying the output of multiple plots. By default (\code{1}) a list of \code{ggplot} objects will be returned. If a \code{grid} value > 1 was specified, a grid plot will be returned if the number of plots is <= specified \code{grid} value; a list of \code{ggplot} objects will be returned otherwise. 
If \code{grid = 0} is specified, all plots will be created using \code{\link[base]{print}} command and a list of \code{ggplot} objects will be returned invisible. Note that one of the following packages must be installed to create a grid plot: 'ggpubr', 'gridExtra', or 'cowplot'.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSetting()s}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots a trial design set. } \details{ Generic function to plot a trial design set. Is, e.g., useful to compare different designs or design parameters visual. } \examples{ \dontrun{ design <- getDesignInverseNormal( kMax = 3, alpha = 0.025, typeOfDesign = "asKD", gammaA = 2, informationRates = c(0.2, 0.7, 1), typeBetaSpending = "bsOF" ) # Create a set of designs based on the master design defined above # and varied parameter 'gammaA' designSet <- getDesignSet(design = design, gammaA = 4) if (require(ggplot2)) plot(designSet, type = 1, legendPosition = 6) } } rpact/man/param_adaptations.Rd0000644000176200001440000000070114335631011016107 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_adaptations} \alias{param_adaptations} \title{Parameter Description: Adaptations} \arguments{ \item{adaptations}{A logical vector of length \code{kMax - 1} indicating whether or not an adaptation takes place at interim k, default is \code{rep(TRUE, kMax - 1)}.} } \description{ Parameter Description: Adaptations } \keyword{internal} rpact/man/TrialDesignFisher.Rd0000644000176200001440000000714714450476536016022 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignFisher} \alias{TrialDesignFisher} \title{Fisher Design} \description{ Trial design for Fisher's combination test. 
} \details{ This object should not be created directly; use \code{\link{getDesignFisher}} with suitable arguments to create a Fisher combination test design. } \section{Fields}{ \describe{ \item{\code{kMax}}{The maximum number of stages \code{K}. Is a numeric vector of length 1 containing a whole number.} \item{\code{alpha}}{The significance level alpha, default is 0.025. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{informationRates}}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{userAlphaSpending}}{The user defined alpha spending. Contains the cumulative alpha-spending (type I error rate) up to each interim stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{criticalValues}}{The critical values for each stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{stageLevels}}{The adjusted significance levels to reach significance in a group sequential design. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{alphaSpent}}{The cumulative alpha spent at each stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{bindingFutility}}{If \code{TRUE}, the calculation of the critical values is affected by the futility bounds and the futility threshold is binding in the sense that the study must be stopped if the futility condition was reached (default is \code{FALSE}) Is a logical vector of length 1.} \item{\code{tolerance}}{The numerical tolerance, default is \code{1e-06}. Is a numeric vector of length 1.} \item{\code{method}}{"equalAlpha", "fullAlpha", "noInteraction", or "userDefinedAlpha", default is "equalAlpha". 
For details, see Wassmer, 1999, doi: 10.1002/(SICI)1521-4036(199906)41:3\%3C279::AID-BIMJ279\%3E3.0.CO;2-V. Is a character vector of length 1.} \item{\code{alpha0Vec}}{The stopping for futility bounds for stage-wise p-values in Fisher's combination test. Is a numeric vector of length \code{kMax} minus 1 containing values between 0 and 1.} \item{\code{scale}}{The scale for Fisher's combination test. Numeric vector of length \code{kMax-1} that applies to Fisher's design with unequally spaced information rates. Is a numeric vector of length \code{kMax} minus 1 containing values between 0 and 1.} \item{\code{nonStochasticCurtailment}}{If \code{TRUE}, the stopping rule is based on the phenomenon of non-stochastic curtailment rather than stochastic reasoning. Is a logical vector of length 1.} \item{\code{sided}}{Describes if the alternative is one-sided (\code{1}) or two-sided (\code{2}). Is a numeric vector of length 1 containing a whole number.} \item{\code{simAlpha}}{The observed alpha error if simulations have been performed. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} }} \seealso{ \code{\link{getDesignFisher}} for creating a Fisher combination test design. 
} \keyword{internal} rpact/man/param_rValue.Rd0000644000176200001440000000065214335631011015043 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_rValue} \alias{param_rValue} \title{Parameter Description: R Value} \arguments{ \item{rValue}{For \code{typeOfSelection = "rbest"} (select the \code{rValue} best treatment arms / populations), the parameter \code{rValue} has to be specified.} } \description{ Parameter Description: R Value } \keyword{internal} rpact/man/plot.ParameterSet.Rd0000644000176200001440000000532314335631006016002 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{plot.ParameterSet} \alias{plot.ParameterSet} \title{Parameter Set Plotting} \usage{ \method{plot}{ParameterSet}( x, y, ..., main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1L, palette = "Set1", legendPosition = NA_integer_, showSource = FALSE, plotSettings = NULL ) } \arguments{ \item{x}{The object that inherits from \code{\link{ParameterSet}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{type}{The plot type (default = 1).} \item{palette}{The palette, default is \code{"Set1"}.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. 
Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSettings()}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots an object that inherits from class \code{\link{ParameterSet}}. } \details{ Generic function to plot a parameter set. 
} rpact/man/getRawData.Rd0000644000176200001440000000477214402556624014472 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \name{getRawData} \alias{getRawData} \title{Get Simulation Raw Data for Survival} \usage{ getRawData(x, aggregate = FALSE) } \arguments{ \item{x}{A \code{\link{SimulationResults}} object created by \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}.} \item{aggregate}{Logical. If \code{TRUE} the raw data will be aggregated similar to the result of \code{\link[=getData]{getData()}}, default is \code{FALSE}.} } \value{ Returns a \code{\link[base]{data.frame}}. } \description{ Returns the raw survival data which was generated for simulation. } \details{ This function works only if \code{\link[=getSimulationSurvival]{getSimulationSurvival()}} was called with a \cr \code{maxNumberOfRawDatasetsPerStage} > 0 (default is \code{0}). This function can be used to get the simulated raw data from a simulation results object obtained by \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}. Note that \code{\link[=getSimulationSurvival]{getSimulationSurvival()}} must called before with \code{maxNumberOfRawDatasetsPerStage} > 0. The data frame contains the following columns: \enumerate{ \item \code{iterationNumber}: The number of the simulation iteration. \item \code{stopStage}: The stage of stopping. \item \code{subjectId}: The subject id (increasing number 1, 2, 3, ...) \item \code{accrualTime}: The accrual time, i.e., the time when the subject entered the trial. \item \code{treatmentGroup}: The treatment group number (1 or 2). \item \code{survivalTime}: The survival time of the subject. \item \code{dropoutTime}: The dropout time of the subject (may be \code{NA}). \item \code{observationTime}: The specific observation time. 
\item \code{timeUnderObservation}: The time under observation is defined as follows:\cr if (event == TRUE) {\cr timeUnderObservation <- survivalTime;\cr } else if (dropoutEvent == TRUE) {\cr timeUnderObservation <- dropoutTime;\cr } else {\cr timeUnderObservation <- observationTime - accrualTime;\cr } \item \code{event}: \code{TRUE} if an event occurred; \code{FALSE} otherwise. \item \code{dropoutEvent}: \code{TRUE} if an dropout event occurred; \code{FALSE} otherwise. } } \examples{ \dontrun{ results <- getSimulationSurvival( pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, eventTime = 12, accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200, maxNumberOfIterations = 50, maxNumberOfRawDatasetsPerStage = 5 ) rawData <- getRawData(results) head(rawData) dim(rawData) } } rpact/man/getPowerSurvival.Rd0000644000176200001440000003753114417202031015760 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getPowerSurvival} \alias{getPowerSurvival} \title{Get Power Survival} \usage{ getPowerSurvival( design = NULL, ..., typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"), thetaH0 = 1, directionUpper = NA, pi1 = NA_real_, pi2 = NA_real_, lambda1 = NA_real_, lambda2 = NA_real_, median1 = NA_real_, median2 = NA_real_, kappa = 1, hazardRatio = NA_real_, piecewiseSurvivalTime = NA_real_, allocationRatioPlanned = 1, eventTime = 12, accrualTime = c(0, 12), accrualIntensity = 0.1, accrualIntensityType = c("auto", "absolute", "relative"), maxNumberOfSubjects = NA_real_, maxNumberOfEvents = NA_real_, dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12 ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. 
In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{typeOfComputation}{Three options are available: \code{"Schoenfeld"}, \code{"Freedman"}, \code{"HsiehFreedman"}, the default is \code{"Schoenfeld"}. For details, see Hsieh (Statistics in Medicine, 1992). For non-inferiority testing (i.e., \code{thetaH0 != 1}), only Schoenfeld's formula can be used.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{directionUpper}{Logical. 
Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{pi1}{A numeric value or vector that represents the assumed event rate in the treatment group, default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or \code{seq(0.4, 0.6, 0.1)} (sample size calculations).} \item{pi2}{A numeric value that represents the assumed event rate in the control group, default is \code{0.2}.} \item{lambda1}{The assumed hazard rate in the treatment group, there is no default. \code{lambda1} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} \item{lambda2}{The assumed hazard rate in the reference group, there is no default. \code{lambda2} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} \item{median1}{The assumed median survival time in the treatment group, there is no default.} \item{median2}{The assumed median survival time in the reference group, there is no default. Must be a positive numeric of length 1.} \item{kappa}{A numeric value > 0. A \code{kappa != 1} will be used for the specification of the shape of the Weibull distribution. Default is \code{1}, i.e., the exponential survival distribution is used instead of the Weibull distribution. Note that the Weibull distribution cannot be used for the piecewise definition of the survival time distribution, i.e., only \code{piecewiselambda} (as a single value) and \code{kappa} can be specified. 
This function is equivalent to \code{pweibull(t, shape = kappa, scale = 1 / lambda)} of the \code{stats} package, i.e., the scale parameter is \code{1 / 'hazard rate'}.\cr For example, \code{getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2)} and \code{pweibull(q = 130, shape = 4.2, scale = 1 / 0.01)} provide the same result.} \item{hazardRatio}{The vector of hazard ratios under consideration. If the event or hazard rates in both treatment groups are defined, the hazard ratio does not need to be specified as it is calculated, there is no default. Must be a positive numeric of length 1.} \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function \cr (for details see \code{\link[=getPiecewiseSurvivalTime]{getPiecewiseSurvivalTime()}}).} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.} \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0, 12)} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensity}{A numeric vector of accrual intensities, default is the relative intensity \code{0.1} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensityType}{A character value specifying the accrual intensity input type. 
Must be one of \code{"auto"}, \code{"absolute"}, or \code{"relative"}; default is \code{"auto"}, i.e., if all values are < 1 the type is \code{"relative"}, otherwise it is \code{"absolute"}.} \item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified. If accrual time and accrual intensity are specified, this will be calculated. Must be a positive integer of length 1.} \item{maxNumberOfEvents}{\code{maxNumberOfEvents > 0} is the maximum number of events, it determines the power of the test and needs to be specified.} \item{dropoutRate1}{The assumed drop-out rate in the treatment group, default is \code{0}.} \item{dropoutRate2}{The assumed drop-out rate in the control group, default is \code{0}.} \item{dropoutTime}{The assumed time for drop-out rates in the control and the treatment group, default is \code{12}.} } \value{ Returns a \code{\link{TrialDesignPlan}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.TrialDesignSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesignPlan]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesignPlan]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the power, stopping probabilities, and expected sample size for testing the hazard ratio in a two treatment groups survival design. } \details{ At given design the function calculates the power, stopping probabilities, and expected sample size at given number of events and number of subjects. 
It also calculates the time when the required events are expected under the given assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times and constant or non-constant piecewise accrual). Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number of subjects in the two treatment groups. The formula of Kim & Tsiatis (Biometrics, 1990) is used to calculate the expected number of events under the alternative (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized to piecewise survival times and non-constant piecewise accrual over time.\cr } \section{Piecewise survival time}{ The first element of the vector \code{piecewiseSurvivalTime} must be equal to \code{0}. \code{piecewiseSurvivalTime} can also be a list that combines the definition of the time intervals and hazard rates in the reference group. The definition of the survival time in the treatment group is obtained by the specification of the hazard ratio (see examples for details). } \section{Staggered patient entry}{ \code{accrualTime} is the time period of subjects' accrual in a study. It can be a value that defines the end of accrual or a vector. In this case, \code{accrualTime} can be used to define a non-constant accrual over time. For this, \code{accrualTime} is a vector that defines the accrual intervals. The first element of \code{accrualTime} must be equal to \code{0} and, additionally, \code{accrualIntensity} needs to be specified. \code{accrualIntensity} itself is a value or a vector (depending on the length of \code{accrualTime}) that defines the intensity how subjects enter the trial in the intervals defined through \code{accrualTime}. \code{accrualTime} can also be a list that combines the definition of the accrual time and accrual intensity (see below and examples for details). 
If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to be specified and the end of accrual is calculated. In that case, \code{accrualIntensity} is the number of subjects per time unit, i.e., the absolute accrual intensity. If the length of \code{accrualTime} equals the length of \code{accrualIntensity - 1} (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated if the absolute accrual intensity is given. If all elements in \code{accrualIntensity} are smaller than 1, \code{accrualIntensity} defines the \emph{relative} intensity how subjects enter the trial. For example, \code{accrualIntensity = c(0.1, 0.2)} specifies that in the second accrual interval the intensity is doubled as compared to the first accrual interval. The actual (absolute) accrual intensity is calculated for the calculated or given \code{maxNumberOfSubjects}. Note that the default is \code{accrualIntensity = 0.1} meaning that the \emph{absolute} accrual intensity will be calculated. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \examples{ # Fixed sample size with minimum required definitions, pi1 = c(0.4,0.5,0.5) and # pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default getPowerSurvival(maxNumberOfEvents = 40, maxNumberOfSubjects = 200) \dontrun{ # Four stage O'Brien & Fleming group sequential design with minimum required # definitions, pi1 = c(0.4,0.5,0.5) and pi2 = 0.2 at event time 12, # accrual time 12 and follow-up time 6 as default getPowerSurvival(design = getDesignGroupSequential(kMax = 4), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) # For fixed sample design, determine necessary accrual time if 200 subjects and # 30 subjects per time unit can be recruited getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0), accrualIntensity = 30, maxNumberOfSubjects = 200) # Determine necessary accrual time if 200 subjects and if the first 6 time units # 20 subjects per time unit can be recruited, then 30 subjects per time unit getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200) # Determine maximum number of Subjects if the first 6 time units 20 subjects per # time unit can be recruited, and after 10 time units 30 subjects per time unit getPowerSurvival(maxNumberOfEvents = 40, accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30)) # Specify accrual time as a list at <- list( "0 - <6" = 20, "6 - Inf" = 30) getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at, maxNumberOfSubjects = 200) # Specify accrual time as a list, if maximum number of subjects need to be calculated at <- list( "0 - <6" = 20, "6 - <=10" = 30) getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at) # Specify effect size for a two-stage group design with O'Brien & Fleming boundaries # Effect size is based on event rates at specified event time, directionUpper = FALSE # needs to be specified because it should be shown that hazard ratio < 1 getPowerSurvival(design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, 
pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) # Effect size is based on event rate at specified event time for the reference group # and hazard ratio, directionUpper = FALSE needs to be specified # because it should be shown that hazard ratio < 1 getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) # Effect size is based on hazard rate for the reference group and hazard ratio, # directionUpper = FALSE needs to be specified because it should be shown that # hazard ratio < 1 getPowerSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) # Specification of piecewise exponential survival time and hazard ratios getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01,0.02,0.04), hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) # Specification of piecewise exponential survival time as list and hazard ratios pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) # Specification of piecewise exponential survival time for both treatment arms getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015,0.03,0.06), maxNumberOfEvents = 40, maxNumberOfSubjects = 200) # Specification of piecewise exponential survival time as a list pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) getPowerSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, 
maxNumberOfSubjects = 200) # Specify effect size based on median survival times getPowerSurvival(median1 = 5, median2 = 3, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) # Specify effect size based on median survival times of # Weibull distribtion with kappa = 2 getPowerSurvival(median1 = 5, median2 = 3, kappa = 2, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE) } } \seealso{ Other power functions: \code{\link{getPowerMeans}()}, \code{\link{getPowerRates}()} } \concept{power functions} rpact/man/param_minNumberOfEventsPerStage.Rd0000644000176200001440000000116114335631011020642 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_minNumberOfEventsPerStage} \alias{param_minNumberOfEventsPerStage} \title{Parameter Description: Min Number Of Events Per Stage} \arguments{ \item{minNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfEventsPerStage} with length kMax determines the minimum number of events per stage (i.e., not cumulated), the first element is not taken into account.} } \description{ Parameter Description: Min Number Of Events Per Stage } \keyword{internal} rpact/man/param_informationEpsilon.Rd0000644000176200001440000000147414335631011017467 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_informationEpsilon} \alias{param_informationEpsilon} \title{Parameter Description: Information Epsilon} \arguments{ \item{informationEpsilon}{Positive integer value specifying the absolute information epsilon, which defines the maximum distance from the observed information to the maximum information that causes the final analysis. 
Updates at the final analysis in case the observed information at the final analysis is smaller ("under-running") than the planned maximum information \code{maxInformation}, default is 0. Alternatively, a floating-point number > 0 and < 1 can be specified to define a relative information epsilon.} } \description{ Parameter Description: Information Epsilon } \keyword{internal} rpact/man/getSimulationEnrichmentRates.Rd0000644000176200001440000003020114417202031020253 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_enrichment_rates.R \name{getSimulationEnrichmentRates} \alias{getSimulationEnrichmentRates} \title{Get Simulation Enrichment Rates} \usage{ getSimulationEnrichmentRates( design = NULL, ..., effectList = NULL, intersectionTest = c("Simes", "SpiessensDebois", "Bonferroni", "Sidak"), stratifiedAnalysis = TRUE, directionUpper = TRUE, adaptations = NA, typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), effectMeasure = c("effectEstimate", "testStatistic"), successCriterion = c("all", "atLeastOne"), epsilonValue = NA_real_, rValue = NA_real_, threshold = -Inf, plannedSubjects = NA_real_, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, piTreatmentH1 = NA_real_, piControlH1 = NA_real_, maxNumberOfIterations = 1000L, seed = NA_real_, calcSubjectsFunction = NULL, selectPopulationsFunction = NULL, showStatistics = FALSE ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. 
In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{effectList}{List of subsets, prevalences, and effect sizes with columns and number of rows reflecting the different situations to consider (see examples).} \item{intersectionTest}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses. Four options are available in enrichment designs: \code{"SpiessensDebois"}, \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.} \item{stratifiedAnalysis}{Logical. For enrichment designs, typically a stratified analysis should be chosen. For testing rates, also a non-stratified analysis based on overall data can be performed. For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.} \item{directionUpper}{Logical. Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{adaptations}{A logical vector of length \code{kMax - 1} indicating whether or not an adaptation takes place at interim k, default is \code{rep(TRUE, kMax - 1)}.} \item{typeOfSelection}{The way the treatment arms or populations are selected at interim. Five options are available: \code{"best"}, \code{"rbest"}, \code{"epsilon"}, \code{"all"}, and \code{"userDefined"}, default is \code{"best"}.\cr For \code{"rbest"} (select the \code{rValue} best treatment arms/populations), the parameter \code{rValue} has to be specified, for \code{"epsilon"} (select treatment arm/population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. 
If \code{"userDefined"} is selected, \code{"selectArmsFunction"} or \code{"selectPopulationsFunction"} has to be specified.} \item{effectMeasure}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (difference for means and rates or ratio for survival) (\code{"effectEstimate"}), default is \code{"effectEstimate"}.} \item{successCriterion}{Defines when the study is stopped for efficacy at interim. Two options are available: \code{"all"} stops the trial if the efficacy criterion is fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim, default is \code{"all"}.} \item{epsilonValue}{For \code{typeOfSelection = "epsilon"} (select treatment arm / population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. Must be a numeric of length 1.} \item{rValue}{For \code{typeOfSelection = "rbest"} (select the \code{rValue} best treatment arms / populations), the parameter \code{rValue} has to be specified.} \item{threshold}{Selection criterion: treatment arm / population is selected only if \code{effectMeasure} exceeds \code{threshold}, default is \code{-Inf}. \code{threshold} can also be a vector of length \code{activeArms} referring to a separate threshold condition over the treatment arms.} \item{plannedSubjects}{\code{plannedSubjects} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs, \code{plannedSubjects} refers to the number of subjects per selected active arm.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. 
For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{minNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfSubjectsPerStage} with length kMax determines the minimum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{minNumberOfSubjectsPerStage} refers to the minimum number of subjects per selected active arm.} \item{maxNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfSubjectsPerStage} with length kMax determines the maximum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{maxNumberOfSubjectsPerStage} refers to the maximum number of subjects per selected active arm.} \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. 
Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} \item{piTreatmentH1}{If specified, the assumed probabilities in the active arm under which the sample size recalculation was performed and the conditional power was calculated.} \item{piControlH1}{If specified, the assumed probabilities in the control arm under which the sample size recalculation was performed and the conditional power was calculated.} \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcSubjectsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, sample size recalculation is performed with conditional power and specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples).} \item{selectPopulationsFunction}{Optionally, a function can be entered that defines the way of how populations are selected. This function is allowed to depend on \code{effectVector} with length \code{populations} and \code{stage} (see examples).} \item{showStatistics}{Logical. If \code{TRUE}, summary statistics of the simulated data are displayed for the \code{print} command, otherwise the output is suppressed, default is \code{FALSE}.} } \value{ Returns a \code{\link{SimulationResults}} object. 
The following generics (R generic functions) are available for this object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.SimulationResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the simulated power, stopping and selection probabilities, conditional power, and expected sample size for testing rates in an enrichment design testing situation. } \details{ At given design the function simulates the power, stopping probabilities, selection probabilities, and expected sample size at given number of subjects, parameter configuration, and treatment arm selection rule in the enrichment situation. An allocation ratio can be specified referring to the ratio of number of subjects in the active treatment groups as compared to the control group. The definition of \code{piTreatmentH1} and/or \code{piControlH1} makes only sense if \code{kMax} > 1 and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined. \code{calcSubjectsFunction}\cr This function returns the number of subjects at given conditional power and conditional critical value for specified testing situation. 
The function might depend on the variables \code{stage}, \code{selectedPopulations}, \code{directionUpper}, \code{plannedSubjects}, \code{allocationRatioPlanned}, \code{minNumberOfSubjectsPerStage}, \code{maxNumberOfSubjectsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, \code{overallRatesTreatment}, \code{overallRatesControl}, \code{piTreatmentH1}, and \code{piControlH1}. The function has to contain the three-dots argument '...' (see examples). } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ \dontrun{ # Assess a population selection strategy with two subset populations and # a binary endpoint using a stratified analysis. No early efficacy stop, # weighted inverse normal method with weight sqrt(0.4). 
pi2 <- c(0.3, 0.4, 0.3, 0.55) pi1Seq <- seq(0.0, 0.2, 0.2) pi1 <- matrix(rep(pi1Seq, length(pi2)), ncol = length(pi1Seq), byrow = TRUE) + pi2 effectList <- list( subGroups = c("S1", "S2", "S12", "R"), prevalences = c(0.1, 0.4, 0.2, 0.3), piControl = pi2, piTreatments = expand.grid(pi1[1, ], pi1[2, ], pi1[3, ], pi1[4, ]) ) design <- getDesignInverseNormal(informationRates = c(0.4, 1), typeOfDesign = "noEarlyEfficacy") simResultsPE <- getSimulationEnrichmentRates(design, plannedSubjects = c(150, 300), allocationRatioPlanned = 1.5, directionUpper = TRUE, effectList = effectList, stratifiedAnalysis = TRUE, intersectionTest = "Sidak", typeOfSelection = "epsilon", epsilonValue = 0.025, maxNumberOfIterations = 100) print(simResultsPE) } } rpact/man/param_selectArmsFunction.Rd0000644000176200001440000000105414232463334017421 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_selectArmsFunction} \alias{param_selectArmsFunction} \title{Parameter Description: Select Arms Function} \arguments{ \item{selectArmsFunction}{Optionally, a function can be entered that defines the way of how treatment arms are selected. This function is allowed to depend on \code{effectVector} with length \code{activeArms} and \code{stage} (see examples).} } \description{ Parameter Description: Select Arms Function } \keyword{internal} rpact/man/testPackage.Rd0000644000176200001440000000350514335631010014657 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_quality_assurance.R \name{testPackage} \alias{testPackage} \title{Test Package} \usage{ testPackage( outDir = ".", ..., completeUnitTestSetEnabled = TRUE, types = "tests", connection = list(token = NULL, secret = NULL) ) } \arguments{ \item{outDir}{The output directory where all test results shall be saved. 
By default the current working directory is used.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{completeUnitTestSetEnabled}{If \code{TRUE} (default) all existing unit tests will be executed; a subset of all unit tests will be used otherwise.} \item{types}{The type(s) of tests to be done. Can be one or more of \code{c("tests", "examples", "vignettes")}, default is "tests" only.} \item{connection}{A \code{list} where owners of the rpact validation documentation can enter a \code{token} and a \code{secret} to get full access to all unit tests, e.g., to fulfill regulatory requirements (see \href{https://www.rpact.com}{www.rpact.com} for more information).} } \value{ The value of \code{completeUnitTestSetEnabled} will be returned invisible. } \description{ This function allows the installed package \code{rpact} to be tested. } \details{ This function creates the subdirectory \code{rpact-tests} in the specified output directory and copies all unit test files of the package to this newly created directory. Then the function runs all tests (or a subset of all tests if \code{completeUnitTestSetEnabled} is \code{FALSE}) using \code{\link[tools]{testInstalledPackage}}. The test results will be saved to the text file \code{testthat.Rout} that can be found in the subdirectory \code{rpact-tests}. } \examples{ \dontrun{ testPackage() } } rpact/man/param_three_dots.Rd0000644000176200001440000000064214335631010015743 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_three_dots} \alias{param_three_dots} \title{Parameter Description: "..."} \arguments{ \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} } \description{ Parameter Description: "..." 
} \keyword{internal} rpact/man/param_plannedEvents.Rd0000644000176200001440000000132214335631011016406 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_plannedEvents} \alias{param_plannedEvents} \title{Parameter Description: Planned Events} \arguments{ \item{plannedEvents}{\code{plannedEvents} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) events in survival designs when the interim stages are planned. For two treatment arms, it is the number of events for both treatment arms. For multi-arm designs, \code{plannedEvents} refers to the overall number of events for the selected arms plus control.} } \description{ Parameter Description: Planned Events } \keyword{internal} rpact/man/param_stDevH1.Rd0000644000176200001440000000105414335631011015060 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_stDevH1} \alias{param_stDevH1} \title{Parameter Description: Standard Deviation Under Alternative} \arguments{ \item{stDevH1}{If specified, the value of the standard deviation under which the conditional power or sample size recalculation calculation is performed, default is the value of \code{stDev}. Must be a positive numeric of length 1.} } \description{ Parameter Description: Standard Deviation Under Alternative } \keyword{internal} rpact/man/dataEnrichmentRates.Rd0000644000176200001440000000101414313321260016341 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataEnrichmentRates} \alias{dataEnrichmentRates} \title{Enrichment Dataset of Rates} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataEnrichmentRates } \description{ A dataset containing the sample sizes and events of two groups. 
Use \code{getDataset(dataEnrichmentRates)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. } \keyword{internal} rpact/man/param_legendPosition.Rd0000644000176200001440000000165214335631010016570 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_legendPosition} \alias{param_legendPosition} \title{Parameter Description: Legend Position On Plots} \arguments{ \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} } \description{ Parameter Description: Legend Position On Plots } \keyword{internal} rpact/man/plot.SummaryFactory.Rd0000644000176200001440000000147714436052675016414 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_summary.R \name{plot.SummaryFactory} \alias{plot.SummaryFactory} \title{Summary Factory Plotting} \usage{ \method{plot}{SummaryFactory}(x, y, ..., showSummary = FALSE) } \arguments{ \item{x}{The summary factory object.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional plot arguments. 
At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{showSummary}{Show the summary before creating the plot output, default is \code{FALSE}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots a summary factory. } \details{ Generic function to plot all kinds of summary factories. } rpact/man/getSimulationMeans.Rd0000644000176200001440000003372514417202031016241 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_base_means.R \name{getSimulationMeans} \alias{getSimulationMeans} \title{Get Simulation Means} \usage{ getSimulationMeans( design = NULL, ..., groups = 2L, normalApproximation = TRUE, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = seq(0, 1, 0.2), stDev = 1, plannedSubjects = NA_real_, directionUpper = TRUE, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, stDevH1 = NA_real_, maxNumberOfIterations = 1000L, seed = NA_real_, calcSubjectsFunction = NULL, showStatistics = FALSE ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{normalApproximation}{The type of computation of the p-values. Default is \code{TRUE}, i.e., normally distributed test statistics are generated. 
If \code{FALSE}, the t test is used for calculating the p-values, i.e., t distributed test statistics are generated.} \item{meanRatio}{If \code{TRUE}, the design characteristics for one-sided testing of H0: \code{mu1 / mu2 = thetaH0} are simulated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{alternative}{The alternative hypothesis value for testing means under which the data is simulated. This can be a vector of assumed alternatives, default is \code{seq(0, 1, 0.2)}.} \item{stDev}{The standard deviation under which the data is simulated, default is \code{1}. If \code{meanRatio = TRUE} is specified, \code{stDev} defines the coefficient of variation \code{sigma / mu2}. Must be a positive numeric of length 1.} \item{plannedSubjects}{\code{plannedSubjects} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs, \code{plannedSubjects} refers to the number of subjects per selected active arm.} \item{directionUpper}{Logical. 
Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{minNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfSubjectsPerStage} with length kMax determines the minimum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{minNumberOfSubjectsPerStage} refers to the minimum number of subjects per selected active arm.} \item{maxNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfSubjectsPerStage} with length kMax determines the maximum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. 
For multi-arm designs \code{maxNumberOfSubjectsPerStage} refers to the maximum number of subjects per selected active arm.} \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} \item{thetaH1}{If specified, the value of the alternative under which the conditional power or sample size recalculation calculation is performed. Must be a numeric of length 1.} \item{stDevH1}{If specified, the value of the standard deviation under which the conditional power or sample size recalculation calculation is performed, default is the value of \code{stDev}. Must be a positive numeric of length 1.} \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcSubjectsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, sample size recalculation is performed with conditional power and specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples).} \item{showStatistics}{Logical. 
If \code{TRUE}, summary statistics of the simulated data are displayed for the \code{print} command, otherwise the output is suppressed, default is \code{FALSE}.} } \value{ Returns a \code{\link{SimulationResults}} object. The following generics (R generic functions) are available for this object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.SimulationResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the simulated power, stopping probabilities, conditional power, and expected sample size for testing means in a one or two treatment groups testing situation. } \details{ At given design the function simulates the power, stopping probabilities, conditional power, and expected sample size at given number of subjects and parameter configuration. Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number of subjects in the two treatment groups. The definition of \code{thetaH1} makes only sense if \code{kMax} > 1 and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined. \code{calcSubjectsFunction}\cr This function returns the number of subjects at given conditional power and conditional critical value for specified testing situation. 
The function might depend on variables \code{stage}, \code{meanRatio}, \code{thetaH0}, \code{groups}, \code{plannedSubjects}, \code{sampleSizesPerStage}, \code{directionUpper}, \code{allocationRatioPlanned}, \code{minNumberOfSubjectsPerStage}, \code{maxNumberOfSubjectsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, \code{thetaH1}, and \code{stDevH1}. The function has to contain the three-dots argument '...' (see examples). } \section{Simulation Data}{ The summary statistics "Simulated data" contains the following parameters: median \link{range}; mean +/-sd\cr \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable the output of the aggregated simulated data.\cr Example 1: \cr \code{simulationResults <- getSimulationMeans(plannedSubjects = 40)} \cr \code{simulationResults$show(showStatistics = FALSE)}\cr Example 2: \cr \code{simulationResults <- getSimulationMeans(plannedSubjects = 40)} \cr \code{simulationResults$setShowStatistics(FALSE)}\cr \code{simulationResults}\cr \code{\link[=getData]{getData()}} can be used to get the aggregated simulated data from the object as \code{\link[base]{data.frame}}. The data frame contains the following columns: \enumerate{ \item \code{iterationNumber}: The number of the simulation iteration. \item \code{stageNumber}: The stage. \item \code{alternative}: The alternative hypothesis value. \item \code{numberOfSubjects}: The number of subjects under consideration when the (interim) analysis takes place. \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise. \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise. \item \code{testStatistic}: The test statistic that is used for the test decision, depends on which design was chosen (group sequential, inverse normal, or Fisher's combination test). 
\item \code{testStatisticsPerStage}: The test statistic for each stage if only data from the considered stage is taken into account. \item \code{effectEstimate}: Overall simulated standardized effect estimate. \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise. \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for selected sample size and effect. The effect is either estimated from the data or can be user defined with \code{thetaH1}. } } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \examples{ # Fixed sample size design with two groups, total sample size 40, # alternative = c(0, 0.2, 0.4, 0.8, 1), and standard deviation = 1 (the default) getSimulationMeans(plannedSubjects = 40, maxNumberOfIterations = 10) \dontrun{ # Increase number of simulation iterations and compare results # with power calculator using normal approximation getSimulationMeans(alternative = 0:4, stDev = 5, plannedSubjects = 40, maxNumberOfIterations = 1000) getPowerMeans(alternative = 0:4, stDev = 5, maxNumberOfSubjects = 40, normalApproximation = TRUE) # Do the same for a three-stage O'Brien&Fleming inverse # normal group sequential design with non-binding futility stops designIN <- getDesignInverseNormal(typeOfDesign = "OF", futilityBounds = c(0, 0)) x <- getSimulationMeans(designIN, alternative = c(0:4), stDev = 5, plannedSubjects = c(20, 40, 60), maxNumberOfIterations = 1000) getPowerMeans(designIN, alternative = 0:4, stDev = 5, maxNumberOfSubjects = 60, normalApproximation = TRUE) # Assess power and average sample size if a sample size increase is foreseen # at conditional power 80\% for each subsequent stage based on observed overall # effect and specified minNumberOfSubjectsPerStage and # maxNumberOfSubjectsPerStage getSimulationMeans(designIN, alternative = 0:4, stDev = 5, plannedSubjects = c(20, 40, 60), minNumberOfSubjectsPerStage = c(NA, 20, 20), maxNumberOfSubjectsPerStage = c(NA, 80, 80), conditionalPower = 0.8, maxNumberOfIterations = 50) # Do the same under the assumption that a sample size increase only takes # place at the first interim. The sample size for the third stage is set equal # to the second stage sample size. 
mySampleSizeCalculationFunction <- function(..., stage, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, sampleSizesPerStage, conditionalPower, conditionalCriticalValue, allocationRatioPlanned, thetaH1, stDevH1) { if (stage <= 2) { stageSubjects <- (1 + allocationRatioPlanned)^2/allocationRatioPlanned * (max(0, conditionalCriticalValue + stats::qnorm(conditionalPower)))^2 / (max(1e-12, thetaH1/stDevH1))^2 stageSubjects <- min(max(minNumberOfSubjectsPerStage[stage], stageSubjects), maxNumberOfSubjectsPerStage[stage]) } else { stageSubjects <- sampleSizesPerStage[stage - 1] } return(stageSubjects) } getSimulationMeans(designIN, alternative = 0:4, stDev = 5, plannedSubjects = c(20, 40, 60), minNumberOfSubjectsPerStage = c(NA, 20, 20), maxNumberOfSubjectsPerStage = c(NA, 80, 80), conditionalPower = 0.8, calcSubjectsFunction = mySampleSizeCalculationFunction, maxNumberOfIterations = 50) } } rpact/man/getDesignFisher.Rd0000644000176200001440000001120114427374266015511 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_fisher_combination_test.R \name{getDesignFisher} \alias{getDesignFisher} \title{Get Design Fisher} \usage{ getDesignFisher( ..., kMax = NA_integer_, alpha = NA_real_, method = c("equalAlpha", "fullAlpha", "noInteraction", "userDefinedAlpha"), userAlphaSpending = NA_real_, alpha0Vec = NA_real_, informationRates = NA_real_, sided = 1, bindingFutility = NA, tolerance = 1e-14, iterations = 0, seed = NA_real_ ) } \arguments{ \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{kMax}{The maximum number of stages \code{K}. Must be a positive integer of length 1 (default value is \code{3}). The maximum selectable \code{kMax} is \code{20} for group sequential or inverse normal and \code{6} for Fisher combination test designs.} \item{alpha}{The significance level alpha, default is \code{0.025}. 
Must be a positive numeric of length 1.} \item{method}{\code{"equalAlpha"}, \code{"fullAlpha"}, \code{"noInteraction"}, or \code{"userDefinedAlpha"}, default is \code{"equalAlpha"} (for details, see Wassmer, 1999).} \item{userAlphaSpending}{The user defined alpha spending. Numeric vector of length \code{kMax} containing the cumulative alpha-spending (Type I error rate) up to each interim stage: \code{0 <= alpha_1 <= ... <= alpha_K <= alpha}.} \item{alpha0Vec}{Stopping for futility bounds for stage-wise p-values.} \item{informationRates}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}.} \item{sided}{Is the alternative one-sided (\code{1}) or two-sided (\code{2}), default is \code{1}. Must be a positive integer of length 1.} \item{bindingFutility}{If \code{bindingFutility = TRUE} is specified the calculation of the critical values is affected by the futility bounds (default is \code{TRUE}).} \item{tolerance}{The numerical tolerance, default is \code{1e-14}.} \item{iterations}{The number of simulation iterations, e.g., \code{getDesignFisher(iterations = 100000)} checks the validity of the critical values for the design. The default value of \code{iterations} is 0, i.e., no simulation will be executed.} \item{seed}{Seed for simulating the power for Fisher's combination test. See above, default is a random seed.} } \value{ Returns a \code{\link{TrialDesign}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesign]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesign]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Performs Fisher's combination test and returns critical values for this design. } \details{ \code{getDesignFisher()} calculates the critical values and stage levels for Fisher's combination test as described in Bauer (1989), Bauer and Koehne (1994), Bauer and Roehmel (1995), and Wassmer (1999) for equally and unequally sized stages. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ # Calculate critical values for a two-stage Fisher's combination test # with full level alpha = 0.05 at the final stage and stopping for # futility bound alpha0 = 0.50, as described in Bauer and Koehne (1994). getDesignFisher(kMax = 2, method = "fullAlpha", alpha = 0.05, alpha0Vec = 0.50) } \seealso{ \code{\link[=getDesignSet]{getDesignSet()}} for creating a set of designs to compare. 
Other design functions: \code{\link{getDesignCharacteristics}()}, \code{\link{getDesignConditionalDunnett}()}, \code{\link{getDesignGroupSequential}()}, \code{\link{getDesignInverseNormal}()}, \code{\link{getGroupSequentialProbabilities}()}, \code{\link{getPowerAndAverageSampleNumber}()} } \concept{design functions} rpact/man/getSimulationEnrichmentSurvival.Rd0000644000176200001440000002757014417202031021027 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_enrichment_survival.R \name{getSimulationEnrichmentSurvival} \alias{getSimulationEnrichmentSurvival} \title{Get Simulation Enrichment Survival} \usage{ getSimulationEnrichmentSurvival( design = NULL, ..., effectList = NULL, intersectionTest = c("Simes", "SpiessensDebois", "Bonferroni", "Sidak"), stratifiedAnalysis = TRUE, directionUpper = TRUE, adaptations = NA, typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), effectMeasure = c("effectEstimate", "testStatistic"), successCriterion = c("all", "atLeastOne"), epsilonValue = NA_real_, rValue = NA_real_, threshold = -Inf, plannedEvents = NA_real_, allocationRatioPlanned = NA_real_, minNumberOfEventsPerStage = NA_real_, maxNumberOfEventsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, maxNumberOfIterations = 1000L, seed = NA_real_, calcEventsFunction = NULL, selectPopulationsFunction = NULL, showStatistics = FALSE ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. 
In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{effectList}{List of subsets, prevalences, and effect sizes with columns and number of rows reflecting the different situations to consider (see examples).} \item{intersectionTest}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses. Four options are available in enrichment designs: \code{"SpiessensDebois"}, \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.} \item{stratifiedAnalysis}{Logical. For enrichment designs, typically a stratified analysis should be chosen. For testing rates, also a non-stratified analysis based on overall data can be performed. For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.} \item{directionUpper}{Logical. Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{adaptations}{A logical vector of length \code{kMax - 1} indicating whether or not an adaptation takes place at interim k, default is \code{rep(TRUE, kMax - 1)}.} \item{typeOfSelection}{The way the treatment arms or populations are selected at interim. Five options are available: \code{"best"}, \code{"rbest"}, \code{"epsilon"}, \code{"all"}, and \code{"userDefined"}, default is \code{"best"}.\cr For \code{"rbest"} (select the \code{rValue} best treatment arms/populations), the parameter \code{rValue} has to be specified, for \code{"epsilon"} (select treatment arm/population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. 
If \code{"userDefined"} is selected, \code{"selectArmsFunction"} or \code{"selectPopulationsFunction"} has to be specified.} \item{effectMeasure}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (difference for means and rates or ratio for survival) (\code{"effectEstimate"}), default is \code{"effectEstimate"}.} \item{successCriterion}{Defines when the study is stopped for efficacy at interim. Two options are available: \code{"all"} stops the trial if the efficacy criterion is fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim, default is \code{"all"}.} \item{epsilonValue}{For \code{typeOfSelection = "epsilon"} (select treatment arm / population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. Must be a numeric of length 1.} \item{rValue}{For \code{typeOfSelection = "rbest"} (select the \code{rValue} best treatment arms / populations), the parameter \code{rValue} has to be specified.} \item{threshold}{Selection criterion: treatment arm / population is selected only if \code{effectMeasure} exceeds \code{threshold}, default is \code{-Inf}. \code{threshold} can also be a vector of length \code{activeArms} referring to a separate threshold condition over the treatment arms.} \item{plannedEvents}{\code{plannedEvents} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) events in survival designs when the interim stages are planned. For two treatment arms, it is the number of events for both treatment arms. 
For multi-arm designs, \code{plannedEvents} refers to the overall number of events for the selected arms plus control.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{minNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfEventsPerStage} with length kMax determines the minimum number of events per stage (i.e., not cumulated), the first element is not taken into account.} \item{maxNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfEventsPerStage} with length kMax determines the maximum number of events per stage (i.e., not cumulated), the first element is not taken into account.} \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. 
Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} \item{thetaH1}{If specified, the value of the alternative under which the conditional power or sample size recalculation calculation is performed. Must be a numeric of length 1.} \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcEventsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, event number recalculation is performed with conditional power and specified \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} (see details and examples).} \item{selectPopulationsFunction}{Optionally, a function can be entered that defines the way of how populations are selected. This function is allowed to depend on \code{effectVector} with length \code{populations} and \code{stage} (see examples).} \item{showStatistics}{Logical. If \code{TRUE}, summary statistics of the simulated data are displayed for the \code{print} command, otherwise the output is suppressed, default is \code{FALSE}.} } \value{ Returns a \code{\link{SimulationResults}} object. 
The following generics (R generic functions) are available for this object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.SimulationResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the simulated power, stopping and selection probabilities, conditional power, and expected sample size for testing hazard ratios in an enrichment design testing situation. In contrast to \code{getSimulationSurvival()} (where survival times are simulated), normally distributed logrank test statistics are simulated. } \details{ At given design the function simulates the power, stopping probabilities, selection probabilities, and expected event number at given number of events, parameter configuration, and population selection rule in the enrichment situation. An allocation ratio can be specified referring to the ratio of number of subjects in the active treatment group as compared to the control group. The definition of \code{thetaH1} makes only sense if \code{kMax} > 1 and if \code{conditionalPower}, \code{minNumberOfEventsPerStage}, and \code{maxNumberOfEventsPerStage} (or \code{calcEventsFunction}) are defined. \code{calcEventsFunction}\cr This function returns the number of events at given conditional power and conditional critical value for specified testing situation. 
The function might depend on the variables \code{stage}, \code{selectedPopulations}, \code{plannedEvents}, \code{directionUpper}, \code{allocationRatioPlanned}, \code{minNumberOfEventsPerStage}, \code{maxNumberOfEventsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, and \code{overallEffects}. The function has to contain the three-dots argument '...' (see examples). } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ \dontrun{ # Assess a population selection strategy with one subset population and # a survival endpoint. The considered situations are defined through the # event rates yielding a range of hazard ratios in the subsets. Design # with O'Brien and Fleming alpha spending and a reassessment of event # number in the first interim based on conditional power and assumed # hazard ratio using weighted inverse normal combination test. 
subGroups <- c("S", "R") prevalences <- c(0.40, 0.60) p2 <- c(0.3, 0.4) range1 <- p2[1] + seq(0, 0.3, 0.05) p1 <- c() for (x1 in range1) { p1 <- c(p1, x1, p2[2] + 0.1) } hazardRatios <- log(matrix(1 - p1, byrow = TRUE, ncol = 2)) / matrix(log(1 - p2), byrow = TRUE, ncol = 2, nrow = length(range1)) effectList <- list(subGroups=subGroups, prevalences=prevalences, hazardRatios = hazardRatios) design <- getDesignInverseNormal(informationRates = c(0.3, 0.7, 1), typeOfDesign = "asOF") simResultsPE <- getSimulationEnrichmentSurvival(design, plannedEvents = c(40, 90, 120), effectList = effectList, typeOfSelection = "rbest", rValue = 2, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA, 50, 30), maxNumberOfEventsPerStage = c(NA, 150, 30), thetaH1 = 4 / 3, maxNumberOfIterations = 100) print(simResultsPE) } } rpact/man/ConditionalPowerResultsSurvival.Rd0000644000176200001440000000316114450467342021035 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{ConditionalPowerResultsSurvival} \alias{ConditionalPowerResultsSurvival} \title{Conditional Power Results Survival} \description{ Class for conditional power calculations of survival data } \details{ This object cannot be created directly; use \code{\link{getConditionalPower}} with suitable arguments to create the results of a group sequential or a combination test design. } \section{Fields}{ \describe{ \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{iterations}}{The number of iterations used for simulations. 
Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{simulated}}{Describes if the power for Fisher's combination test has been simulated. Only applicable when using Fisher designs. Is a logical vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} }} \keyword{internal} rpact/man/getClosedConditionalDunnettTestResults.Rd0000644000176200001440000000737214411251745022323 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_multiarm.R \name{getClosedConditionalDunnettTestResults} \alias{getClosedConditionalDunnettTestResults} \title{Get Closed Conditional Dunnett Test Results} \usage{ getClosedConditionalDunnettTestResults( stageResults, ..., stage = stageResults$stage ) } \arguments{ \item{stageResults}{The results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}}.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} } \value{ Returns a \code{\link{ClosedCombinationTestResults}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Calculates and returns the results from the closed conditional Dunnett test. } \details{ For performing the conditional Dunnett test the design must be defined through the function \code{\link[=getDesignConditionalDunnett]{getDesignConditionalDunnett()}}.\cr See Koenig et al. (2008) and Wassmer & Brannath (2016), chapter 11 for details of the test procedure. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ \dontrun{ # In a two-stage design a conditional Dunnett test should be performed # where the unconditional second stage p-values should be used for the # test decision. # At the first stage the second treatment arm was dropped. 
The results of # a closed conditional Dunnett test are obtained as follows with the given # data (treatment arm 4 refers to the reference group): data <- getDataset( n1 = c(22, 23), n2 = c(21, NA), n3 = c(20, 25), n4 = c(25, 27), means1 = c(1.63, 1.51), means2 = c(1.4, NA), means3 = c(0.91, 0.95), means4 = c(0.83, 0.75), stds1 = c(1.2, 1.4), stds2 = c(1.3, NA), stds3 = c(1.1, 1.14), stds4 = c(1.02, 1.18)) # For getting the results of the closed test procedure, use the following commands: design <- getDesignConditionalDunnett(secondStageConditioning = FALSE) stageResults <- getStageResults(design, dataInput = data) getClosedConditionalDunnettTestResults(stageResults) } } \seealso{ Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedCombinationTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getStageResults}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/param_maxNumberOfIterations.Rd0000644000176200001440000000072214312324046020072 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_maxNumberOfIterations} \alias{param_maxNumberOfIterations} \title{Parameter Description: Maximum Number Of Iterations} \arguments{ \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. 
Must be a positive integer of length 1.} } \description{ Parameter Description: Maximum Number Of Iterations } \keyword{internal} rpact/man/getPowerMeans.Rd0000644000176200001440000001456514417202031015212 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getPowerMeans} \alias{getPowerMeans} \title{Get Power Means} \usage{ getPowerMeans( design = NULL, ..., groups = 2L, normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = seq(0, 1, 0.2), stDev = 1, directionUpper = NA, maxNumberOfSubjects = NA_real_, allocationRatioPlanned = NA_real_ ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{normalApproximation}{The type of computation of the p-values. If \code{TRUE}, the variance is assumed to be known, default is \code{FALSE}, i.e., the calculations are performed with the t distribution.} \item{meanRatio}{If \code{TRUE}, the sample size for one-sided testing of H0: \code{mu1 / mu2 = thetaH0} is calculated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. 
\item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{alternative}{The alternative hypothesis value for testing means. This can be a vector of assumed alternatives, default is \code{seq(0, 1, 0.2)} (power calculations) or \code{seq(0.2, 1, 0.2)} (sample size calculations).} \item{stDev}{The standard deviation under which the sample size or power calculation is performed, default is \code{1}. If \code{meanRatio = TRUE} is specified, \code{stDev} defines the coefficient of variation \code{sigma / mu2}. Must be a positive numeric of length 1.} \item{directionUpper}{Logical. Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified. For two treatment arms, it is the maximum number of subjects for both treatment arms.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} } \value{ Returns a \code{\link{TrialDesignPlan}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.TrialDesignSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesignPlan]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesignPlan]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the power, stopping probabilities, and expected sample size for testing means in one or two samples at given sample size. } \details{ At given design the function calculates the power, stopping probabilities, and expected sample size, for testing means at given sample size. In a two treatment groups design, additionally, an allocation ratio = \code{n1 / n2} can be specified. A null hypothesis value thetaH0 != 0 for testing the difference of two means or \code{thetaH0 != 1} for testing the ratio of two means can be specified. For the specified sample size, critical bounds and stopping for futility bounds are provided at the effect scale (mean, mean difference, or mean ratio, respectively) } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \examples{ # Calculate the power, stopping probabilities, and expected sample size # for testing H0: mu1 - mu2 = 0 in a two-armed design against a range of # alternatives H1: mu1 - mu2 = delta, delta = (0, 1, 2, 3, 4, 5), # standard deviation sigma = 8, maximum sample size N = 80 (both treatment # arms), and an allocation ratio n1/n2 = 2. The design is a three stage # O'Brien & Fleming design with non-binding futility bounds (-0.5, 0.5) # for the two interims. The computation takes into account that the t test # is used (normalApproximation = FALSE). getPowerMeans(getDesignGroupSequential(alpha = 0.025, sided = 1, futilityBounds = c(-0.5, 0.5)), groups = 2, alternative = c(0:5), stDev = 8, normalApproximation = FALSE, maxNumberOfSubjects = 80, allocationRatioPlanned = 2) } \seealso{ Other power functions: \code{\link{getPowerRates}()}, \code{\link{getPowerSurvival}()} } \concept{power functions} rpact/man/dataMultiArmSurvival.Rd0000644000176200001440000000106414313321260016543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataMultiArmSurvival} \alias{dataMultiArmSurvival} \title{Multi-Arm Dataset of Survival Data} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataMultiArmSurvival } \description{ A dataset containing the log-rank statistics, events, and allocation ratios of three groups. Use \code{getDataset(dataMultiArmSurvival)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. } \keyword{internal} rpact/man/TrialDesignCharacteristics.Rd0000644000176200001440000000416014450467342017700 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignCharacteristics} \alias{TrialDesignCharacteristics} \title{Trial Design Characteristics} \description{ Class for trial design characteristics. 
} \details{ \code{TrialDesignCharacteristics} contains all fields required to collect the characteristics of a design. This object should not be created directly; use \code{getDesignCharacteristics} with suitable arguments to create it. } \section{Fields}{ \describe{ \item{\code{nFixed}}{The sample size in a fixed (one-stage) design. Is a positive numeric vector.} \item{\code{shift}}{The shift value for group sequential test characteristics. Is a numeric vector of length 1.} \item{\code{inflationFactor}}{The relative increase of maximum sample size in a group sequential design as compared to the fixed sample size case. Is a positive numeric vector of length 1.} \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{information}}{The information over stages needed to achieve power of the specified design. Is a numeric vector of length \code{kMax}.} \item{\code{power}}{The one-sided power at each stage of the trial. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{rejectionProbabilities}}{The rejection probabilities over treatment arms or populations and stages. Is a numeric vector.} \item{\code{futilityProbabilities}}{The overall probabilities of stopping the trial for futility. Is a numeric vector of length \code{kMax} minus 1 containing values between 0 and 1.} \item{\code{averageSampleNumber1}}{The expected sample size under H1. Is a positive numeric vector of length 1.} \item{\code{averageSampleNumber01}}{The expected sample size for a value between H0 and H1. Is a positive numeric vector of length 1.} \item{\code{averageSampleNumber0}}{The expected sample size under H0. Is a positive numeric vector of length 1.} }} \seealso{ \code{\link{getDesignCharacteristics}} for getting the design characteristics. 
} \keyword{internal} rpact/man/param_gED50.Rd0000644000176200001440000000062414335631011014410 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_gED50} \alias{param_gED50} \title{Parameter Description: G ED50} \arguments{ \item{gED50}{If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"gED50"} has to be entered to specify the ED50 of the sigmoid Emax model.} } \description{ Parameter Description: G ED50 } \keyword{internal} rpact/man/resetLogLevel.Rd0000644000176200001440000000120614335631006015201 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_logger.R \name{resetLogLevel} \alias{resetLogLevel} \title{Reset Log Level} \usage{ resetLogLevel() } \description{ Resets the \code{rpact} log level. } \details{ This function resets the log level of the \code{rpact} internal log message system to the default value \code{"PROGRESS"}. } \examples{ \dontrun{ # reset log level to default value resetLogLevel() } } \seealso{ \itemize{ \item \code{\link[=getLogLevel]{getLogLevel()}} for getting the current log level, \item \code{\link[=setLogLevel]{setLogLevel()}} for setting the log level. } } \keyword{internal} rpact/man/StageResultsSurvival.Rd0000644000176200001440000000547514450467342016632 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsSurvival} \alias{StageResultsSurvival} \title{Stage Results of Survival Data} \description{ Class for stage results survival data. } \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of survival data. } \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. 
Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{overallTestStatistics}}{The overall, i.e., cumulated test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{separatePValues}}{The p-values from the separate stages. Is a numeric matrix.} \item{\code{singleStepAdjustedPValues}}{The adjusted p-value for testing multiple hypotheses per stage of the trial.} \item{\code{overallPValues}}{The overall, i.e., cumulated p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{direction}}{Specifies the direction of the alternative, is either "upper" or "lower". Only applicable for one-sided testing.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. 
Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. Is a numeric vector of length \code{kMax}.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{...}}{Names of \code{dataInput}.} }} \keyword{internal} rpact/man/TrialDesignPlan.Rd0000644000176200001440000000072414412737172015460 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \docType{class} \name{TrialDesignPlan} \alias{TrialDesignPlan} \title{Basic Trial Design Plan} \description{ Basic class for trial design plans. } \details{ \code{TrialDesignPlan} is the basic class for \itemize{ \item \code{\link{TrialDesignPlanMeans}}, \item \code{\link{TrialDesignPlanRates}}, and \item \code{\link{TrialDesignPlanSurvival}}. } } \keyword{internal} rpact/man/dataRates.Rd0000644000176200001440000000074014313321260014331 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataRates} \alias{dataRates} \title{One-Arm Dataset of Rates} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataRates } \description{ A dataset containing the sample sizes and events of one group. Use \code{getDataset(dataRates)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. 
} \keyword{internal} rpact/man/getSampleSizeRates.Rd0000644000176200001440000001363214372411347016213 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getSampleSizeRates} \alias{getSampleSizeRates} \title{Get Sample Size Rates} \usage{ getSampleSizeRates( design = NULL, ..., groups = 2, normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = c(0.4, 0.5, 0.6), pi2 = 0.2, allocationRatioPlanned = NA_real_ ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{normalApproximation}{If \code{FALSE}, the sample size for the case of one treatment group is calculated exactly using the binomial distribution, default is \code{TRUE}.} \item{riskRatio}{If \code{TRUE}, the sample size for one-sided testing of H0: \code{pi1 / pi2 = thetaH0} is calculated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. 
\item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{pi1}{A numeric value or vector that represents the assumed probability in the active treatment group if two treatment groups are considered, or the alternative probability for a one treatment group design, default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or \code{seq(0.4, 0.6, 0.1)} (sample size calculations).} \item{pi2}{A numeric value that represents the assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the smallest overall sample size is determined.} } \value{ Returns a \code{\link{TrialDesignPlan}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.TrialDesignSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesignPlan]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesignPlan]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the sample size for testing rates in one or two samples. } \details{ At given design the function calculates the stage-wise (non-cumulated) and maximum sample size for testing rates. 
In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. If a null hypothesis value thetaH0 != 0 for testing the difference of two rates thetaH0 != 1 for testing the risk ratio is specified, the sample size formula according to Farrington & Manning (Statistics in Medicine, 1990) is used. Critical bounds and stopping for futility bounds are provided at the effect scale (rate, rate difference, or rate ratio, respectively) for each sample size calculation separately. For the two-sample case, the calculation here is performed at fixed pi2 as given as argument in the function. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \examples{ # Calculate the stage-wise sample sizes, maximum sample sizes, and the optimum # allocation ratios for a range of pi1 values when testing # H0: pi1 - pi2 = -0.1 within a two-stage O'Brien & Fleming design; # alpha = 0.05 one-sided, power 1 - beta = 90\%: getSampleSizeRates(getDesignGroupSequential(kMax = 2, alpha = 0.05, beta = 0.1), groups = 2, thetaH0 = -0.1, pi1 = seq(0.4, 0.55, 0.025), pi2 = 0.4, allocationRatioPlanned = 0) \dontrun{ # Calculate the stage-wise sample sizes, maximum sample sizes, and the optimum # allocation ratios for a range of pi1 values when testing # H0: pi1 / pi2 = 0.80 within a three-stage O'Brien & Fleming design; # alpha = 0.025 one-sided, power 1 - beta = 90\%: getSampleSizeRates(getDesignGroupSequential(kMax = 3, alpha = 0.025, beta = 0.1), groups = 2, riskRatio = TRUE, thetaH0 = 0.80, pi1 = seq(0.3, 0.5, 0.025), pi2 = 0.3, allocationRatioPlanned = 0) } } \seealso{ Other sample size functions: \code{\link{getSampleSizeMeans}()}, \code{\link{getSampleSizeSurvival}()} } \concept{sample size functions} rpact/man/setLogLevel.Rd0000644000176200001440000000222514335631006014654 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_logger.R \name{setLogLevel} \alias{setLogLevel} \title{Set Log Level} \usage{ setLogLevel( logLevel = c("PROGRESS", "ERROR", "WARN", "INFO", "DEBUG", "TRACE", "DISABLED") ) } \arguments{ \item{logLevel}{The new log level to set. Can be one of "PROGRESS", "ERROR", "WARN", "INFO", "DEBUG", "TRACE", "DISABLED". Default is "PROGRESS".} } \description{ Sets the \code{rpact} log level. } \details{ This function sets the log level of the \code{rpact} internal log message system. By default only calculation progress messages will be shown on the output console, particularly \code{\link[=getAnalysisResults]{getAnalysisResults()}} shows this kind of messages. The output of these messages can be disabled by setting the log level to \code{"DISABLED"}. 
} \examples{ \dontrun{ # show debug messages setLogLevel("DEBUG") # disable all log messages setLogLevel("DISABLED") } } \seealso{ \itemize{ \item \code{\link[=getLogLevel]{getLogLevel()}} for getting the current log level, \item \code{\link[=resetLogLevel]{resetLogLevel()}} for resetting the log level to default. } } \keyword{internal} rpact/man/param_sided.Rd0000644000176200001440000000061514312324046014676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_sided} \alias{param_sided} \title{Parameter Description: Sided} \arguments{ \item{sided}{Is the alternative one-sided (\code{1}) or two-sided (\code{2}), default is \code{1}. Must be a positive integer of length 1.} } \description{ Parameter Description: Sided } \keyword{internal} rpact/man/AnalysisResultsEnrichment.Rd0000644000176200001440000000100514335631007017605 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsEnrichment} \alias{AnalysisResultsEnrichment} \title{Basic Class for Analysis Results Enrichment} \description{ A basic class for enrichment analysis results. } \details{ \code{AnalysisResultsEnrichment} is the basic class for \itemize{ \item \code{\link{AnalysisResultsEnrichmentFisher}} and \item \code{\link{AnalysisResultsEnrichmentInverseNormal}}. } } \keyword{internal} rpact/man/param_stratifiedAnalysis.Rd0000644000176200001440000000115614335631011017447 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_stratifiedAnalysis} \alias{param_stratifiedAnalysis} \title{Parameter Description: Stratified Analysis} \arguments{ \item{stratifiedAnalysis}{Logical. For enrichment designs, typically a stratified analysis should be chosen. For testing rates, also a non-stratified analysis based on overall data can be performed. 
For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.} } \description{ Parameter Description: Stratified Analysis } \keyword{internal} rpact/man/param_dropoutRate2.Rd0000644000176200001440000000057714232463333016213 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_dropoutRate2} \alias{param_dropoutRate2} \title{Parameter Description: Dropout Rate (2)} \arguments{ \item{dropoutRate2}{The assumed drop-out rate in the control group, default is \code{0}.} } \description{ Parameter Description: Dropout Rate (2) } \keyword{internal} rpact/man/kable.Rd0000644000176200001440000000103114447544614013513 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{kable} \alias{kable} \title{Create tables in Markdown} \usage{ kable(x, ...) } \arguments{ \item{x}{The object that inherits from \code{\link{ParameterSet}}.} \item{...}{Other arguments (see \code{\link[knitr]{kable}}).} } \description{ The \code{kable()} function returns a single table for a single object that inherits from class \code{\link{ParameterSet}}. } \details{ Generic to represent a parameter set in Markdown. } rpact/man/AnalysisResultsMultiArm.Rd0000644000176200001440000000106114335631007017245 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsMultiArm} \alias{AnalysisResultsMultiArm} \title{Basic Class for Analysis Results Multi-Arm} \description{ A basic class for multi-arm analysis results. } \details{ \code{AnalysisResultsMultiArm} is the basic class for \itemize{ \item \code{\link{AnalysisResultsMultiArmFisher}}, \item \code{\link{AnalysisResultsMultiArmInverseNormal}}, and \item \code{\link{AnalysisResultsConditionalDunnett}}. 
} } \keyword{internal} rpact/man/AnalysisResultsConditionalDunnett.Rd0000644000176200001440000000756714450467342021350 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsConditionalDunnett} \alias{AnalysisResultsConditionalDunnett} \title{Analysis Results Multi-Arm Conditional Dunnett} \description{ Class for multi-arm analysis results based on a conditional Dunnett test design. } \details{ This object cannot be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the multi-arm analysis results of a conditional Dunnett test design. } \section{Fields}{ \describe{ \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. 
Is a positive numeric vector of length 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDevs}}{Assumed standard deviations to calculate conditional power in multi-arm trials or enrichment designs. Is a numeric vector.} \item{\code{piTreatments}}{The assumed rates in the treatment groups for multi-arm and enrichment designs, i.e., designs with multiple subsets.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple (i.e., >2) treatment arms or population enrichment designs when testing means. Available options for multiple arms: \code{"overallPooled", "pairwisePooled", "notPooled"}. Available options for enrichment designs: \code{"pooled", "pooledFromFull", "notPooled"}.} \item{\code{conditionalRejectionProbabilities}}{The probabilities of rejecting the null hypothesis at each stage, given the stage has been reached. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{repeatedConfidenceIntervalLowerBounds}}{The lower bound of the confidence intervals that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedPValues}}{The p-values that are calculated at any stage of the trial. 
Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{piControl}}{The assumed probability in the control arm for simulation and under which the sample size recalculation is performed. Is a numeric vector of length 1 containing a value between 0 and 1.} }} \keyword{internal} rpact/man/TrialDesignInverseNormal.Rd0000644000176200001440000001260014450467342017347 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesignInverseNormal} \alias{TrialDesignInverseNormal} \title{Inverse Normal Design} \description{ Trial design for inverse normal method. } \details{ This object should not be created directly; use \code{\link[=getDesignInverseNormal]{getDesignInverseNormal()}} with suitable arguments to create a inverse normal design. } \section{Fields}{ \describe{ \item{\code{kMax}}{The maximum number of stages \code{K}. Is a numeric vector of length 1 containing a whole number.} \item{\code{alpha}}{The significance level alpha, default is 0.025. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{informationRates}}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{userAlphaSpending}}{The user defined alpha spending. Contains the cumulative alpha-spending (type I error rate) up to each interim stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{criticalValues}}{The critical values for each stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{stageLevels}}{The adjusted significance levels to reach significance in a group sequential design. 
Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{alphaSpent}}{The cumulative alpha spent at each stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{bindingFutility}}{If \code{TRUE}, the calculation of the critical values is affected by the futility bounds and the futility threshold is binding in the sense that the study must be stopped if the futility condition was reached (default is \code{FALSE}) Is a logical vector of length 1.} \item{\code{tolerance}}{The numerical tolerance, default is \code{1e-06}. Is a numeric vector of length 1.} \item{\code{typeOfDesign}}{The type of design. Is a character vector of length 1.} \item{\code{beta}}{The Type II error rate necessary for providing sample size calculations (e.g., in \code{getSampleSizeMeans}), beta spending function designs, or optimum designs, default is \code{0.20}. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{deltaWT}}{Delta for Wang & Tsiatis Delta class. Is a numeric vector of length 1.} \item{\code{deltaPT1}}{Delta1 for Pampallona & Tsiatis class rejecting H0 boundaries. Is a numeric vector of length 1.} \item{\code{deltaPT0}}{Delta0 for Pampallona & Tsiatis class rejecting H1 (accepting H0) boundaries. Is a numeric vector of length 1.} \item{\code{futilityBounds}}{The futility bounds for each stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{gammaA}}{The parameter for the alpha spending function. Is a numeric vector of length 1.} \item{\code{gammaB}}{The parameter for the beta spending function. Is a numeric vector of length 1.} \item{\code{optimizationCriterion}}{The optimization criterion for optimum design within the Wang & Tsiatis class (\code{"ASNH1"}, \code{"ASNIFH1"}, \code{"ASNsum"}), default is \code{"ASNH1"}.} \item{\code{sided}}{Describes if the alternative is one-sided (\code{1}) or two-sided (\code{2}). 
Is a numeric vector of length 1 containing a whole number.} \item{\code{betaSpent}}{The cumulative beta level spent at each stage of the trial. Only applicable for beta-spending designs. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{typeBetaSpending}}{The type of beta spending. Is a character vector of length 1.} \item{\code{userBetaSpending}}{The user defined beta spending. Contains the cumulative beta-spending up to each interim stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{power}}{The one-sided power at each stage of the trial. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{twoSidedPower}}{Specifies if power is defined two-sided at each stage of the trial. Is a logical vector of length 1.} \item{\code{constantBoundsHP}}{The constant bounds up to stage kMax - 1 for the Haybittle & Peto design (default is 3). Is a numeric vector of length 1.} \item{\code{betaAdjustment}}{If \code{TRUE}, beta spending values are linearly adjusted if an overlapping of decision regions for futility stopping at earlier stages occurs. Only applicable for two-sided beta-spending designs. Is a logical vector of length 1.} \item{\code{delayedInformation}}{Delay of information for delayed response designs. Is a numeric vector of length \code{kMax} minus 1 containing values between 0 and 1.} \item{\code{decisionCriticalValues}}{The decision critical values for each stage of the trial in a delayed response design. Is a numeric vector of length \code{kMax}.} \item{\code{reversalProbabilities}}{The probability to switch from stopping the trial for success (or futility) and reaching non-rejection (or rejection) in a delayed response design. Is a numeric vector of length \code{kMax} minus 1 containing values between 0 and 1.} }} \seealso{ \code{\link[=getDesignInverseNormal]{getDesignInverseNormal()}} for creating a inverse normal design. 
} \keyword{internal} rpact/man/param_hazardRatio.Rd0000644000176200001440000000104014335631010016044 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_hazardRatio} \alias{param_hazardRatio} \title{Parameter Description: Hazard Ratio} \arguments{ \item{hazardRatio}{The vector of hazard ratios under consideration. If the event or hazard rates in both treatment groups are defined, the hazard ratio needs not to be specified as it is calculated, there is no default. Must be a positive numeric of length 1.} } \description{ Parameter Description: Hazard Ratio } \keyword{internal} rpact/man/param_pi1_survival.Rd0000644000176200001440000000103514335631010016224 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_pi1_survival} \alias{param_pi1_survival} \title{Parameter Description: Pi (1) for Survival Data} \arguments{ \item{pi1}{A numeric value or vector that represents the assumed event rate in the treatment group, default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or \code{seq(0.4, 0.6, 0.1)} (sample size calculations).} } \description{ Parameter Description: Pi (1) for Survival Data } \keyword{internal} rpact/man/dataMultiArmMeans.Rd0000644000176200001440000000103114313321260015763 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataMultiArmMeans} \alias{dataMultiArmMeans} \title{Multi-Arm Dataset of Means} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataMultiArmMeans } \description{ A dataset containing the sample sizes, means, and standard deviations of four groups. Use \code{getDataset(dataMultiArmMeans)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. 
} \keyword{internal} rpact/man/param_typeOfComputation.Rd0000644000176200001440000000114714335631010017275 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_typeOfComputation} \alias{param_typeOfComputation} \title{Parameter Description: Type Of Computation} \arguments{ \item{typeOfComputation}{Three options are available: \code{"Schoenfeld"}, \code{"Freedman"}, \code{"HsiehFreedman"}, the default is \code{"Schoenfeld"}. For details, see Hsieh (Statistics in Medicine, 1992). For non-inferiority testing (i.e., \code{thetaH0 != 1}), only Schoenfeld's formula can be used.} } \description{ Parameter Description: Type Of Computation } \keyword{internal} rpact/man/getSimulationEnrichmentMeans.Rd0000644000176200001440000003245014417202031020250 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_enrichment_means.R \name{getSimulationEnrichmentMeans} \alias{getSimulationEnrichmentMeans} \title{Get Simulation Enrichment Means} \usage{ getSimulationEnrichmentMeans( design = NULL, ..., effectList = NULL, intersectionTest = c("Simes", "SpiessensDebois", "Bonferroni", "Sidak"), stratifiedAnalysis = TRUE, adaptations = NA, typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), effectMeasure = c("effectEstimate", "testStatistic"), successCriterion = c("all", "atLeastOne"), epsilonValue = NA_real_, rValue = NA_real_, threshold = -Inf, plannedSubjects = NA_integer_, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, stDevH1 = NA_real_, maxNumberOfIterations = 1000L, seed = NA_real_, calcSubjectsFunction = NULL, selectPopulationsFunction = NULL, showStatistics = FALSE ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. 
In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{effectList}{List of subsets, prevalences, and effect sizes with columns and number of rows reflecting the different situations to consider (see examples).} \item{intersectionTest}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses. Four options are available in enrichment designs: \code{"SpiessensDebois"}, \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.} \item{stratifiedAnalysis}{Logical. For enrichment designs, typically a stratified analysis should be chosen. For testing rates, also a non-stratified analysis based on overall data can be performed. For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.} \item{adaptations}{A logical vector of length \code{kMax - 1} indicating whether or not an adaptation takes place at interim k, default is \code{rep(TRUE, kMax - 1)}.} \item{typeOfSelection}{The way the treatment arms or populations are selected at interim. Five options are available: \code{"best"}, \code{"rbest"}, \code{"epsilon"}, \code{"all"}, and \code{"userDefined"}, default is \code{"best"}.\cr For \code{"rbest"} (select the \code{rValue} best treatment arms/populations), the parameter \code{rValue} has to be specified, for \code{"epsilon"} (select treatment arm/population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. 
If \code{"userDefined"} is selected, \code{"selectArmsFunction"} or \code{"selectPopulationsFunction"} has to be specified.} \item{effectMeasure}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (difference for means and rates or ratio for survival) (\code{"effectEstimate"}), default is \code{"effectEstimate"}.} \item{successCriterion}{Defines when the study is stopped for efficacy at interim. Two options are available: \code{"all"} stops the trial if the efficacy criterion is fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim, default is \code{"all"}.} \item{epsilonValue}{For \code{typeOfSelection = "epsilon"} (select treatment arm / population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. Must be a numeric of length 1.} \item{rValue}{For \code{typeOfSelection = "rbest"} (select the \code{rValue} best treatment arms / populations), the parameter \code{rValue} has to be specified.} \item{threshold}{Selection criterion: treatment arm / population is selected only if \code{effectMeasure} exceeds \code{threshold}, default is \code{-Inf}. \code{threshold} can also be a vector of length \code{activeArms} referring to a separate threshold condition over the treatment arms.} \item{plannedSubjects}{\code{plannedSubjects} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs, \code{plannedSubjects} refers to the number of subjects per selected active arm.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. 
For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{minNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfSubjectsPerStage} with length kMax determines the minimum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{minNumberOfSubjectsPerStage} refers to the minimum number of subjects per selected active arm.} \item{maxNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfSubjectsPerStage} with length kMax determines the maximum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. For multi-arm designs \code{maxNumberOfSubjectsPerStage} refers to the maximum number of subjects per selected active arm.} \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. 
Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} \item{thetaH1}{If specified, the value of the alternative under which the conditional power calculation or sample size recalculation is performed. Must be a numeric of length 1.} \item{stDevH1}{If specified, the value of the standard deviation under which the conditional power calculation or sample size recalculation is performed, default is the value of \code{stDev}. Must be a positive numeric of length 1.} \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcSubjectsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, sample size recalculation is performed with conditional power and specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples).} \item{selectPopulationsFunction}{Optionally, a function can be entered that defines how populations are selected. This function is allowed to depend on \code{effectVector} with length \code{populations} and \code{stage} (see examples).} \item{showStatistics}{Logical. If \code{TRUE}, summary statistics of the simulated data are displayed for the \code{print} command, otherwise the output is suppressed, default is \code{FALSE}.} } \value{ Returns a \code{\link{SimulationResults}} object. 
The following generics (R generic functions) are available for this object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.SimulationResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the simulated power, stopping and selection probabilities, conditional power, and expected sample size for testing means in an enrichment design testing situation. } \details{ At given design the function simulates the power, stopping probabilities, selection probabilities, and expected sample size at given number of subjects, parameter configuration, and population selection rule in the enrichment situation. An allocation ratio can be specified referring to the ratio of number of subjects in the active treatment groups as compared to the control group. The definition of \code{thetaH1} and/or \code{stDevH1} only makes sense if \code{kMax} > 1 and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined. \code{calcSubjectsFunction}\cr This function returns the number of subjects at given conditional power and conditional critical value for the specified testing situation. The function might depend on the variables \code{stage}, \code{selectedPopulations}, \code{plannedSubjects}, \code{allocationRatioPlanned}, \code{minNumberOfSubjectsPerStage}, \code{maxNumberOfSubjectsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, \code{overallEffects}, and \code{stDevH1}. The function has to contain the three-dots argument '...' (see examples). 
} \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ \dontrun{ # Assess a population selection strategy with one subset population. # If the subset is better than the full population, then the subset # is selected for the second stage, otherwise the full. Print and plot # design characteristics. # Define design designIN <- getDesignInverseNormal(kMax = 2) # Define subgroups and their prevalences subGroups <- c("S", "R") # fixed names! prevalences <- c(0.2, 0.8) # Define effect matrix and variability effectR <- 0.2 m <- c() for (effectS in seq(0, 0.5, 0.25)) { m <- c(m, effectS, effectR) } effects <- matrix(m, byrow = TRUE, ncol = 2) stDev <- c(0.4, 0.8) # Define effect list effectList <- list(subGroups=subGroups, prevalences=prevalences, stDevs = stDev, effects = effects) # Perform simulation simResultsPE <- getSimulationEnrichmentMeans(design = designIN, effectList = effectList, plannedSubjects = c(50, 100), maxNumberOfIterations = 100) print(simResultsPE) # Assess the design characteristics of a user defined selection # strategy in a three-stage design with no interim efficacy stop # using the inverse normal method for combining the stages. # Only the second interim is used for a selecting of a study # population. There is a small probability for stopping the trial # at the first interim. 
# Define design designIN2 <- getDesignInverseNormal(typeOfDesign = "noEarlyEfficacy", kMax = 3) # Define selection function mySelection <- function(effectVector, stage) { selectedPopulations <- rep(TRUE, 3) if (stage == 2) { selectedPopulations <- (effectVector >= c(1, 2, 3)) } return(selectedPopulations) } # Define subgroups and their prevalences subGroups <- c("S1", "S12", "S2", "R") # fixed names! prevalences <- c(0.2, 0.3, 0.4, 0.1) effectR <- 1.5 effectS12 = 5 m <- c() for (effectS1 in seq(0, 5, 1)) { for (effectS2 in seq(0, 5, 1)) { m <- c(m, effectS1, effectS12, effectS2, effectR) } } effects <- matrix(m, byrow = TRUE, ncol = 4) stDev <- 10 # Define effect list effectList <- list(subGroups=subGroups, prevalences=prevalences, stDevs = stDev, effects = effects) # Perform simulation simResultsPE <- getSimulationEnrichmentMeans( design = designIN2, effectList = effectList, typeOfSelection = "userDefined", selectPopulationsFunction = mySelection, intersectionTest = "Simes", plannedSubjects = c(50, 100, 150), maxNumberOfIterations = 100) print(simResultsPE) if (require(ggplot2)) plot(simResultsPE, type = 3) } } rpact/man/getPowerAndAverageSampleNumber.Rd0000644000176200001440000000555514335631010020460 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_group_sequential.R \name{getPowerAndAverageSampleNumber} \alias{getPowerAndAverageSampleNumber} \title{Get Power And Average Sample Number} \usage{ getPowerAndAverageSampleNumber(design, theta = seq(-1, 1, 0.02), nMax = 100) } \arguments{ \item{design}{The trial design.} \item{theta}{A vector of standardized effect sizes (theta values), default is a sequence from -1 to 1.} \item{nMax}{The maximum sample size. Must be a positive integer of length 1.} } \value{ Returns a \code{\link{PowerAndAverageSampleNumberResult}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object, \item \code{\link[=as.data.frame.PowerAndAverageSampleNumberResult]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the power and average sample number of the specified design. } \details{ This function returns the power and average sample number (ASN) of the specified design for the prototype case which is testing H0: mu = mu0 in a one-sample design. \code{theta} represents the standardized effect \code{(mu - mu0) / sigma} and power and ASN is calculated for maximum sample size \code{nMax}. For other designs than the one-sample test of a mean the standardized effect needs to be adjusted accordingly. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \examples{ # Calculate power, stopping probabilities, and expected sample # size for the default design with specified theta and nMax getPowerAndAverageSampleNumber( getDesignGroupSequential(), theta = seq(-1, 1, 0.5), nMax = 100) } \seealso{ Other design functions: \code{\link{getDesignCharacteristics}()}, \code{\link{getDesignConditionalDunnett}()}, \code{\link{getDesignFisher}()}, \code{\link{getDesignGroupSequential}()}, \code{\link{getDesignInverseNormal}()}, \code{\link{getGroupSequentialProbabilities}()} } \concept{design functions} rpact/man/as.data.frame.PowerAndAverageSampleNumberResult.Rd0000644000176200001440000000265014335631007023561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_power_and_asn.R \name{as.data.frame.PowerAndAverageSampleNumberResult} \alias{as.data.frame.PowerAndAverageSampleNumberResult} \title{Coerce Power And Average Sample Number Result to a Data Frame} \usage{ \method{as.data.frame}{PowerAndAverageSampleNumberResult}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \arguments{ \item{x}{A \code{\link{PowerAndAverageSampleNumberResult}} object.} \item{niceColumnNamesEnabled}{Logical. If \code{TRUE}, nice looking column names will be used; syntactic names (variable names) otherwise (see \code{\link[base]{make.names}}).} \item{includeAllParameters}{Logical. If \code{TRUE}, all available parameters will be included in the data frame; a meaningful parameter selection otherwise, default is \code{FALSE}.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} } \value{ Returns a \code{\link[base]{data.frame}}. } \description{ Returns the \code{\link{PowerAndAverageSampleNumberResult}} as data frame. } \details{ Coerces the \code{\link{PowerAndAverageSampleNumberResult}} object to a data frame. 
} \examples{ data <- as.data.frame(getPowerAndAverageSampleNumber(getDesignGroupSequential())) head(data) dim(data) } \keyword{internal} rpact/man/EventProbabilities.Rd0000644000176200001440000000477614450467343016247 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_event_probabilities.R \docType{class} \name{EventProbabilities} \alias{EventProbabilities} \title{Event Probabilities} \description{ Class for the definition of event probabilities. } \details{ \code{EventProbabilities} is a class for the definition of event probabilities. } \section{Fields}{ \describe{ \item{\code{time}}{The time values. Is a numeric vector.} \item{\code{accrualTime}}{The assumed accrual time intervals for the study. Is a numeric vector.} \item{\code{accrualIntensity}}{The absolute accrual intensities. Is a numeric vector of length \code{kMax}.} \item{\code{kappa}}{The shape of the Weibull distribution if \code{kappa!=1}. Is a numeric vector of length 1.} \item{\code{piecewiseSurvivalTime}}{The time intervals for the piecewise definition of the exponential survival time cumulative distribution function. Is a numeric vector.} \item{\code{lambda1}}{The assumed hazard rate in the treatment group. Is a numeric vector of length \code{kMax}.} \item{\code{lambda2}}{The assumed hazard rate in the reference group. Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{hazardRatio}}{The hazard ratios under consideration. Is a numeric vector of length \code{kMax}.} \item{\code{dropoutRate1}}{The assumed drop-out rate in the treatment group. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{dropoutRate2}}{The assumed drop-out rate in the control group. 
Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{dropoutTime}}{The assumed time for drop-out rates in the control and treatment group. Is a numeric vector of length 1.} \item{\code{maxNumberOfSubjects}}{The maximum number of subjects for power calculations. Is a numeric vector.} \item{\code{overallEventProbabilities}}{Deprecated field which will be removed in one of the next releases. Use \code{cumulativeEventProbabilities} instead.} \item{\code{cumulativeEventProbabilities}}{The cumulative event probabilities in survival designs. Is a numeric vector.} \item{\code{eventProbabilities1}}{The event probabilities in treatment group 1. Is a numeric vector.} \item{\code{eventProbabilities2}}{The event probabilities in treatment group 2. Is a numeric vector.} }} \keyword{internal} rpact/man/AnalysisResultsFisher.Rd0000644000176200001440000001055214450467342016747 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsFisher} \alias{AnalysisResultsFisher} \title{Analysis Results Fisher} \description{ Class for analysis results based on a Fisher combination test design. } \details{ This object cannot be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the analysis results of a Fisher combination test design. } \section{Fields}{ \describe{ \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. 
Is a numeric vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDev}}{The assumed standard deviation(s) for means analysis. Is a numeric vector.} \item{\code{equalVariances}}{Describes if the variances in two treatment groups are assumed to be the same. Is a logical vector of length 1.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{conditionalRejectionProbabilities}}{The probabilities of rejecting the null hypothesis at each stage, given the stage has been reached. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{repeatedConfidenceIntervalLowerBounds}}{The lower bound of the confidence intervals that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that are calculated at any stage of the trial. 
Is a numeric vector of length \code{kMax}.} \item{\code{repeatedPValues}}{The p-values that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{finalStage}}{The stage at which the trial ends, either with acceptance or rejection of the null hypothesis. Is a numeric vector of length 1.} \item{\code{finalPValues}}{The final p-value that is based on the stage-wise ordering. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{finalConfidenceIntervalLowerBounds}}{The lower bound of the confidence interval that is based on the stage-wise ordering. Is a numeric vector of length \code{kMax}.} \item{\code{finalConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that is based on the stage-wise ordering. Is a numeric vector of length \code{kMax}.} \item{\code{medianUnbiasedEstimates}}{The calculated median unbiased estimates that are based on the stage-wise ordering. Is a numeric vector of length \code{kMax}.} \item{\code{conditionalPowerSimulated}}{The simulated conditional power, under the assumption of observed or assumed effect sizes.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} }} \keyword{internal} rpact/man/as.data.frame.TrialDesign.Rd0000644000176200001440000000232314335631006017236 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \name{as.data.frame.TrialDesign} \alias{as.data.frame.TrialDesign} \title{Coerce TrialDesign to a Data Frame} \usage{ \method{as.data.frame}{TrialDesign}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \arguments{ \item{x}{A \code{\link{TrialDesign}} object.} \item{niceColumnNamesEnabled}{Logical. 
If \code{TRUE}, nice looking column names will be used; syntactic names (variable names) otherwise (see \code{\link[base]{make.names}}).} \item{includeAllParameters}{Logical. If \code{TRUE}, all available parameters will be included in the data frame; a meaningful parameter selection otherwise, default is \code{FALSE}.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} } \value{ Returns a \code{\link[base]{data.frame}}. } \description{ Returns the \code{TrialDesign} as data frame. } \details{ Each element of the \code{\link{TrialDesign}} is converted to a column in the data frame. } \examples{ as.data.frame(getDesignGroupSequential()) } \keyword{internal} rpact/man/getNumberOfSubjects.Rd0000644000176200001440000000663414335631010016352 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getNumberOfSubjects} \alias{getNumberOfSubjects} \title{Get Number Of Subjects} \usage{ getNumberOfSubjects( time, ..., accrualTime = c(0, 12), accrualIntensity = 0.1, accrualIntensityType = c("auto", "absolute", "relative"), maxNumberOfSubjects = NA_real_ ) } \arguments{ \item{time}{A numeric vector with time values.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0, 12)} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensity}{A numeric vector of accrual intensities, default is the relative intensity \code{0.1} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensityType}{A character value specifying the accrual intensity input type. 
Must be one of \code{"auto"}, \code{"absolute"}, or \code{"relative"}; default is \code{"auto"}, i.e., if all values are < 1 the type is \code{"relative"}, otherwise it is \code{"absolute"}.} \item{maxNumberOfSubjects}{If \code{maxNumberOfSubjects > 0} is specified, the end of accrual at specified \code{accrualIntensity} for the specified number of subjects is determined or \code{accrualIntensity} is calculated at fixed end of accrual.} } \value{ Returns a \code{\link{NumberOfSubjects}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.NumberOfSubjects]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the number of recruited subjects at given time vector. } \details{ Calculate number of subjects over time range at given accrual time vector and accrual intensity. Intensity can either be defined in absolute or relative terms (for the latter, \code{maxNumberOfSubjects} needs to be defined)\cr The function is used by \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}}. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. 
There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ getNumberOfSubjects(time = seq(10, 70, 10), accrualTime = c(0, 20, 60), accrualIntensity = c(5, 20)) getNumberOfSubjects(time = seq(10, 70, 10), accrualTime = c(0, 20, 60), accrualIntensity = c(0.1, 0.4), maxNumberOfSubjects = 900) } \seealso{ \code{\link{AccrualTime}} for defining the accrual time. } rpact/man/setOutputFormat.Rd0000644000176200001440000000706114372411347015624 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_output_formats.R \name{setOutputFormat} \alias{setOutputFormat} \title{Set Output Format} \usage{ setOutputFormat( parameterName = NA_character_, ..., digits = NA_integer_, nsmall = NA_integer_, trimSingleZeros = NA, futilityProbabilityEnabled = NA, file = NA_character_, resetToDefault = FALSE, roundFunction = NA_character_ ) } \arguments{ \item{parameterName}{The name of the parameter whose output format shall be edited. Leave the default \code{NA_character_} if the output format of all parameters shall be edited.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{digits}{How many significant digits are to be used for a numeric value. The default, \code{NULL}, uses getOption("digits"). Allowed values are \code{0 <= digits <= 20}.} \item{nsmall}{The minimum number of digits to the right of the decimal point in formatting real numbers in non-scientific formats. 
Allowed values are \code{0 <= nsmall <= 20}.} \item{trimSingleZeros}{If \code{TRUE} zero values will be trimmed in the output, e.g., "0.00" will be displayed as "0"} \item{futilityProbabilityEnabled}{If \code{TRUE} very small values (< 1e-09) will be displayed as "0", default is \code{FALSE}.} \item{file}{An optional file name of an existing text file that contains output format definitions (see Details for more information).} \item{resetToDefault}{If \code{TRUE} all output formats will be reset to their default values. Note that other settings will be executed afterwards if specified, default is \code{FALSE}.} \item{roundFunction}{A character value that specifies the R base round function to use, default is \code{NA_character_}. Allowed values are "ceiling", "floor", "trunc", "round", "signif", and \code{NA_character_}.} } \description{ With this function the format of the standard outputs of all \code{rpact} objects can be changed and set to user-defined formats, respectively. } \details{ Output formats can be written to a text file (see \code{\link[=getOutputFormat]{getOutputFormat()}}). To load your personal output formats read a formerly saved file at the beginning of your work with \code{rpact}, e.g. execute \code{setOutputFormat(file = "my_rpact_output_formats.txt")}. 
Note that the \code{parameterName} must not match exactly, e.g., for p-values the following parameter names will be recognized amongst others: \enumerate{ \item \code{p value} \item \code{p.values} \item \code{p-value} \item \code{pValue} \item \code{rpact.output.format.p.value} } } \examples{ # show output format of p values getOutputFormat("p.value") \dontrun{ # set new p value output format setOutputFormat("p.value", digits = 5, nsmall = 5) # show sample sizes as smallest integers not less than the not rounded values setOutputFormat("sample size", digits = 0, nsmall = 0, roundFunction = "ceiling") getSampleSizeMeans() # show sample sizes as smallest integers not greater than the not rounded values setOutputFormat("sample size", digits = 0, nsmall = 0, roundFunction = "floor") getSampleSizeMeans() # set new sample size output format without round function setOutputFormat("sample size", digits = 2, nsmall = 2) getSampleSizeMeans() # reset sample size output format to default setOutputFormat("sample size") getSampleSizeMeans() getOutputFormat("sample size") } } \seealso{ \code{\link[base]{format}} for details on the function used internally to format the values. Other output formats: \code{\link{getOutputFormat}()} } \concept{output formats} rpact/man/PowerAndAverageSampleNumberResult.Rd0000644000176200001440000000317414450467343021170 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_power_and_asn.R \docType{class} \name{PowerAndAverageSampleNumberResult} \alias{PowerAndAverageSampleNumberResult} \title{Power and Average Sample Number Result} \description{ Class for power and average sample number (ASN) results. } \details{ This object cannot be created directly; use \code{\link[=getPowerAndAverageSampleNumber]{getPowerAndAverageSampleNumber()}} with suitable arguments to create it. } \section{Fields}{ \describe{ \item{\code{nMax}}{The maximum sample size. 
Is a numeric vector of length 1 containing a whole number.} \item{\code{theta}}{A vector of standardized effect sizes (theta values). Is a numeric vector.} \item{\code{averageSampleNumber}}{The average sample number calculated for each value of \code{theta} or \code{nMax}, if the specified maximum sample size would be exceeded. Is a numeric vector.} \item{\code{calculatedPower}}{The calculated power for the given scenario.} \item{\code{overallEarlyStop}}{The overall early stopping probability. Is a numeric vector.} \item{\code{earlyStop}}{The probability of stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{overallReject}}{The overall rejection probability. Is a numeric vector.} \item{\code{rejectPerStage}}{The probability of rejecting a hypothesis per stage of the trial. Is a numeric matrix.} \item{\code{overallFutility}}{The overall stopping for futility probability. Is a numeric vector.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. 
Is a numeric matrix.} }} \keyword{internal} rpact/man/getAccrualTime.Rd0000644000176200001440000001464214400317257015330 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_time.R \name{getAccrualTime} \alias{getAccrualTime} \title{Get Accrual Time} \usage{ getAccrualTime( accrualTime = NA_real_, ..., accrualIntensity = NA_real_, accrualIntensityType = c("auto", "absolute", "relative"), maxNumberOfSubjects = NA_real_ ) } \arguments{ \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0, 12)} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{accrualIntensity}{A numeric vector of accrual intensities, default is the relative intensity \code{0.1} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensityType}{A character value specifying the accrual intensity input type. Must be one of \code{"auto"}, \code{"absolute"}, or \code{"relative"}; default is \code{"auto"}, i.e., if all values are < 1 the type is \code{"relative"}, otherwise it is \code{"absolute"}.} \item{maxNumberOfSubjects}{The maximum number of subjects.} } \value{ Returns an \code{\link{AccrualTime}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. 
} } \description{ Returns an \code{AccrualTime} object that contains the accrual time and the accrual intensity. } \section{Staggered patient entry}{ \code{accrualTime} is the time period of subjects' accrual in a study. It can be a value that defines the end of accrual or a vector. In this case, \code{accrualTime} can be used to define a non-constant accrual over time. For this, \code{accrualTime} is a vector that defines the accrual intervals. The first element of \code{accrualTime} must be equal to \code{0} and, additionally, \code{accrualIntensity} needs to be specified. \code{accrualIntensity} itself is a value or a vector (depending on the length of \code{accrualTime}) that defines the intensity how subjects enter the trial in the intervals defined through \code{accrualTime}. \code{accrualTime} can also be a list that combines the definition of the accrual time and accrual intensity (see below and examples for details). If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to be specified and the end of accrual is calculated. In that case, \code{accrualIntensity} is the number of subjects per time unit, i.e., the absolute accrual intensity. If the length of \code{accrualTime} equals the length of \code{accrualIntensity - 1} (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated if the absolute accrual intensity is given. If all elements in \code{accrualIntensity} are smaller than 1, \code{accrualIntensity} defines the \emph{relative} intensity how subjects enter the trial. For example, \code{accrualIntensity = c(0.1, 0.2)} specifies that in the second accrual interval the intensity is doubled as compared to the first accrual interval. The actual (absolute) accrual intensity is calculated for the calculated or given \code{maxNumberOfSubjects}. 
Note that the default is \code{accrualIntensity = 0.1} meaning that the \emph{absolute} accrual intensity will be calculated. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ \dontrun{ # Assume that in a trial the accrual after the first 6 months is doubled # and the total accrual time is 30 months. # Further assume that a total of 1000 subjects are entered in the trial. # The number of subjects to be accrued in the first 6 months and afterwards # is achieved through getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.1, 0.2), maxNumberOfSubjects = 1000) # The same result is obtained via the list based definition getAccrualTime(list( "0 - <6" = 0.1, "6 - <=30" = 0.2), maxNumberOfSubjects = 1000) # Calculate the end of accrual at given absolute intensity: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(18, 36), maxNumberOfSubjects = 1000) # Via the list based definition this is getAccrualTime(list( "0 - <6" = 18, ">=6" = 36), maxNumberOfSubjects = 1000) # You can use an accrual time object in getSampleSizeSurvival() or # getPowerSurvival(). 
# For example, if the maximum number of subjects and the follow up # time needs to be calculated for a given effect size: accrualTime = getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.1, 0.2)) getSampleSizeSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2) # Or if the power and follow up time needs to be calculated for given # number of events and subjects: accrualTime = getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.1, 0.2), maxNumberOfSubjects = 110) getPowerSurvival(accrualTime = accrualTime, pi1 = 0.4, pi2 = 0.2, maxNumberOfEvents = 46) # How to show accrual time details # You can use a sample size or power object as argument for the function # getAccrualTime(): sampleSize <- getSampleSizeSurvival(accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.05, hazardRatio = 0.8, followUpTime = 6) sampleSize accrualTime <- getAccrualTime(sampleSize) accrualTime } } \seealso{ \code{\link[=getNumberOfSubjects]{getNumberOfSubjects()}} for calculating the number of subjects at given time points. } rpact/man/SimulationResultsRates.Rd0000644000176200001440000001136714450467343017154 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsRates} \alias{SimulationResultsRates} \title{Class for Simulation Results Rates} \description{ A class for simulation results rates. } \details{ Use \code{\link[=getSimulationRates]{getSimulationRates()}} to create an object of this type. \code{SimulationResultsRates} is the basic class for \itemize{ \item \code{\link{SimulationResultsRates}}, \item \code{\link{SimulationResultsMultiArmRates}}, and \item \code{\link{SimulationResultsEnrichmentRates}}. } } \section{Fields}{ \describe{ \item{\code{maxNumberOfIterations}}{The number of simulation iterations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. 
Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{plannedSubjects}}{Determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, refers to the number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{maxNumberOfSubjects}}{The maximum number of subjects for power calculations. Is a numeric vector.} \item{\code{calcSubjectsFunction}}{An optional function that can be entered to define how sample size is recalculated. By default, recalculation is performed with conditional power with specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage}.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{riskRatio}}{Specifies if the sample size for one-sided testing of H0: \code{pi1 / pi2 = thetaH0} has been calculated. 
Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{groups}}{The group numbers. Is a numeric vector.} \item{\code{pi1H1}}{The assumed probability in the active treatment group for two-group designs, or the assumed probability for a one treatment group design, for which the conditional power was calculated. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{pi2H1}}{The assumed probability in the reference group for two-group designs, for which the conditional power was calculated. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{effect}}{The effect for randomly creating normally distributed responses. Is a numeric vector of length \code{kMax}.} \item{\code{earlyStop}}{The probability of stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{sampleSizes}}{The sample sizes for each group and stage. Is a numeric vector of length number of stages times number of groups containing whole numbers.} \item{\code{overallReject}}{The overall rejection probability. Is a numeric vector.} \item{\code{rejectPerStage}}{The probability to reject a hypothesis per stage of the trial. Is a numeric matrix.} \item{\code{conditionalPowerAchieved}}{The calculated conditional power, under the assumption of observed or assumed effect sizes.
Is a numeric matrix.} }} \keyword{internal} rpact/man/getParameterCaption.Rd0000644000176200001440000000150214313321256016361 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_utilities.R \name{getParameterCaption} \alias{getParameterCaption} \title{Get Parameter Caption} \usage{ getParameterCaption(obj, parameterName) } \value{ Returns a \code{\link[base]{character}} of specifying the corresponding caption of a given parameter name. Returns \code{NULL} if the specified \code{parameterName} does not exist. } \description{ Returns the parameter caption for a given object and parameter name. } \details{ This function identifies and returns the caption that will be used in print outputs of an rpact result object. } \examples{ getParameterCaption(getDesignInverseNormal(), "kMax") } \seealso{ \code{\link[=getParameterName]{getParameterName()}} for getting the parameter name for a given caption. } \keyword{internal} rpact/man/as.data.frame.TrialDesignCharacteristics.Rd0000644000176200001440000000252614335631006022277 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \name{as.data.frame.TrialDesignCharacteristics} \alias{as.data.frame.TrialDesignCharacteristics} \title{Coerce TrialDesignCharacteristics to a Data Frame} \usage{ \method{as.data.frame}{TrialDesignCharacteristics}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \arguments{ \item{x}{A \code{\link{TrialDesignCharacteristics}} object.} \item{niceColumnNamesEnabled}{Logical. If \code{TRUE}, nice looking column names will be used; syntactic names (variable names) otherwise (see \code{\link[base]{make.names}}).} \item{includeAllParameters}{Logical. 
If \code{TRUE}, all available parameters will be included in the data frame; a meaningful parameter selection otherwise, default is \code{FALSE}.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} } \value{ Returns a \code{\link[base]{data.frame}}. } \description{ Returns the \code{TrialDesignCharacteristics} as data frame. } \details{ Each element of the \code{\link{TrialDesignCharacteristics}} is converted to a column in the data frame. } \examples{ as.data.frame(getDesignCharacteristics(getDesignGroupSequential())) } \keyword{internal} rpact/man/AnalysisResultsMultiArmInverseNormal.Rd0000644000176200001440000000754514450467342021776 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsMultiArmInverseNormal} \alias{AnalysisResultsMultiArmInverseNormal} \title{Analysis Results Multi-Arm Inverse Normal} \description{ Class for multi-arm analysis results based on an inverse normal design. } \details{ This object cannot be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the multi-arm analysis results of an inverse normal design. } \section{Fields}{ \describe{ \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0.
Is a numeric vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDevs}}{Assumed standard deviations to calculate conditional power in multi-arm trials or enrichment designs. Is a numeric vector.} \item{\code{piTreatments}}{The assumed rates in the treatment groups for multi-arm and enrichment designs, i.e., designs with multiple subsets.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple (i.e., >2) treatment arms or population enrichment designs when testing means. Available options for multiple arms: \code{"overallPooled", "pairwisePooled", "notPooled"}. Available options for enrichment designs: \code{"pooled", "pooledFromFull", "notPooled"}.} \item{\code{conditionalRejectionProbabilities}}{The probabilities of rejecting the null hypothesis at each stage, given the stage has been reached. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. 
Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{repeatedConfidenceIntervalLowerBounds}}{The lower bound of the confidence intervals that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedConfidenceIntervalUpperBounds}}{The upper bound of the confidence intervals that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedPValues}}{The p-values that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{piControl}}{The assumed probability in the control arm for simulation and under which the sample size recalculation is performed. Is a numeric vector of length 1 containing a value between 0 and 1.} }} \keyword{internal} rpact/man/ConditionalPowerResultsEnrichmentRates.Rd0000644000176200001440000000341414450467342022316 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{ConditionalPowerResultsEnrichmentRates} \alias{ConditionalPowerResultsEnrichmentRates} \title{Conditional Power Results Enrichment Rates} \description{ Class for conditional power calculations of enrichment rates data. } \details{ This object cannot be created directly; use \code{\link{getConditionalPower}} with suitable arguments to create the results of a group sequential or a combination test design. } \section{Fields}{ \describe{ \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{iterations}}{The number of iterations used for simulations.
Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{simulated}}{Describes if the power for Fisher's combination test has been simulated. Only applicable when using Fisher designs. Is a logical vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{piTreatments}}{The assumed rates in the treatment groups for multi-arm and enrichment designs, i.e., designs with multiple subsets.} \item{\code{piControls}}{The assumed rates in the control group for enrichment designs, i.e., designs with multiple subsets.} }} \keyword{internal} rpact/man/param_minNumberOfSubjectsPerStage.Rd0000644000176200001440000000152014335631011021157 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_minNumberOfSubjectsPerStage} \alias{param_minNumberOfSubjectsPerStage} \title{Parameter Description: Minimum Number Of Subjects Per Stage} \arguments{ \item{minNumberOfSubjectsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfSubjectsPerStage} with length kMax determines the minimum number of subjects per stage (i.e., not cumulated), the first element is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. 
For multi-arm designs \code{minNumberOfSubjectsPerStage} refers to the minimum number of subjects per selected active arm.} } \description{ Parameter Description: Minimum Number Of Subjects Per Stage } \keyword{internal} rpact/man/param_kMax.Rd0000644000176200001440000000104614335631010014502 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_kMax} \alias{param_kMax} \title{Parameter Description: Maximum Number of Stages} \arguments{ \item{kMax}{The maximum number of stages \code{K}. Must be a positive integer of length 1 (default value is \code{3}). The maximum selectable \code{kMax} is \code{20} for group sequential or inverse normal and \code{6} for Fisher combination test designs.} } \description{ Parameter Description: Maximum Number of Stages } \keyword{internal} rpact/man/param_plannedSubjects.Rd0000644000176200001440000000127714335631011016735 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_plannedSubjects} \alias{param_plannedSubjects} \title{Parameter Description: Planned Subjects} \arguments{ \item{plannedSubjects}{\code{plannedSubjects} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, it is the number of subjects for both treatment arms. 
For multi-arm designs, \code{plannedSubjects} refers to the number of subjects per selected active arm.} } \description{ Parameter Description: Planned Subjects } \keyword{internal} rpact/man/summary.Dataset.Rd0000644000176200001440000000540514335631006015513 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{summary.Dataset} \alias{summary.Dataset} \title{Dataset Summary} \usage{ \method{summary}{Dataset}(object, ..., type = 1, digits = NA_integer_) } \arguments{ \item{object}{A \code{\link{Dataset}} object.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{digits}{Defines how many digits are to be used for numeric values. Must be a positive integer of length 1.} } \value{ Returns a \code{\link{SummaryFactory}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object } } \description{ Displays a summary of \code{\link{Dataset}} object. } \details{ Summarizes the parameters and results of a dataset. } \section{Summary options}{ The following options can be set globally: \enumerate{ \item \code{rpact.summary.output.size}: one of \code{c("small", "medium", "large")}; defines how many details will be included into the summary; default is \code{"large"}, i.e., all available details are displayed. \item \code{rpact.summary.justify}: one of \code{c("right", "left", "centre")}; shall the values be right-justified (the default), left-justified or centered. \item \code{rpact.summary.width}: defines the maximum number of characters to be used per line (default is \code{83}). \item \code{rpact.summary.intervalFormat}: defines how intervals will be displayed in the summary, default is \code{"[\%s; \%s]"}. 
\item \code{rpact.summary.digits}: defines how many digits are to be used for numeric values (default is \code{3}). \item \code{rpact.summary.digits.probs}: defines how many digits are to be used for numeric values (default is one more than value of \code{rpact.summary.digits}, i.e., \code{4}). \item \code{rpact.summary.trim.zeroes}: if \code{TRUE} (default) zeroes will always be displayed as "0", e.g. "0.000" will become "0". } Example: \code{options("rpact.summary.intervalFormat" = "\%s - \%s")} } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \keyword{internal} rpact/man/getClosedCombinationTestResults.Rd0000644000176200001440000000610614411251745020752 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_multiarm.R \name{getClosedCombinationTestResults} \alias{getClosedCombinationTestResults} \title{Get Closed Combination Test Results} \usage{ getClosedCombinationTestResults(stageResults) } \arguments{ \item{stageResults}{The results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}}.} } \value{ Returns a \code{\link{ClosedCombinationTestResults}} object.
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Calculates and returns the results from the closed combination test in multi-arm and population enrichment designs. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ \dontrun{ # In a four-stage combination test design with O'Brien & Fleming boundaries # at the first stage the second treatment arm was dropped. 
With the Bonferroni # intersection test, the results of a closed adaptive test procedure are # obtained as follows with the given data (treatment arm 4 refers to the # reference group): data <- getDataset( n1 = c(22, 23), n2 = c(21, NA), n3 = c(20, 25), n4 = c(25, 27), means1 = c(1.63, 1.51), means2 = c(1.4, NA), means3 = c(0.91, 0.95), means4 = c(0.83, 0.75), stds1 = c(1.2, 1.4), stds2 = c(1.3, NA), stds3 = c(1.1, 1.14), stds4 = c(1.02, 1.18)) design <- getDesignInverseNormal(kMax = 4) stageResults <- getStageResults(design, dataInput = data, intersectionTest = "Bonferroni") getClosedCombinationTestResults(stageResults) } } \seealso{ Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getStageResults}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/getSimulationMultiArmSurvival.Rd0000644000176200001440000003275614445304766020512 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_multiarm_survival.R \name{getSimulationMultiArmSurvival} \alias{getSimulationMultiArmSurvival} \title{Get Simulation Multi-Arm Survival} \usage{ getSimulationMultiArmSurvival( design = NULL, ..., activeArms = 3L, effectMatrix = NULL, typeOfShape = c("linear", "sigmoidEmax", "userDefined"), omegaMaxVector = seq(1, 2.6, 0.4), gED50 = NA_real_, slope = 1, intersectionTest = c("Dunnett", "Bonferroni", "Simes", "Sidak", "Hierarchical"), directionUpper = TRUE, adaptations = NA, typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), effectMeasure = c("effectEstimate", "testStatistic"), successCriterion = c("all", "atLeastOne"), correlationComputation = c("alternative", 
"null"), epsilonValue = NA_real_, rValue = NA_real_, threshold = -Inf, plannedEvents = NA_real_, allocationRatioPlanned = NA_real_, minNumberOfEventsPerStage = NA_real_, maxNumberOfEventsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, maxNumberOfIterations = 1000L, seed = NA_real_, calcEventsFunction = NULL, selectArmsFunction = NULL, showStatistics = FALSE ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{activeArms}{The number of active treatment arms to be compared with control, default is \code{3}.} \item{effectMatrix}{Matrix of effect sizes with \code{activeArms} columns and number of rows reflecting the different situations to consider.} \item{typeOfShape}{The shape of the dose-response relationship over the treatment groups. This can be either \code{"linear"}, \code{"sigmoidEmax"}, or \code{"userDefined"}, default is \code{"linear"}.\cr For \code{"linear"}, \code{"muMaxVector"} specifies the range of effect sizes for the treatment group with highest response. If \code{"sigmoidEmax"} is selected, \code{"gED50"} and \code{"slope"} has to be entered to specify the ED50 and the slope of the sigmoid Emax model. For \code{"sigmoidEmax"}, \code{"muMaxVector"} specifies the range of effect sizes for the treatment group with response according to infinite dose. 
If \code{"userDefined"} is selected, \code{"effectMatrix"} has to be entered.} \item{omegaMaxVector}{Range of hazard ratios with highest response for \code{"linear"} and \code{"sigmoidEmax"} model, default is \code{seq(1, 2.6, 0.4)}.} \item{gED50}{If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"gED50"} has to be entered to specify the ED50 of the sigmoid Emax model.} \item{slope}{If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"slope"} can be entered to specify the slope of the sigmoid Emax model, default is 1.} \item{intersectionTest}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses. Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}.} \item{directionUpper}{Logical. Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{adaptations}{A logical vector of length \code{kMax - 1} indicating whether or not an adaptation takes place at interim k, default is \code{rep(TRUE, kMax - 1)}.} \item{typeOfSelection}{The way the treatment arms or populations are selected at interim. Five options are available: \code{"best"}, \code{"rbest"}, \code{"epsilon"}, \code{"all"}, and \code{"userDefined"}, default is \code{"best"}.\cr For \code{"rbest"} (select the \code{rValue} best treatment arms/populations), the parameter \code{rValue} has to be specified, for \code{"epsilon"} (select treatment arm/population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. 
If \code{"userDefined"} is selected, \code{"selectArmsFunction"} or \code{"selectPopulationsFunction"} has to be specified.} \item{effectMeasure}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (difference for means and rates or ratio for survival) (\code{"effectEstimate"}), default is \code{"effectEstimate"}.} \item{successCriterion}{Defines when the study is stopped for efficacy at interim. Two options are available: \code{"all"} stops the trial if the efficacy criterion is fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim, default is \code{"all"}.} \item{correlationComputation}{If \code{correlationComputation = "alternative"}, for simulating log-rank statistics in the many-to-one design, a correlation matrix according to Deng et al. (Biometrics, 2019) accounting for the respective alternative is used; if \code{correlationComputation = "null"}, a constant correlation matrix valid under the null, i.e., not accounting for the alternative is used, default is \code{"alternative"}.} \item{epsilonValue}{For \code{typeOfSelection = "epsilon"} (select treatment arm / population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. Must be a numeric of length 1.} \item{rValue}{For \code{typeOfSelection = "rbest"} (select the \code{rValue} best treatment arms / populations), the parameter \code{rValue} has to be specified.} \item{threshold}{Selection criterion: treatment arm / population is selected only if \code{effectMeasure} exceeds \code{threshold}, default is \code{-Inf}. 
\code{threshold} can also be a vector of length \code{activeArms} referring to a separate threshold condition over the treatment arms.} \item{plannedEvents}{\code{plannedEvents} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) events in survival designs when the interim stages are planned. For two treatment arms, it is the number of events for both treatment arms. For multi-arm designs, \code{plannedEvents} refers to the overall number of events for the selected arms plus control.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{minNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfEventsPerStage} with length kMax determines the minimum number of events per stage (i.e., not cumulated), the first element is not taken into account.} \item{maxNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfEventsPerStage} with length kMax determines the maximum number of events per stage (i.e., not cumulated), the first element is not taken into account.} \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. 
It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} \item{thetaH1}{If specified, the value of the alternative under which the conditional power or sample size recalculation calculation is performed. Must be a numeric of length 1.} \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcEventsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, event number recalculation is performed with conditional power and specified \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} (see details and examples).} \item{selectArmsFunction}{Optionally, a function can be entered that defines the way of how treatment arms are selected. This function is allowed to depend on \code{effectVector} with length \code{activeArms} and \code{stage} (see examples).} \item{showStatistics}{Logical. If \code{TRUE}, summary statistics of the simulated data are displayed for the \code{print} command, otherwise the output is suppressed, default is \code{FALSE}.} } \value{ Returns a \code{\link{SimulationResults}} object. 
The following generics (R generic functions) are available for this object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.SimulationResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the simulated power, stopping and selection probabilities, conditional power, and expected sample size for testing hazard ratios in a multi-arm treatment groups testing situation. In contrast to \code{getSimulationSurvival()} (where survival times are simulated), normally distributed logrank test statistics are simulated. } \details{ At given design the function simulates the power, stopping probabilities, selection probabilities, and expected sample size at given number of subjects, parameter configuration, and treatment arm selection rule in the multi-arm situation. An allocation ratio can be specified referring to the ratio of number of subjects in the active treatment groups as compared to the control group. The definition of \code{thetaH1} makes only sense if \code{kMax} > 1 and if \code{conditionalPower}, \code{minNumberOfEventsPerStage}, and \code{maxNumberOfEventsPerStage} (or \code{calcEventsFunction}) are defined. \code{calcEventsFunction}\cr This function returns the number of events at given conditional power and conditional critical value for specified testing situation. 
The function might depend on the variables \code{stage}, \code{selectedArms}, \code{plannedEvents}, \code{directionUpper}, \code{allocationRatioPlanned}, \code{minNumberOfEventsPerStage}, \code{maxNumberOfEventsPerStage}, \code{conditionalPower}, \code{conditionalCriticalValue}, and \code{overallEffects}. The function has to contain the three-dots argument '...' (see examples). } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ \dontrun{ # Assess different selection rules for a two-stage survival design with # O'Brien & Fleming alpha spending boundaries and (non-binding) stopping # for futility if the test statistic is negative. # Number of events at the second stage is adjusted based on conditional # power 80\% and specified minimum and maximum number of events. 
design <- getDesignInverseNormal(typeOfDesign = "asOF", futilityBounds = 0) y1 <- getSimulationMultiArmSurvival(design = design, activeArms = 4, intersectionTest = "Simes", typeOfShape = "sigmoidEmax", omegaMaxVector = seq(1, 2, 0.5), gED50 = 2, slope = 4, typeOfSelection = "best", conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 30), maxNumberOfEventsPerStage = c(NA_real_, 90), maxNumberOfIterations = 50, plannedEvents = c(75, 120)) y2 <- getSimulationMultiArmSurvival(design = design, activeArms = 4, intersectionTest = "Simes", typeOfShape = "sigmoidEmax", omegaMaxVector = seq(1,2,0.5), gED50 = 2, slope = 4, typeOfSelection = "epsilon", epsilonValue = 0.2, effectMeasure = "effectEstimate", conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 30), maxNumberOfEventsPerStage = c(NA_real_, 90), maxNumberOfIterations = 50, plannedEvents = c(75, 120)) y1$effectMatrix y1$rejectAtLeastOne y2$rejectAtLeastOne y1$selectedArms y2$selectedArms } } rpact/man/plot.TrialDesignPlan.Rd0000644000176200001440000001165314372411347016436 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_plan.R \name{plot.TrialDesignPlan} \alias{plot.TrialDesignPlan} \title{Trial Design Plan Plotting} \usage{ \method{plot}{TrialDesignPlan}( x, y, ..., main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = ifelse(x$.design$kMax == 1, 5L, 1L), palette = "Set1", theta = seq(-1, 1, 0.01), plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, grid = 1, plotSettings = NULL ) } \arguments{ \item{x}{The trial design plan, obtained from \cr \code{\link[=getSampleSizeMeans]{getSampleSizeMeans()}}, \cr \code{\link[=getSampleSizeRates]{getSampleSizeRates()}}, \cr \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}}, \cr \code{\link[=getPowerMeans]{getPowerMeans()}}, \cr \code{\link[=getPowerRates]{getPowerRates()}} or \cr \code{\link[=getPowerSurvival]{getPowerSurvival()}}.} 
\item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{type}{The plot type (default = \code{1}). The following plot types are available: \itemize{ \item \code{1}: creates a 'Boundaries' plot \item \code{2}: creates a 'Boundaries Effect Scale' plot \item \code{3}: creates a 'Boundaries p Values Scale' plot \item \code{4}: creates an 'Error Spending' plot \item \code{5}: creates a 'Sample Size' or 'Overall Power and Early Stopping' plot \item \code{6}: creates a 'Number of Events' or 'Sample Size' plot \item \code{7}: creates an 'Overall Power' plot \item \code{8}: creates an 'Overall Early Stopping' plot \item \code{9}: creates an 'Expected Number of Events' or 'Expected Sample Size' plot \item \code{10}: creates a 'Study Duration' plot \item \code{11}: creates an 'Expected Number of Subjects' plot \item \code{12}: creates an 'Analysis Times' plot \item \code{13}: creates a 'Cumulative Distribution Function' plot \item \code{14}: creates a 'Survival Function' plot \item \code{"all"}: creates all available plots and returns it as a grid plot or list }} \item{palette}{The palette, default is \code{"Set1"}.} \item{theta}{A vector of standardized effect sizes (theta values), default is a sequence from -1 to 1.} \item{plotPointsEnabled}{Logical. If \code{TRUE}, additional points will be plotted.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. 
Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{grid}{An integer value specifying the output of multiple plots. By default (\code{1}) a list of \code{ggplot} objects will be returned. If a \code{grid} value > 1 was specified, a grid plot will be returned if the number of plots is <= specified \code{grid} value; a list of \code{ggplot} objects will be returned otherwise. If \code{grid = 0} is specified, all plots will be created using \code{\link[base]{print}} command and a list of \code{ggplot} objects will be returned invisible. 
Note that one of the following packages must be installed to create a grid plot: 'ggpubr', 'gridExtra', or 'cowplot'.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSettings()}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots a trial design plan. } \details{ Generic function to plot all kinds of trial design plans. } \examples{ \dontrun{ if (require(ggplot2)) plot(getSampleSizeMeans()) } } rpact/man/SimulationResultsMeans.Rd0000644000176200001440000001210614450467343017131 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsMeans} \alias{SimulationResultsMeans} \title{Class for Simulation Results Means} \description{ A class for simulation results means. } \details{ Use \code{\link[=getSimulationMeans]{getSimulationMeans()}} to create an object of this type. \code{SimulationResultsMeans} is the basic class for \itemize{ \item \code{\link{SimulationResultsMeans}}, \item \code{\link{SimulationResultsMultiArmMeans}}, and \item \code{\link{SimulationResultsEnrichmentMeans}}. } } \section{Fields}{ \describe{ \item{\code{maxNumberOfIterations}}{The number of simulation iterations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. 
Is a numeric vector of length 1 containing a whole number.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{stDev}}{The standard deviation used for sample size and power calculation. Is a numeric vector of length 1.} \item{\code{plannedSubjects}}{Determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, refers to the number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{minNumberOfSubjectsPerStage}}{Determines the minimum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the minimum number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{maxNumberOfSubjectsPerStage}}{Determines the maximum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the maximum number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{stDevH1}}{The standard deviation under which the conditional power or sample size recalculation is performed. Is a numeric vector of length 1.} \item{\code{calcSubjectsFunction}}{An optional function that can be entered to define how sample size is recalculated. 
By default, recalculation is performed with conditional power with specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage}.} \item{\code{expectedNumberOfSubjects}}{The expected number of subjects under specified alternative.} \item{\code{meanRatio}}{Specifies if the sample size for one-sided testing of H0: \code{mu1/mu2 = thetaH0} has been calculated. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{alternative}}{The alternative hypothesis value(s) for testing means. Is a numeric vector.} \item{\code{groups}}{The group numbers. Is a numeric vector.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{effect}}{The effect for randomly creating normally distributed responses. Is a numeric vector of length \code{kMax}.} \item{\code{earlyStop}}{The probability of stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{sampleSizes}}{The sample sizes for each group and stage. Is a numeric vector of length number of stages times number of groups containing whole numbers.} \item{\code{overallReject}}{The overall rejection probability. Is a numeric vector.} \item{\code{rejectPerStage}}{The probability to reject a hypothesis per stage of the trial. Is a numeric matrix.} \item{\code{conditionalPowerAchieved}}{The calculated conditional power, under the assumption of observed or assumed effect sizes. 
Is a numeric matrix.} }} \keyword{internal} rpact/man/AnalysisResultsInverseNormal.Rd0000644000176200001440000001001314450467342020303 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsInverseNormal} \alias{AnalysisResultsInverseNormal} \title{Analysis Results Inverse Normal} \description{ Class for analysis results based on an inverse normal design. } \details{ This object cannot be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the analysis results of an inverse normal design. } \section{Fields}{ \describe{ \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. 
Is a positive numeric vector of length 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDev}}{The assumed standard deviation(s) for means analysis. Is a numeric vector.} \item{\code{equalVariances}}{Describes if the variances in two treatment groups are assumed to be the same. Is a logical vector of length 1.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{conditionalRejectionProbabilities}}{The probabilities of rejecting the null hypothesis at each stage, given the stage has been reached. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{repeatedConfidenceIntervalLowerBounds}}{The lower bound of the confidence intervals that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedPValues}}{The p-values that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{finalStage}}{The stage at which the trial ends, either with acceptance or rejection of the null hypothesis. Is a numeric vector of length 1.} \item{\code{finalPValues}}{The final p-value that is based on the stage-wise ordering. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{finalConfidenceIntervalLowerBounds}}{The lower bound of the confidence interval that is based on the stage-wise ordering. 
Is a numeric vector of length \code{kMax}.} \item{\code{finalConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that is based on the stage-wise ordering. Is a numeric vector of length \code{kMax}.} \item{\code{medianUnbiasedEstimates}}{The calculated median unbiased estimates that are based on the stage-wise ordering. Is a numeric vector of length \code{kMax}.} }} \keyword{internal} rpact/man/summary.ParameterSet.Rd0000644000176200001440000000555014335631006016523 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{summary.ParameterSet} \alias{summary.ParameterSet} \title{Parameter Set Summary} \usage{ \method{summary}{ParameterSet}( object, ..., type = 1, digits = NA_integer_, output = c("all", "title", "overview", "body") ) } \arguments{ \item{object}{A \code{\link{ParameterSet}} object.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{digits}{Defines how many digits are to be used for numeric values. Must be a positive integer of length 1.} } \value{ Returns a \code{\link{SummaryFactory}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object } } \description{ Displays a summary of \code{\link{ParameterSet}} object. } \details{ Summarizes the parameters and results of a parameter set. } \section{Summary options}{ The following options can be set globally: \enumerate{ \item \code{rpact.summary.output.size}: one of \code{c("small", "medium", "large")}; defines how many details will be included into the summary; default is \code{"large"}, i.e., all available details are displayed. 
\item \code{rpact.summary.justify}: one of \code{c("right", "left", "centre")}; shall the values be right-justified (the default), left-justified or centered. \item \code{rpact.summary.width}: defines the maximum number of characters to be used per line (default is \code{83}). \item \code{rpact.summary.intervalFormat}: defines how intervals will be displayed in the summary, default is \code{"[\%s; \%s]"}. \item \code{rpact.summary.digits}: defines how many digits are to be used for numeric values (default is \code{3}). \item \code{rpact.summary.digits.probs}: defines how many digits are to be used for numeric values (default is one more than value of \code{rpact.summary.digits}, i.e., \code{4}). \item \code{rpact.summary.trim.zeroes}: if \code{TRUE} (default) zeroes will always displayed as "0", e.g. "0.000" will become "0". } Example: \code{options("rpact.summary.intervalFormat" = "\%s - \%s")} } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \keyword{internal} rpact/man/print.FieldSet.Rd0000644000176200001440000000114214335631006015256 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{print.FieldSet} \alias{print.FieldSet} \title{Print Field Set Values} \usage{ \method{print}{FieldSet}(x, ...) 
} \arguments{ \item{x}{A \code{\link{FieldSet}} object.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} } \description{ \code{print} prints its \code{\link{FieldSet}} argument and returns it invisibly (via \code{invisible(x)}). } \details{ Prints the field set. } \keyword{internal} rpact/man/readDatasets.Rd0000644000176200001440000000446014335631006015036 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{readDatasets} \alias{readDatasets} \title{Read Multiple Datasets} \usage{ readDatasets( file, ..., header = TRUE, sep = ",", quote = "\\"", dec = ".", fill = TRUE, comment.char = "", fileEncoding = "UTF-8" ) } \arguments{ \item{file}{A CSV file (see \code{\link[utils]{read.table}}).} \item{...}{Further arguments to be passed to \code{\link[utils]{read.table}}.} \item{header}{A logical value indicating whether the file contains the names of the variables as its first line.} \item{sep}{The field separator character. Values on each line of the file are separated by this character. If sep = "," (the default for \code{readDatasets}) the separator is a comma.} \item{quote}{The set of quoting characters. To disable quoting altogether, use quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only considered for columns read as character, which is all of them unless \code{colClasses} is specified.} \item{dec}{The character used in the file for decimal points.} \item{fill}{logical. If \code{TRUE} then in case the rows have unequal length, blank fields are implicitly added.} \item{comment.char}{character: a character vector of length one containing a single character or an empty string. 
Use "" to turn off the interpretation of comments altogether.} \item{fileEncoding}{character string: if non-empty declares the encoding used on a file (not a connection) so the character data can be re-encoded. See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.} } \value{ Returns a \code{\link[base]{list}} of \code{\link{Dataset}} objects. } \description{ Reads a data file and returns it as a list of dataset objects. } \details{ Reads a file that was written by \code{\link[=writeDatasets]{writeDatasets()}} before. } \examples{ dataFile <- system.file("extdata", "datasets_rates.csv", package = "rpact") if (dataFile != "") { datasets <- readDatasets(dataFile) datasets } } \seealso{ \itemize{ \item \code{\link[=readDataset]{readDataset()}} for reading a single dataset, \item \code{\link[=writeDatasets]{writeDatasets()}} for writing multiple datasets, \item \code{\link[=writeDataset]{writeDataset()}} for writing a single dataset. } } rpact/man/param_allocationRatioPlanned.Rd0000644000176200001440000000152714417202031020232 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_allocationRatioPlanned} \alias{param_allocationRatioPlanned} \title{Parameter Description: Allocation Ratio Planned} \arguments{ \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. 
In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} } \description{ Parameter Description: Allocation Ratio Planned } \keyword{internal} rpact/man/param_pi2_survival.Rd0000644000176200001440000000064514232463333016242 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_pi2_survival} \alias{param_pi2_survival} \title{Parameter Description: Pi (2) for Survival Data} \arguments{ \item{pi2}{A numeric value that represents the assumed event rate in the control group, default is \code{0.2}.} } \description{ Parameter Description: Pi (2) for Survival Data } \keyword{internal} rpact/man/AnalysisResultsEnrichmentFisher.Rd0000644000176200001440000001003514450467342020760 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsEnrichmentFisher} \alias{AnalysisResultsEnrichmentFisher} \title{Analysis Results Enrichment Fisher} \description{ Class for enrichment analysis results based on a Fisher combination test design. } \details{ This object cannot be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the multi-arm analysis results of a Fisher combination test design. } \section{Fields}{ \describe{ \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. 
Is a numeric vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDevs}}{Assumed standard deviations to calculate conditional power in multi-arm trials or enrichment designs. Is a numeric vector.} \item{\code{piTreatments}}{The assumed rates in the treatment groups for multi-arm and enrichment designs, i.e., designs with multiple subsets.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple (i.e., >2) treatment arms or population enrichment designs when testing means. Available options for multiple arms: \code{"overallPooled", "pairwisePooled", "notPooled"}. Available options for enrichment designs: \code{"pooled", "pooledFromFull", "notPooled"}.} \item{\code{conditionalRejectionProbabilities}}{The probabilities of rejecting the null hypothesis at each stage, given the stage has been reached. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{repeatedConfidenceIntervalLowerBounds}}{The lower bound of the confidence intervals that are calculated at any stage of the trial. 
Is a numeric vector of length \code{kMax}.} \item{\code{repeatedConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedPValues}}{The p-values that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{piControls}}{The assumed rates in the control group for enrichment designs, i.e., designs with multiple subsets.} \item{\code{conditionalPowerSimulated}}{The simulated conditional power, under the assumption of observed or assumed effect sizes.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. When testing means and rates, a non-stratified analysis can be performed on overall data. For survival data, only a stratified analysis is possible. Is a logical vector of length 1.} }} \keyword{internal} rpact/man/param_typeOfDesign.Rd0000644000176200001440000000161614335631010016205 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_typeOfDesign} \alias{param_typeOfDesign} \title{Parameter Description: Type of Design} \arguments{ \item{typeOfDesign}{The type of design. 
Type of design is one of the following: O'Brien & Fleming (\code{"OF"}), Pocock (\code{"P"}), Wang & Tsiatis Delta class (\code{"WT"}), Pampallona & Tsiatis (\code{"PT"}), Haybittle & Peto (\code{"HP"}), Optimum design within Wang & Tsiatis class (\code{"WToptimum"}), O'Brien & Fleming type alpha spending (\code{"asOF"}), Pocock type alpha spending (\code{"asP"}), Kim & DeMets alpha spending (\code{"asKD"}), Hwang, Shih & DeCani alpha spending (\code{"asHSD"}), user defined alpha spending (\code{"asUser"}), no early efficacy stop (\code{"noEarlyEfficacy"}), default is \code{"OF"}.} } \description{ Parameter Description: Type of Design } \keyword{internal} rpact/man/print.ParameterSet.Rd0000644000176200001440000000152414335631006016157 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{print.ParameterSet} \alias{print.ParameterSet} \title{Print Parameter Set Values} \usage{ \method{print}{ParameterSet}(x, ..., markdown = FALSE) } \arguments{ \item{x}{The \code{\link{ParameterSet}} object to print.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{markdown}{If \code{TRUE}, the object \code{x} will be printed using markdown syntax; normal representation will be used otherwise (default is \code{FALSE})} } \description{ \code{print} prints its \code{ParameterSet} argument and returns it invisibly (via \code{invisible(x)}). } \details{ Prints the parameters and results of a parameter set. 
} \keyword{internal} rpact/man/getDesignConditionalDunnett.Rd0000644000176200001440000000602414335631006020067 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \name{getDesignConditionalDunnett} \alias{getDesignConditionalDunnett} \title{Get Design Conditional Dunnett Test} \usage{ getDesignConditionalDunnett( alpha = 0.025, informationAtInterim = 0.5, secondStageConditioning = TRUE ) } \arguments{ \item{alpha}{The significance level alpha, default is \code{0.025}. Must be a positive numeric of length 1.} \item{informationAtInterim}{The information to be expected at interim, default is \code{informationAtInterim = 0.5}.} \item{secondStageConditioning}{The way the second stage p-values are calculated within the closed system of hypotheses. If \code{secondStageConditioning = FALSE} is specified, the unconditional adjusted p-values are used, otherwise conditional adjusted p-values are calculated, default is \code{secondStageConditioning = TRUE} (for details, see Koenig et al., 2008).} } \value{ Returns a \code{\link{TrialDesign}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesign]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesign]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Defines the design to perform an analysis with the conditional Dunnett test. } \details{ For performing the conditional Dunnett test the design must be defined through this function. 
You can define the information fraction and the way of how to compute the second stage p-values only in the design definition, and not in the analysis call.\cr See \code{\link[=getClosedConditionalDunnettTestResults]{getClosedConditionalDunnettTestResults()}} for an example and Koenig et al. (2008) and Wassmer & Brannath (2016), chapter 11 for details of the test procedure. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \seealso{ Other design functions: \code{\link{getDesignCharacteristics}()}, \code{\link{getDesignFisher}()}, \code{\link{getDesignGroupSequential}()}, \code{\link{getDesignInverseNormal}()}, \code{\link{getGroupSequentialProbabilities}()}, \code{\link{getPowerAndAverageSampleNumber}()} } \concept{design functions} rpact/man/param_piecewiseSurvivalTime.Rd0000644000176200001440000000111014335631010020122 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_piecewiseSurvivalTime} \alias{param_piecewiseSurvivalTime} \title{Parameter Description: Piecewise Survival Time} \arguments{ \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function \cr (for details see \code{\link[=getPiecewiseSurvivalTime]{getPiecewiseSurvivalTime()}}).} } \description{ Parameter Description: Piecewise Survival Time } \keyword{internal} 
rpact/man/param_maxNumberOfSubjects_survival.Rd0000644000176200001440000000111514312324046021463 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_maxNumberOfSubjects_survival} \alias{param_maxNumberOfSubjects_survival} \title{Parameter Description: Maximum Number Of Subjects For Survival Endpoint} \arguments{ \item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified. If accrual time and accrual intensity are specified, this will be calculated. Must be a positive integer of length 1.} } \description{ Parameter Description: Maximum Number Of Subjects For Survival Endpoint } \keyword{internal} rpact/man/param_selectPopulationsFunction.Rd0000644000176200001440000000111514232463334021032 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_selectPopulationsFunction} \alias{param_selectPopulationsFunction} \title{Parameter Description: Select Populations Function} \arguments{ \item{selectPopulationsFunction}{Optionally, a function can be entered that defines the way of how populations are selected. This function is allowed to depend on \code{effectVector} with length \code{populations} and \code{stage} (see examples).} } \description{ Parameter Description: Select Populations Function } \keyword{internal} rpact/man/getSampleSizeMeans.Rd0000644000176200001440000001321614372411347016176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getSampleSizeMeans} \alias{getSampleSizeMeans} \title{Get Sample Size Means} \usage{ getSampleSizeMeans( design = NULL, ..., groups = 2, normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = seq(0.2, 1, 0.2), stDev = 1, allocationRatioPlanned = NA_real_ ) } \arguments{ \item{design}{The trial design. 
If no trial design is specified, a fixed sample size design is used. In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{normalApproximation}{The type of computation of the p-values. If \code{TRUE}, the variance is assumed to be known, default is \code{FALSE}, i.e., the calculations are performed with the t distribution.} \item{meanRatio}{If \code{TRUE}, the sample size for one-sided testing of H0: \code{mu1 / mu2 = thetaH0} is calculated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{alternative}{The alternative hypothesis value for testing means. This can be a vector of assumed alternatives, default is \code{seq(0, 1, 0.2)} (power calculations) or \code{seq(0.2, 1, 0.2)} (sample size calculations).} \item{stDev}{The standard deviation under which the sample size or power calculation is performed, default is \code{1}. 
If \code{meanRatio = TRUE} is specified, \code{stDev} defines the coefficient of variation \code{sigma / mu2}. Must be a positive numeric of length 1.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the smallest overall sample size is determined.} } \value{ Returns a \code{\link{TrialDesignPlan}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.TrialDesignSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesignPlan]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesignPlan]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the sample size for testing means in one or two samples. } \details{ At given design the function calculates the stage-wise (non-cumulated) and maximum sample size for testing means. In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. A null hypothesis value thetaH0 != 0 for testing the difference of two means or thetaH0 != 1 for testing the ratio of two means can be specified. Critical bounds and stopping for futility bounds are provided at the effect scale (mean, mean difference, or mean ratio, respectively) for each sample size calculation separately. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. 
Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ # Calculate sample sizes in a fixed sample size parallel group design # with allocation ratio \code{n1 / n2 = 2} for a range of # alternative values 1, ..., 5 with assumed standard deviation = 3.5; # two-sided alpha = 0.05, power 1 - beta = 90\%: getSampleSizeMeans(alpha = 0.05, beta = 0.1, sided = 2, groups = 2, alternative = seq(1, 5, 1), stDev = 3.5, allocationRatioPlanned = 2) \dontrun{ # Calculate sample sizes in a three-stage Pocock paired comparison design testing # H0: mu = 2 for a range of alternative values 3,4,5 with assumed standard # deviation = 3.5; one-sided alpha = 0.05, power 1 - beta = 90\%: getSampleSizeMeans(getDesignGroupSequential(typeOfDesign = "P", alpha = 0.05, sided = 1, beta = 0.1), groups = 1, thetaH0 = 2, alternative = seq(3, 5, 1), stDev = 3.5) } } \seealso{ Other sample size functions: \code{\link{getSampleSizeRates}()}, \code{\link{getSampleSizeSurvival}()} } \concept{sample size functions} rpact/man/getConditionalPower.Rd0000644000176200001440000001420614417202031016402 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getConditionalPower} \alias{getConditionalPower} \title{Get Conditional Power} \usage{ getConditionalPower(stageResults, ..., nPlanned, allocationRatioPlanned = 1) } \arguments{ \item{stageResults}{The results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}}.} \item{...}{Further (optional) arguments to be passed: \describe{ \item{\code{thetaH1} and \code{stDevH1} (or \code{assumedStDev} / \code{assumedStDevs}), \code{pi1}, 
\code{pi2}, or \code{piTreatments}, \code{piControl(s)}}{ The assumed effect size, standard deviation or rates to calculate the conditional power if \code{nPlanned} is specified. For survival designs, \code{thetaH1} refers to the hazard ratio. For one-armed trials with binary outcome, only \code{pi1} can be specified, for two-armed trials with binary outcome, \code{pi1} and \code{pi2} can be specified referring to the assumed treatment and control rate, respectively. In multi-armed or enrichment designs, you can specify a value or a vector with elements referring to the treatment arms or the sub-populations, respectively. For testing rates, the parameters to be specified are \code{piTreatments} and \code{piControl} (multi-arm designs) and \code{piTreatments} and \code{piControls} (enrichment designs).\cr If not specified, the conditional power is calculated under the assumption of observed effect sizes, standard deviations, rates, or hazard ratios.} \item{\code{iterations}}{Iterations for simulating the power for Fisher's combination test. If the power for more than one remaining stages is to be determined for Fisher's combination test, it is estimated via simulation with specified \cr \code{iterations}, the default is \code{1000}.} \item{\code{seed}}{Seed for simulating the conditional power for Fisher's combination test. See above, default is a random seed.} }} \item{nPlanned}{The additional (i.e., "new" and not cumulative) sample size planned for each of the subsequent stages. The argument must be a vector with length equal to the number of remaining stages and contain the combined sample size from both treatment groups if two groups are considered. For survival outcomes, it should contain the planned number of additional events. For multi-arm designs, it is the per-comparison (combined) sample size. 
For enrichment designs, it is the (combined) sample size for the considered sub-population.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} } \value{ Returns a \code{\link{ConditionalPowerResults}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Calculates and returns the conditional power. } \details{ The conditional power is calculated if the planned sample size for the subsequent stages is specified.\cr For testing rates in a two-armed trial, pi1 and pi2 typically refer to the rates in the treatment and the control group, respectively. This is not mandatory, however, and so pi1 and pi2 can be interchanged. In many-to-one multi-armed trials, piTreatments and piControl refer to the rates in the treatment arms and the one control arm, and so they cannot be interchanged. 
piTreatments and piControls in enrichment designs can principally be interchanged, but we use the plural form to indicate that the rates can be differently specified for the sub-populations. For Fisher's combination test, the conditional power for more than one remaining stages is estimated via simulation. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ \dontrun{ data <- getDataset( n1 = c(22, 13, 22, 13), n2 = c(22, 11, 22, 11), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 1, 2.5), stds1 = c(1, 2, 2, 1.3), stds2 = c(1, 2, 2, 1.3)) stageResults <- getStageResults( getDesignGroupSequential(kMax = 4), dataInput = data, stage = 2, directionUpper = FALSE) getConditionalPower(stageResults, thetaH1 = -0.4, nPlanned = c(64, 64), assumedStDev = 1.5, allocationRatioPlanned = 3) } } \seealso{ \code{\link[=plot.StageResults]{plot.StageResults()}} or \code{\link[=plot.AnalysisResults]{plot.AnalysisResults()}} for plotting the conditional power. 
Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedCombinationTestResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getStageResults}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/plot.AnalysisResults.Rd0000644000176200001440000001411514417202031016543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \name{plot.AnalysisResults} \alias{plot.AnalysisResults} \title{Analysis Results Plotting} \usage{ \method{plot}{AnalysisResults}( x, y, ..., type = 1L, nPlanned = NA_real_, allocationRatioPlanned = NA_real_, main = NA_character_, xlab = NA_character_, ylab = NA_character_, legendTitle = NA_character_, palette = "Set1", legendPosition = NA_integer_, showSource = FALSE, grid = 1, plotSettings = NULL ) } \arguments{ \item{x}{The analysis results at given stage, obtained from \code{\link[=getAnalysisResults]{getAnalysisResults()}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional \link[=param_three_dots_plot]{plot arguments}. Furthermore the following arguments can be defined: \itemize{ \item \code{thetaRange}: A range of assumed effect sizes if testing means or a survival design was specified. Additionally, if testing means was selected, \code{assumedStDev} (assumed standard deviation) can be specified (default is \code{1}). \item \code{piTreatmentRange}: A range of assumed rates pi1 to calculate the conditional power. Additionally, if a two-sample comparison was selected, \code{pi2} can be specified (default is the value from \code{\link[=getAnalysisResults]{getAnalysisResults()}}). 
\item \code{directionUpper}: Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. \item \code{\link[=param_thetaH0]{thetaH0}}: The null hypothesis value, default is \code{0} for the normal and the binary case, it is \code{1} for the survival case. For testing a rate in one sample, a value thetaH0 in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}. }} \item{type}{The plot type (default = 1). Note that at the moment only one type (the conditional power plot) is available.} \item{nPlanned}{The additional (i.e., "new" and not cumulative) sample size planned for each of the subsequent stages. The argument must be a vector with length equal to the number of remaining stages and contain the combined sample size from both treatment groups if two groups are considered. For survival outcomes, it should contain the planned number of additional events. For multi-arm designs, it is the per-comparison (combined) sample size. For enrichment designs, it is the (combined) sample size for the considered sub-population.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. 
In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{main}{The main title, default is \code{"Dataset"}.} \item{xlab}{The x-axis label, default is \code{"Stage"}.} \item{ylab}{The y-axis label.} \item{legendTitle}{The legend title, default is \code{""}.} \item{palette}{The palette, default is \code{"Set1"}.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{grid}{An integer value specifying the output of multiple plots. By default (\code{1}) a list of \code{ggplot} objects will be returned. 
If a \code{grid} value > 1 was specified, a grid plot will be returned if the number of plots is <= specified \code{grid} value; a list of \code{ggplot} objects will be returned otherwise. If \code{grid = 0} is specified, all plots will be created using \code{\link[base]{print}} command and a list of \code{ggplot} objects will be returned invisible. Note that one of the following packages must be installed to create a grid plot: 'ggpubr', 'gridExtra', or 'cowplot'.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSettings()}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots the conditional power together with the likelihood function. } \details{ The conditional power is calculated only if effect size and sample size is specified. } \examples{ \dontrun{ design <- getDesignGroupSequential(kMax = 2) dataExample <- getDataset( n = c(20, 30), means = c(50, 51), stDevs = c(130, 140) ) result <- getAnalysisResults(design = design, dataInput = dataExample, thetaH0 = 20, nPlanned = c(30), thetaH1 = 1.5, stage = 1) if (require(ggplot2)) plot(result, thetaRange = c(0, 100)) } } rpact/man/param_typeOfSelection.Rd0000644000176200001440000000166114335631011016722 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_typeOfSelection} \alias{param_typeOfSelection} \title{Parameter Description: Type of Selection} \arguments{ \item{typeOfSelection}{The way the treatment arms or populations are selected at interim. 
Five options are available: \code{"best"}, \code{"rbest"}, \code{"epsilon"}, \code{"all"}, and \code{"userDefined"}, default is \code{"best"}.\cr For \code{"rbest"} (select the \code{rValue} best treatment arms/populations), the parameter \code{rValue} has to be specified, for \code{"epsilon"} (select treatment arm/population not worse than epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. If \code{"userDefined"} is selected, \code{"selectArmsFunction"} or \code{"selectPopulationsFunction"} has to be specified.} } \description{ Parameter Description: Type of Selection } \keyword{internal} rpact/man/getFinalPValue.Rd0000644000176200001440000000301714411251744015277 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getFinalPValue} \alias{getFinalPValue} \title{Get Final P Value} \usage{ getFinalPValue(stageResults, ...) } \arguments{ \item{stageResults}{The results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}}.} \item{...}{Only available for backward compatibility.} } \value{ Returns a \code{\link[base]{list}} containing \itemize{ \item \code{finalStage}, \item \code{pFinal}. } } \description{ Returns the final p-value for given stage results. } \details{ The calculation of the final p-value is based on the stage-wise ordering of the sample space. This enables the calculation for both the non-adaptive and the adaptive case. For Fisher's combination test, it is available for \code{kMax = 2} only. 
} \examples{ \dontrun{ design <- getDesignInverseNormal(kMax = 2) data <- getDataset( n = c( 20, 30), means = c( 50, 51), stDevs = c(130, 140) ) getFinalPValue(getStageResults(design, dataInput = data)) } } \seealso{ Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedCombinationTestResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getStageResults}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/getData.Rd0000644000176200001440000000702414402556624014011 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \name{getData} \alias{getData} \alias{getData.SimulationResults} \title{Get Simulation Data} \usage{ getData(x) getData.SimulationResults(x) } \arguments{ \item{x}{A \code{\link{SimulationResults}} object created by \code{\link[=getSimulationMeans]{getSimulationMeans()}},\cr \code{\link[=getSimulationRates]{getSimulationRates()}}, \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}, \code{\link[=getSimulationMultiArmMeans]{getSimulationMultiArmMeans()}},\cr \code{\link[=getSimulationMultiArmRates]{getSimulationMultiArmRates()}}, or \code{\link[=getSimulationMultiArmSurvival]{getSimulationMultiArmSurvival()}}.} } \value{ Returns a \code{\link[base]{data.frame}}. } \description{ Returns the aggregated simulation data. } \details{ This function can be used to get the aggregated simulated data from a simulation results object, for example, obtained by \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}. In this case, the data frame contains the following columns: \enumerate{ \item \code{iterationNumber}: The number of the simulation iteration. 
\item \code{stageNumber}: The stage. \item \code{pi1}: The assumed or derived event rate in the treatment group. \item \code{pi2}: The assumed or derived event rate in the control group. \item \code{hazardRatio}: The hazard ratio under consideration (if available). \item \code{analysisTime}: The analysis time. \item \code{numberOfSubjects}: The number of subjects under consideration when the (interim) analysis takes place. \item \code{eventsPerStage1}: The observed number of events per stage in treatment group 1. \item \code{eventsPerStage2}: The observed number of events per stage in treatment group 2. \item \code{eventsPerStage}: The observed number of events per stage in both treatment groups. \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise. \item \code{eventsNotAchieved}: 1 if number of events could not be reached with observed number of subjects, 0 otherwise. \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise. \item \code{testStatistic}: The test statistic that is used for the test decision, depends on which design was chosen (group sequential, inverse normal, or Fisher combination test). \item \code{logRankStatistic}: Z-score statistic which corresponds to a one-sided log-rank test at considered stage. \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for selected sample size and effect. The effect is either estimated from the data or can be user defined with \code{thetaH1} or \code{pi1H1} and \code{pi2H1}. \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise. \item \code{hazardRatioEstimateLR}: The estimated hazard ratio, derived from the log-rank statistic. 
} A subset of variables is provided for \code{\link[=getSimulationMeans]{getSimulationMeans()}}, \code{\link[=getSimulationRates]{getSimulationRates()}}, \code{\link[=getSimulationMultiArmMeans]{getSimulationMultiArmMeans()}},\cr \code{\link[=getSimulationMultiArmRates]{getSimulationMultiArmRates()}}, or \code{\link[=getSimulationMultiArmSurvival]{getSimulationMultiArmSurvival()}}. } \examples{ results <- getSimulationSurvival( pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, eventTime = 12, accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200, maxNumberOfIterations = 50 ) data <- getData(results) head(data) dim(data) } rpact/man/SimulationResults.Rd0000644000176200001440000000206414411263557016145 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResults} \alias{SimulationResults} \title{Class for Simulation Results} \description{ A class for simulation results. } \details{ \code{SimulationResults} is the basic class for \itemize{ \item \code{\link{SimulationResultsMeans}}, \item \code{\link{SimulationResultsRates}}, \item \code{\link{SimulationResultsSurvival}}, \item \code{\link{SimulationResultsMultiArmMeans}}, \item \code{\link{SimulationResultsMultiArmRates}}, \item \code{\link{SimulationResultsMultiArmSurvival}}, \item \code{\link{SimulationResultsEnrichmentMeans}}, \item \code{\link{SimulationResultsEnrichmentRates}}, and \item \code{\link{SimulationResultsEnrichmentSurvival}}. } } \section{Fields}{ \describe{ \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{iterations}}{The number of iterations used for simulations. 
Is a numeric vector of length 1 containing a whole number.} }} \keyword{internal} rpact/man/StageResultsMeans.Rd0000644000176200001440000000455014450467342016053 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsMeans} \alias{StageResultsMeans} \title{Stage Results of Means} \description{ Class for stage results of means. } \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of means. } \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{overallTestStatistics}}{The overall, i.e., cumulated test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{pValues}}{The stage-wise p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{overallPValues}}{The overall, i.e., cumulated p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{direction}}{Specifies the direction of the alternative, is either "upper" or "lower". Only applicable for one-sided testing.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{equalVariances}}{Describes if the variances in two treatment groups are assumed to be the same. 
Is a logical vector of length 1.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. Is a numeric vector of length \code{kMax}.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. Is a numeric vector of length \code{kMax}.} \item{\code{...}}{Names of \code{dataInput}.} }} \keyword{internal} rpact/man/param_palette.Rd0000644000176200001440000000050014232463334015242 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_palette} \alias{param_palette} \title{Parameter Description: Palette} \arguments{ \item{palette}{The palette, default is \code{"Set1"}.} } \description{ Parameter Description: Palette } \keyword{internal} rpact/man/param_calcSubjectsFunction.Rd0000644000176200001440000000121614370155527017731 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_calcSubjectsFunction} \alias{param_calcSubjectsFunction} \title{Parameter Description: Calculate Subjects Function} \arguments{ \item{calcSubjectsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. 
By default, sample size recalculation is performed with conditional power and specified \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples).} } \description{ Parameter Description: Calculate Subjects Function } \keyword{internal} rpact/man/TrialDesign.Rd0000644000176200001440000000424714450467342014652 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \docType{class} \name{TrialDesign} \alias{TrialDesign} \title{Basic Trial Design} \description{ Basic class for trial designs. } \details{ \code{TrialDesign} is the basic class for \itemize{ \item \code{\link{TrialDesignFisher}}, \item \code{\link{TrialDesignGroupSequential}}, \item \code{\link{TrialDesignInverseNormal}}, and \item \code{\link{TrialDesignConditionalDunnett}}. } } \section{Fields}{ \describe{ \item{\code{kMax}}{The maximum number of stages \code{K}. Is a numeric vector of length 1 containing a whole number.} \item{\code{alpha}}{The significance level alpha, default is 0.025. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{informationRates}}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{userAlphaSpending}}{The user defined alpha spending. Contains the cumulative alpha-spending (type I error rate) up to each interim stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{criticalValues}}{The critical values for each stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{stageLevels}}{The adjusted significance levels to reach significance in a group sequential design. 
Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{alphaSpent}}{The cumulative alpha spent at each stage. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{bindingFutility}}{If \code{TRUE}, the calculation of the critical values is affected by the futility bounds and the futility threshold is binding in the sense that the study must be stopped if the futility condition was reached (default is \code{FALSE}). Is a logical vector of length 1.} \item{\code{tolerance}}{The numerical tolerance, default is \code{1e-06}. Is a numeric vector of length 1.} }} \keyword{internal} rpact/man/plot.NumberOfSubjects.Rd0000644000176200001440000000633614335631007016634 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_event_probabilities.R \name{plot.NumberOfSubjects} \alias{plot.NumberOfSubjects} \title{Number Of Subjects Plotting} \usage{ \method{plot}{NumberOfSubjects}( x, y, ..., allocationRatioPlanned = NA_real_, main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1L, legendTitle = NA_character_, palette = "Set1", plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, plotSettings = NULL ) } \arguments{ \item{x}{The object that inherits from \code{\link{NumberOfSubjects}}.} \item{y}{An optional object that inherits from \code{\link{EventProbabilities}}.} \item{...}{Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. Will be ignored if \code{y} is undefined.} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{type}{The plot type (default = 1). 
Note that at the moment only one type is available.} \item{legendTitle}{The legend title, default is \code{""}.} \item{palette}{The palette, default is \code{"Set1"}.} \item{plotPointsEnabled}{Logical. If \code{TRUE}, additional points will be plotted.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSettings()}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots an object that inherits from class \code{\link{NumberOfSubjects}}. 
} \details{ Generic function to plot a "number of subjects" object. Generic function to plot a parameter set. } rpact/man/param_lambda1.Rd0000644000176200001440000000077014312324046015111 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_lambda1} \alias{param_lambda1} \title{Parameter Description: Lambda (1)} \arguments{ \item{lambda1}{The assumed hazard rate in the treatment group, there is no default. \code{lambda1} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} } \description{ Parameter Description: Lambda (1) } \keyword{internal} rpact/man/getObservedInformationRates.Rd0000644000176200001440000000500414446314101020100 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_utilities.R \name{getObservedInformationRates} \alias{getObservedInformationRates} \title{Get Observed Information Rates} \usage{ getObservedInformationRates( dataInput, ..., maxInformation = NULL, informationEpsilon = NULL, stage = NA_integer_ ) } \arguments{ \item{dataInput}{The dataset for which the information rates shall be recalculated.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{maxInformation}{Positive integer value specifying the maximum information.} \item{informationEpsilon}{Positive integer value specifying the absolute information epsilon, which defines the maximum distance from the observed information to the maximum information that causes the final analysis. Updates at the final analysis in case the observed information at the final analysis is smaller ("under-running") than the planned maximum information \code{maxInformation}, default is 0. 
Alternatively, a floating-point number > 0 and < 1 can be specified to define a relative information epsilon.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} } \value{ Returns a list that summarizes the observed information rates. } \description{ Recalculates the observed information rates from the specified dataset. } \details{ For means and rates the maximum information is the maximum number of subjects or the relative proportion if \code{informationEpsilon} < 1; for survival data it is the maximum number of events or the relative proportion if \code{informationEpsilon} < 1. } \examples{ # Absolute information epsilon: # decision rule 45 >= 46 - 1, i.e., under-running data <- getDataset( overallN = c(22, 45), overallEvents = c(11, 28) ) getObservedInformationRates(data, maxInformation = 46, informationEpsilon = 1 ) # Relative information epsilon: # last information rate = 45/46 = 0.9783, # is > 1 - 0.03 = 0.97, i.e., under-running data <- getDataset( overallN = c(22, 45), overallEvents = c(11, 28) ) getObservedInformationRates(data, maxInformation = 46, informationEpsilon = 0.03 ) } \seealso{ \itemize{ \item \code{\link[=getAnalysisResults]{getAnalysisResults()}} for using \code{getObservedInformationRates()} implicit, \item \href{https://www.rpact.org/vignettes/planning/rpact_boundary_update_example/}{www.rpact.org/vignettes/planning/rpact_boundary_update_example} } } rpact/man/param_maxNumberOfSubjects.Rd0000644000176200001440000000075714232463334017550 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_maxNumberOfSubjects} \alias{param_maxNumberOfSubjects} \title{Parameter Description: Maximum Number Of Subjects} \arguments{ \item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified. 
For two treatment arms, it is the maximum number of subjects for both treatment arms.} } \description{ Parameter Description: Maximum Number Of Subjects } \keyword{internal} rpact/man/StageResultsEnrichmentMeans.Rd0000644000176200001440000000724514450467342020074 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsEnrichmentMeans} \alias{StageResultsEnrichmentMeans} \title{Stage Results Enrichment Means} \description{ Class for stage results of enrichment means data } \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of enrichment means. } \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{thetaH0}}{The difference or assumed effect under H0. Is a numeric vector of length 1.} \item{\code{direction}}{Specifies the direction of the alternative, is either "upper" or "lower". Only applicable for one-sided testing.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple (i.e., >2) treatment arms or population enrichment designs when testing means. Available options for multiple arms: \code{"overallPooled", "pairwisePooled", "notPooled"}. 
Available options for enrichment designs: \code{"pooled", "pooledFromFull", "notPooled"}.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{overallTestStatistics}}{The overall, i.e., cumulated test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{pValues}}{The stage-wise p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{overallPValues}}{The overall, i.e., cumulated p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{overallStDevs}}{The overall, i.e., cumulative standard deviations. Is a numeric vector of length number of stages times number of groups.} \item{\code{overallPooledStDevs}}{The overall pooled standard deviations. Is a numeric matrix.} \item{\code{separatePValues}}{The p-values from the separate stages. Is a numeric matrix.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{singleStepAdjustedPValues}}{The adjusted p-value for testing multiple hypotheses per stage of the trial.} \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. When testing means and rates, a non-stratified analysis can be performed on overall data. For survival data, only a stratified analysis is possible. Is a logical vector of length 1.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. 
Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. Is a numeric vector of length \code{kMax}.} }} \keyword{internal} rpact/man/getFinalConfidenceInterval.Rd0000644000176200001440000001062214427374266017661 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getFinalConfidenceInterval} \alias{getFinalConfidenceInterval} \title{Get Final Confidence Interval} \usage{ getFinalConfidenceInterval( design, dataInput, ..., directionUpper = TRUE, thetaH0 = NA_real_, tolerance = 1e-06, stage = NA_integer_ ) } \arguments{ \item{design}{The trial design.} \item{dataInput}{The summary data used for calculating the test results. This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival} and should be created with the function \code{\link[=getDataset]{getDataset()}}. For more information see \code{\link[=getDataset]{getDataset()}}.} \item{...}{Further (optional) arguments to be passed: \describe{ \item{\code{normalApproximation}}{ The type of computation of the p-values. Default is \code{FALSE} for testing means (i.e., the t test is used) and TRUE for testing rates and the hazard ratio. For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values. In the survival setting, \code{normalApproximation = FALSE} has no effect.} \item{\code{equalVariances}}{The type of t test. For testing means in two treatment groups, either the t test assuming that the variances are equal or the t test without assuming this, i.e., the test of Welch-Satterthwaite is calculated, default is \code{TRUE}.} }} \item{directionUpper}{Logical. 
Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{tolerance}{The numerical tolerance, default is \code{1e-06}. Must be a positive numeric of length 1.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} } \value{ Returns a \code{\link[base]{list}} containing \itemize{ \item \code{finalStage}, \item \code{medianUnbiased}, \item \code{finalConfidenceInterval}, \item \code{medianUnbiasedGeneral}, and \item \code{finalConfidenceIntervalGeneral}. } } \description{ Returns the final confidence interval for the parameter of interest. It is based on the prototype case, i.e., the test for testing a mean for normally distributed variables. } \details{ Depending on \code{design} and \code{dataInput} the final confidence interval and median unbiased estimate that is based on the stage-wise ordering of the sample space will be calculated and returned. 
Additionally, a non-standardized ("general") version is provided, the estimated standard deviation must be used to obtain the confidence interval for the parameter of interest. For the inverse normal combination test design with more than two stages, a warning informs that the validity of the confidence interval is theoretically shown only if no sample size change was performed. } \examples{ \dontrun{ design <- getDesignInverseNormal(kMax = 2) data <- getDataset( n = c(20, 30), means = c(50, 51), stDevs = c(130, 140) ) getFinalConfidenceInterval(design, dataInput = data) } } \seealso{ Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedCombinationTestResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getStageResults}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/param_groups.Rd0000644000176200001440000000057214232463334015134 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_groups} \alias{param_groups} \title{Parameter Description: Number Of Treatment Groups} \arguments{ \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} } \description{ Parameter Description: Number Of Treatment Groups } \keyword{internal} rpact/man/getWideFormat.Rd0000644000176200001440000000127614313321256015174 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_utilities.R \name{getWideFormat} \alias{getWideFormat} \title{Get Wide Format} \usage{ getWideFormat(dataInput) } \value{ A \code{\link[base]{data.frame}} will be returned. } \description{ Returns the specified dataset as a \code{\link[base]{data.frame}} in so-called wide format. 
} \details{ In the wide format (unstacked), the data are presented with each different data variable in a separate column, i.e., the different groups are in separate columns. } \seealso{ \code{\link[=getLongFormat]{getLongFormat()}} for returning the dataset as a \code{\link[base]{data.frame}} in long format. } \keyword{internal} rpact/man/dataEnrichmentMeansStratified.Rd0000644000176200001440000000112414313321260020347 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataEnrichmentMeansStratified} \alias{dataEnrichmentMeansStratified} \title{Stratified Enrichment Dataset of Means} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataEnrichmentMeansStratified } \description{ A dataset containing the sample sizes, means, and standard deviations of two groups. Use \code{getDataset(dataEnrichmentMeansStratified)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. } \keyword{internal} rpact/man/SimulationResultsEnrichmentSurvival.Rd0000644000176200001440000001555714450467343021733 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_simulation_results.R \docType{class} \name{SimulationResultsEnrichmentSurvival} \alias{SimulationResultsEnrichmentSurvival} \title{Class for Simulation Results Enrichment Survival} \description{ A class for simulation results survival in enrichment designs. } \details{ Use \code{\link[=getSimulationEnrichmentSurvival]{getSimulationEnrichmentSurvival()}} to create an object of this type. } \section{Fields}{ \describe{ \item{\code{maxNumberOfIterations}}{The number of simulation iterations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. 
For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{plannedSubjects}}{Determines the number of cumulated (overall) subjects when the interim stages are planned. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, refers to the number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{minNumberOfSubjectsPerStage}}{Determines the minimum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the minimum number of subjects per selected active arm. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{maxNumberOfSubjectsPerStage}}{Determines the maximum number of subjects per stage for data-driven sample size recalculation. For two treatment arms, is the number of subjects for both treatment arms. For multi-arm designs, is the minimum number of subjects per selected active arm. 
Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{calcEventsFunction}}{An optional function that can be entered to define how event size is recalculated. By default, recalculation is performed with conditional power with specified \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage}.} \item{\code{expectedNumberOfEvents}}{The expected number of events under specified alternative. Is a numeric vector.} \item{\code{populations}}{The number of populations in an enrichment design. Is a numeric vector of length 1 containing a whole number.} \item{\code{effectList}}{The list of subsets, prevalences and effect sizes with columns and number of rows reflecting the different situations to be considered.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. When testing means and rates, a non-stratified analysis can be performed on overall data. For survival data, only a stratified analysis is possible. Is a logical vector of length 1.} \item{\code{adaptations}}{Indicates whether or not an adaptation takes place at interim k. Is a logical vector of length \code{kMax} minus 1.} \item{\code{typeOfSelection}}{The way the treatment arms or populations are selected at interim. Is a character vector of length 1.} \item{\code{effectMeasure}}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (\code{"effectEstimate"}). Is a character vector of length 1.} \item{\code{successCriterion}}{Defines when the study is stopped for efficacy at interim. 
\code{"all"} stops the trial if the efficacy criterion has been fulfilled for all selected treatment arms/populations, \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be superior to control at interim. Is a character vector of length 1.} \item{\code{epsilonValue}}{Needs to be specified if \code{typeOfSelection = "epsilon"}. Is a numeric vector of length 1.} \item{\code{rValue}}{Needs to be specified if \code{typeOfSelection = "rBest"}. Is a numeric vector of length 1.} \item{\code{threshold}}{The selection criterion: treatment arm/population is only selected if \code{effectMeasure} exceeds \code{threshold}. Either a single numeric value or a numeric vector of length \code{activeArms} referring to a separate threshold condition for each treatment arm.} \item{\code{selectPopulationsFunction}}{An optional function that can be entered to define the way of how populations are selected.} \item{\code{correlationComputation}}{If \code{"alternative"}, a correlation matrix according to Deng et al. (Biometrics, 2019) accounting for the respective alternative is used for simulating log-rank statistics in the many-to-one design. If \code{"null"}, a constant correlation matrix valid under the null hypothesis is used.} \item{\code{earlyStop}}{The probability of stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{selectedPopulations}}{The selected populations in enrichment designs.} \item{\code{numberOfPopulations}}{The number of populations in an enrichment design. Is a numeric matrix.} \item{\code{rejectAtLeastOne}}{The probability to reject at least one of the (multiple) hypotheses. Is a numeric vector.} \item{\code{rejectedPopulationsPerStage}}{The simulated number of rejected populations per stage.} \item{\code{successPerStage}}{The simulated success probabilities per stage where success is defined by user. Is a numeric matrix.} \item{\code{eventsPerStage}}{The number of events per stage. 
Is a numeric matrix.} \item{\code{singleNumberOfEventsPerStage}}{In simulation results data set: the number of events per stage that is used for the analysis.} \item{\code{conditionalPowerAchieved}}{The calculated conditional power, under the assumption of observed or assumed effect sizes. Is a numeric matrix.} }} \keyword{internal} rpact/man/plot.StageResults.Rd0000644000176200001440000001275314417202031016031 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \name{plot.StageResults} \alias{plot.StageResults} \title{Stage Results Plotting} \usage{ \method{plot}{StageResults}( x, y, ..., type = 1L, nPlanned, allocationRatioPlanned = 1, main = NA_character_, xlab = NA_character_, ylab = NA_character_, legendTitle = NA_character_, palette = "Set1", legendPosition = NA_integer_, showSource = FALSE, plotSettings = NULL ) } \arguments{ \item{x}{The stage results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}} or \code{\link[=getAnalysisResults]{getAnalysisResults()}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional \link[=param_three_dots_plot]{plot arguments}. Furthermore the following arguments can be defined: \itemize{ \item \code{thetaRange}: A range of assumed effect sizes if testing means or a survival design was specified. Additionally, if testing means was selected, an assumed standard deviation can be specified (default is 1). \item \code{piTreatmentRange}: A range of assumed rates pi1 to calculate the conditional power. Additionally, if a two-sample comparison was selected, pi2 can be specified (default is the value from \code{\link[=getAnalysisResults]{getAnalysisResults()}}). 
\item \code{directionUpper}: Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. \item \code{\link[=param_thetaH0]{thetaH0}}: The null hypothesis value, default is 0 for the normal and the binary case, it is 1 for the survival case. For testing a rate in one sample, a value thetaH0 in (0,1) has to be specified for defining the null hypothesis H0: pi = thetaH0. }} \item{type}{The plot type (default = 1). Note that at the moment only one type (the conditional power plot) is available.} \item{nPlanned}{The additional (i.e., "new" and not cumulative) sample size planned for each of the subsequent stages. The argument must be a vector with length equal to the number of remaining stages and contain the combined sample size from both treatment groups if two groups are considered. For survival outcomes, it should contain the planned number of additional events. For multi-arm designs, it is the per-comparison (combined) sample size. For enrichment designs, it is the (combined) sample size for the considered sub-population.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{legendTitle}{The legend title.} \item{palette}{The palette, default is \code{"Set1"}.} \item{legendPosition}{The position of the legend. 
By default (\code{NA_integer_}) the algorithm tries to find a suitable position. Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSetting()s}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots the conditional power together with the likelihood function. } \details{ Generic function to plot all kinds of stage results. The conditional power is calculated only if effect size and sample size is specified. 
} \examples{ design <- getDesignGroupSequential( kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.5, 0.8, 1), typeOfDesign = "WT", deltaWT = 0.25 ) dataExample <- getDataset( n = c(20, 30, 30), means = c(50, 51, 55), stDevs = c(130, 140, 120) ) stageResults <- getStageResults(design, dataExample, thetaH0 = 20) \dontrun{ if (require(ggplot2)) plot(stageResults, nPlanned = c(30), thetaRange = c(0, 100)) } } rpact/man/summary.AnalysisResults.Rd0000644000176200001440000000551014335631007017271 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \name{summary.AnalysisResults} \alias{summary.AnalysisResults} \title{Analysis Results Summary} \usage{ \method{summary}{AnalysisResults}(object, ..., type = 1, digits = NA_integer_) } \arguments{ \item{object}{An \code{\link{AnalysisResults}} object.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{digits}{Defines how many digits are to be used for numeric values. Must be a positive integer of length 1.} } \value{ Returns a \code{\link{SummaryFactory}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object } } \description{ Displays a summary of \code{\link{AnalysisResults}} object. } \details{ Summarizes the parameters and results of an analysis results object. } \section{Summary options}{ The following options can be set globally: \enumerate{ \item \code{rpact.summary.output.size}: one of \code{c("small", "medium", "large")}; defines how many details will be included into the summary; default is \code{"large"}, i.e., all available details are displayed. 
\item \code{rpact.summary.justify}: one of \code{c("right", "left", "centre")}; shall the values be right-justified (the default), left-justified or centered. \item \code{rpact.summary.width}: defines the maximum number of characters to be used per line (default is \code{83}). \item \code{rpact.summary.intervalFormat}: defines how intervals will be displayed in the summary, default is \code{"[\%s; \%s]"}. \item \code{rpact.summary.digits}: defines how many digits are to be used for numeric values (default is \code{3}). \item \code{rpact.summary.digits.probs}: defines how many digits are to be used for numeric values (default is one more than value of \code{rpact.summary.digits}, i.e., \code{4}). \item \code{rpact.summary.trim.zeroes}: if \code{TRUE} (default) zeroes will always displayed as "0", e.g. "0.000" will become "0". } Example: \code{options("rpact.summary.intervalFormat" = "\%s - \%s")} } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \keyword{internal} rpact/man/TrialDesignSet.Rd0000644000176200001440000000164214402556623015320 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \docType{class} \name{TrialDesignSet} \alias{TrialDesignSet} \title{Class for trial design sets.} \description{ \code{TrialDesignSet} is a class for creating a collection of different trial designs. 
} \details{ This object cannot be created directly; better use \code{\link[=getDesignSet]{getDesignSet()}} with suitable arguments to create a set of designs. } \section{Fields}{ \describe{ \item{\code{designs}}{The trial designs to be compared.} \item{\code{design}}{The trial design.} \item{\code{variedParameters}}{A character vector containing the names of the parameters that vary between designs.} }} \section{Methods}{ \describe{ \item{\code{add(...)}}{Adds 'designs' OR a 'design' and/or a design parameter, e.g., deltaWT = c(0.1, 0.3, 0.4)} }} \seealso{ \code{\link[=getDesignSet]{getDesignSet()}} } \keyword{internal} rpact/man/param_stDevSimulation.Rd0000644000176200001440000000110114335631011016725 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_stDevSimulation} \alias{param_stDevSimulation} \title{Parameter Description: Standard Deviation for Simulation} \arguments{ \item{stDev}{The standard deviation under which the data is simulated, default is \code{1}. If \code{meanRatio = TRUE} is specified, \code{stDev} defines the coefficient of variation \code{sigma / mu2}. Must be a positive numeric of length 1.} } \description{ Parameter Description: Standard Deviation for Simulation } \keyword{internal} rpact/man/names.StageResults.Rd0000644000176200001440000000114214261025267016157 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \name{names.StageResults} \alias{names.StageResults} \title{Names of a Stage Results Object} \usage{ \method{names}{StageResults}(x) } \arguments{ \item{x}{A \code{\link{StageResults}} object.} } \value{ Returns a \code{\link[base]{character}} vector containing the names of the \code{\link{AnalysisResults}} object. } \description{ Function to get the names of a \code{\link{StageResults}} object. } \details{ Returns the names of stage results that can be accessed by the user. 
} \keyword{internal} rpact/man/getObjectRCode.Rd0000644000176200001440000000511514445304766015267 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_object_r_code.R \name{rcmd} \alias{rcmd} \alias{getObjectRCode} \title{Get Object R Code} \usage{ rcmd( obj, ..., leadingArguments = NULL, includeDefaultParameters = FALSE, stringWrapParagraphWidth = 90, prefix = "", postfix = "", stringWrapPrefix = "", newArgumentValues = list() ) getObjectRCode( obj, ..., leadingArguments = NULL, includeDefaultParameters = FALSE, stringWrapParagraphWidth = 90, prefix = "", postfix = "", stringWrapPrefix = "", newArgumentValues = list(), tolerance = 1e-07, pipeOperator = c("auto", "none", "magrittr", "R"), output = c("vector", "cat", "test", "markdown", "internal"), explicitPrint = FALSE ) } \arguments{ \item{obj}{The result object.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{leadingArguments}{A character vector with arguments that shall be inserted at the beginning of the function command, e.g., \code{design = x}. 
Be careful with this option because the created R command may no longer be valid if used.} \item{includeDefaultParameters}{If \code{TRUE}, default parameters will be included in all \code{rpact} commands; default is \code{FALSE}.} \item{stringWrapParagraphWidth}{An integer value defining the number of characters after which a line break shall be inserted; set to \code{NULL} to insert no line breaks.} \item{prefix}{A character string that shall be added to the beginning of the R command.} \item{postfix}{A character string that shall be added to the end of the R command.} \item{stringWrapPrefix}{A prefix character string that shall be added to each new line, typically some spaces.} \item{newArgumentValues}{A named list with arguments that shall be renewed in the R command, e.g., \code{newArgumentValues = list(informationRates = c(0.5, 1))}.} \item{tolerance}{The tolerance for defining a value as default.} \item{pipeOperator}{The pipe operator to use in the R code, default is "none".} \item{output}{The output format, default is a character "vector".} \item{explicitPrint}{Show an explicit \code{print} command, default is \code{FALSE}.} } \value{ A \code{\link[base]{character}} value or vector will be returned. } \description{ Returns the R source command of a result object. } \details{ \code{\link[=getObjectRCode]{getObjectRCode()}} (short: \code{\link[=rcmd]{rcmd()}}) recreates the R commands that result in the specified object \code{obj}. \code{obj} must be an instance of class \code{ParameterSet}. } rpact/man/StageResultsEnrichmentSurvival.Rd0000644000176200001440000000317014450467342020635 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsEnrichmentSurvival} \alias{StageResultsEnrichmentSurvival} \title{Stage Results Enrichment Survival} \description{ Class for stage results of enrichment survival data. 
} \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of enrichment survival. } \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{pValues}}{The stage-wise p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. Is a numeric vector of length \code{kMax}.} }} \keyword{internal} rpact/man/param_stage.Rd0000644000176200001440000000055014232463333014713 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_stage} \alias{param_stage} \title{Parameter Description: Stage} \arguments{ \item{stage}{The stage number (optional). 
Default: total number of existing stages in the data input.} } \description{ Parameter Description: Stage } \keyword{internal} rpact/man/dataEnrichmentSurvival.Rd0000644000176200001440000000107314313321260017103 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataEnrichmentSurvival} \alias{dataEnrichmentSurvival} \title{Enrichment Dataset of Survival Data} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataEnrichmentSurvival } \description{ A dataset containing the log-rank statistics, events, and allocation ratios of two groups. Use \code{getDataset(dataEnrichmentSurvival)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. } \keyword{internal} rpact/man/AnalysisResultsGroupSequential.Rd0000644000176200001440000001117114450467342020654 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{AnalysisResultsGroupSequential} \alias{AnalysisResultsGroupSequential} \title{Analysis Results Group Sequential} \description{ Class for analysis results results based on a group sequential design. } \details{ This object cannot be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the analysis results of a group sequential design. } \section{Fields}{ \describe{ \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{thetaH0}}{The difference or assumed effect under H0. 
Is a numeric vector of length 1.} \item{\code{pi1}}{The assumed probability or probabilities in the active treatment group in two-group designs, or the alternative probability for a one-group design.} \item{\code{pi2}}{The assumed probability in the reference group for two-group designs. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDev}}{The assumed standard deviation(s) for means analysis. Is a numeric vector.} \item{\code{equalVariances}}{Describes if the variances in two treatment groups are assumed to be the same. Is a logical vector of length 1.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{conditionalRejectionProbabilities}}{The probabilities of rejecting the null hypothesis at each stage, given the stage has been reached. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{repeatedConfidenceIntervalLowerBounds}}{The lower bound of the confidence intervals that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax}.} \item{\code{repeatedConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that are calculated at any stage of the trial. 
Is a numeric vector of length \code{kMax}.} \item{\code{repeatedPValues}}{The p-values that are calculated at any stage of the trial. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{finalStage}}{The stage at which the trial ends, either with acceptance or rejection of the null hypothesis. Is a numeric vector of length 1.} \item{\code{finalPValues}}{The final p-value that is based on the stage-wise ordering. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{finalConfidenceIntervalLowerBounds}}{The lower bound of the confidence interval that is based on the stage-wise ordering. Is a numeric vector of length \code{kMax}.} \item{\code{finalConfidenceIntervalUpperBounds}}{The upper bound of the confidence interval that is based on the stage-wise ordering. Is a numeric vector of length \code{kMax}.} \item{\code{medianUnbiasedEstimates}}{The calculated median unbiased estimates that are based on the stage-wise ordering. Is a numeric vector of length \code{kMax}.} \item{\code{maxInformation}}{The maximum information. Is a numeric vector of length 1 containing a whole number.} \item{\code{informationEpsilon}}{The absolute information epsilon, which defines the maximum distance from the observed information to the maximum information that causes the final analysis. Updates at the final analysis if the observed information at the final analysis is smaller ("under-running") than the planned maximum information. 
Is either a positive integer value specifying the absolute information epsilon or a floating point number >0 and <1 to define a relative information epsilon.} }} \keyword{internal} rpact/man/kable.ParameterSet.Rd0000644000176200001440000000201414277172600016101 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{kable.ParameterSet} \alias{kable.ParameterSet} \title{Create output in Markdown} \usage{ kable.ParameterSet(x, ...) } \arguments{ \item{x}{A \code{ParameterSet}. If x does not inherit from class \code{\link{ParameterSet}}, \code{knitr::kable(x)} will be returned.} \item{...}{Other arguments (see \code{\link[knitr]{kable}}).} } \description{ The \code{kable()} function returns the output of the specified object formatted in Markdown. } \details{ Generic function to represent a parameter set in Markdown. Use \code{options("rpact.print.heading.base.number" = "NUMBER")} (where \code{NUMBER} is an integer value >= -1) to specify the heading level. The default is \code{options("rpact.print.heading.base.number" = "0")}, i.e., the top headings start with \code{##} in Markdown. \code{options("rpact.print.heading.base.number" = "-1")} means that all headings will be written bold but are not explicit defined as header. } rpact/man/t-FieldSet-method.Rd0000644000176200001440000000070114232463333015644 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{t,FieldSet-method} \alias{t,FieldSet-method} \title{Field Set Transpose} \usage{ \S4method{t}{FieldSet}(x) } \arguments{ \item{x}{A \code{FieldSet}.} } \description{ Given a \code{FieldSet} \code{x}, t returns the transpose of \code{x}. 
} \details{ Implementation of the base R generic function \code{\link[base]{t}} } \keyword{internal} rpact/man/utilitiesForSurvivalTrials.Rd0000644000176200001440000000411614335631007020026 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_utilities.R \name{utilitiesForSurvivalTrials} \alias{utilitiesForSurvivalTrials} \alias{getLambdaByPi} \alias{getLambdaByMedian} \alias{getHazardRatioByPi} \alias{getPiByLambda} \alias{getPiByMedian} \alias{getMedianByLambda} \alias{getMedianByPi} \title{Survival Helper Functions for Conversion of Pi, Lambda, Median} \usage{ getLambdaByPi(piValue, eventTime = 12, kappa = 1) getLambdaByMedian(median, kappa = 1) getHazardRatioByPi(pi1, pi2, eventTime = 12, kappa = 1) getPiByLambda(lambda, eventTime = 12, kappa = 1) getPiByMedian(median, eventTime = 12, kappa = 1) getMedianByLambda(lambda, kappa = 1) getMedianByPi(piValue, eventTime = 12, kappa = 1) } \arguments{ \item{piValue, pi1, pi2, lambda, median}{Value that shall be converted.} \item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.} \item{kappa}{A numeric value > 0. A \code{kappa != 1} will be used for the specification of the shape of the Weibull distribution. Default is \code{1}, i.e., the exponential survival distribution is used instead of the Weibull distribution. Note that the Weibull distribution cannot be used for the piecewise definition of the survival time distribution, i.e., only \code{piecewiselambda} (as a single value) and \code{kappa} can be specified. 
This function is equivalent to \code{pweibull(t, shape = kappa, scale = 1 / lambda)} of the \code{stats} package, i.e., the scale parameter is \code{1 / 'hazard rate'}.\cr For example, \code{getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2)} and \code{pweibull(q = 130, shape = 4.2, scale = 1 / 0.01)} provide the sample result.} } \value{ Returns a \code{\link[base]{numeric}} value or vector will be returned. } \description{ Functions to convert pi, lambda and median values into each other. } \details{ Can be used, e.g., to convert median values into pi or lambda values for usage in \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}} or \code{\link[=getPowerSurvival]{getPowerSurvival()}}. } rpact/man/getSimulationSurvival.Rd0000644000176200001440000007157214400317257017024 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_simulation_base_survival.R \name{getSimulationSurvival} \alias{getSimulationSurvival} \title{Get Simulation Survival} \usage{ getSimulationSurvival( design = NULL, ..., thetaH0 = 1, directionUpper = TRUE, pi1 = NA_real_, pi2 = NA_real_, lambda1 = NA_real_, lambda2 = NA_real_, median1 = NA_real_, median2 = NA_real_, hazardRatio = NA_real_, kappa = 1, piecewiseSurvivalTime = NA_real_, allocation1 = 1, allocation2 = 1, eventTime = 12, accrualTime = c(0, 12), accrualIntensity = 0.1, accrualIntensityType = c("auto", "absolute", "relative"), dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, maxNumberOfSubjects = NA_real_, plannedEvents = NA_real_, minNumberOfEventsPerStage = NA_real_, maxNumberOfEventsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, maxNumberOfIterations = 1000L, maxNumberOfRawDatasetsPerStage = 0, longTimeSimulationAllowed = FALSE, seed = NA_real_, calcEventsFunction = NULL, showStatistics = FALSE ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. 
In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{directionUpper}{Logical. Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{pi1}{A numeric value or vector that represents the assumed event rate in the treatment group, default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or \code{seq(0.4, 0.6, 0.1)} (sample size calculations).} \item{pi2}{A numeric value that represents the assumed event rate in the control group, default is \code{0.2}.} \item{lambda1}{The assumed hazard rate in the treatment group, there is no default. \code{lambda1} can also be used to define piecewise exponentially distributed survival times (see details). 
Must be a positive numeric of length 1.} \item{lambda2}{The assumed hazard rate in the reference group, there is no default. \code{lambda2} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} \item{median1}{The assumed median survival time in the treatment group, there is no default.} \item{median2}{The assumed median survival time in the reference group, there is no default. Must be a positive numeric of length 1.} \item{hazardRatio}{The vector of hazard ratios under consideration. If the event or hazard rates in both treatment groups are defined, the hazard ratio needs not to be specified as it is calculated, there is no default. Must be a positive numeric of length 1.} \item{kappa}{A numeric value > 0. A \code{kappa != 1} will be used for the specification of the shape of the Weibull distribution. Default is \code{1}, i.e., the exponential survival distribution is used instead of the Weibull distribution. Note that the Weibull distribution cannot be used for the piecewise definition of the survival time distribution, i.e., only \code{piecewiselambda} (as a single value) and \code{kappa} can be specified. 
This function is equivalent to \code{pweibull(t, shape = kappa, scale = 1 / lambda)} of the \code{stats} package, i.e., the scale parameter is \code{1 / 'hazard rate'}.\cr For example, \code{getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2)} and \code{pweibull(q = 130, shape = 4.2, scale = 1 / 0.01)} provide the sample result.} \item{piecewiseSurvivalTime}{A vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function \cr (for details see \code{\link[=getPiecewiseSurvivalTime]{getPiecewiseSurvivalTime()}}).} \item{allocation1}{The number how many subjects are assigned to treatment 1 in a subsequent order, default is \code{1}} \item{allocation2}{The number how many subjects are assigned to treatment 2 in a subsequent order, default is \code{1}} \item{eventTime}{The assumed time under which the event rates are calculated, default is \code{12}.} \item{accrualTime}{The assumed accrual time intervals for the study, default is \code{c(0, 12)} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensity}{A numeric vector of accrual intensities, default is the relative intensity \code{0.1} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}).} \item{accrualIntensityType}{A character value specifying the accrual intensity input type. Must be one of \code{"auto"}, \code{"absolute"}, or \code{"relative"}; default is \code{"auto"}, i.e., if all values are < 1 the type is \code{"relative"}, otherwise it is \code{"absolute"}.} \item{dropoutRate1}{The assumed drop-out rate in the treatment group, default is \code{0}.} \item{dropoutRate2}{The assumed drop-out rate in the control group, default is \code{0}.} \item{dropoutTime}{The assumed time for drop-out rates in the control and the treatment group, default is \code{12}.} \item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified. 
If accrual time and accrual intensity are specified, this will be calculated. Must be a positive integer of length 1.} \item{plannedEvents}{\code{plannedEvents} is a numeric vector of length \code{kMax} (the number of stages of the design) that determines the number of cumulated (overall) events in survival designs when the interim stages are planned. For two treatment arms, it is the number of events for both treatment arms. For multi-arm designs, \code{plannedEvents} refers to the overall number of events for the selected arms plus control.} \item{minNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{minNumberOfEventsPerStage} with length kMax determines the minimum number of events per stage (i.e., not cumulated), the first element is not taken into account.} \item{maxNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfEventsPerStage} with length kMax determines the maximum number of events per stage (i.e., not cumulated), the first element is not taken into account.} \item{conditionalPower}{If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. It is defined as the power for the subsequent stage given the current data. By default, the conditional power will be calculated under the observed effect size. 
Optionally, you can also specify \code{thetaH1} and \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed.} \item{thetaH1}{If specified, the value of the alternative under which the conditional power or sample size recalculation calculation is performed. Must be a numeric of length 1.} \item{maxNumberOfIterations}{The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1.} \item{maxNumberOfRawDatasetsPerStage}{The number of raw datasets per stage that shall be extracted and saved as \code{\link[base]{data.frame}}, default is \code{0}. \code{\link[=getRawData]{getRawData()}} can be used to get the extracted raw data from the object.} \item{longTimeSimulationAllowed}{Logical that indicates whether long time simulations that consumes more than 30 seconds are allowed or not, default is \code{FALSE}.} \item{seed}{The seed to reproduce the simulation, default is a random seed.} \item{calcEventsFunction}{Optionally, a function can be entered that defines the way of performing the sample size recalculation. By default, event number recalculation is performed with conditional power and specified \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} (see details and examples).} \item{showStatistics}{Logical. If \code{TRUE}, summary statistics of the simulated data are displayed for the \code{print} command, otherwise the output is suppressed, default is \code{FALSE}.} } \value{ Returns a \code{\link{SimulationResults}} object. 
The following generics (R generic functions) are available for this object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.SimulationResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the analysis times, power, stopping probabilities, conditional power, and expected sample size for testing the hazard ratio in a two treatment groups survival design. } \details{ At given design the function simulates the power, stopping probabilities, conditional power, and expected sample size at given number of events, number of subjects, and parameter configuration. It also simulates the time when the required events are expected under the given assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times and constant or non-constant piecewise accrual). Additionally, integers \code{allocation1} and \code{allocation2} can be specified that determine the number allocated to treatment group 1 and treatment group 2, respectively. More precisely, unequal randomization ratios must be specified via the two integer arguments \code{allocation1} and \code{allocation2} which describe how many subjects are consecutively enrolled in each group, respectively, before a subject is assigned to the other group. For example, the arguments \code{allocation1 = 2}, \code{allocation2 = 1}, \code{maxNumberOfSubjects = 300} specify 2:1 randomization with 200 subjects randomized to intervention and 100 to control. 
(Caveat: Do not use \code{allocation1 = 200}, \code{allocation2 = 100}, \code{maxNumberOfSubjects = 300} as this would imply that the 200 intervention subjects are enrolled prior to enrollment of any control subjects.) \code{conditionalPower}\cr The definition of \code{thetaH1} only makes sense if \code{kMax} > 1 and if \code{conditionalPower}, \code{minNumberOfEventsPerStage}, and \code{maxNumberOfEventsPerStage} are defined. Note that \code{numberOfSubjects}, \code{numberOfSubjects1}, and \code{numberOfSubjects2} in the output are the expected number of subjects. \code{calcEventsFunction}\cr This function returns the number of events at given conditional power and conditional critical value for the specified testing situation. The function might depend on variables \code{stage}, \code{conditionalPower}, \code{thetaH0}, \code{plannedEvents}, \code{eventsPerStage}, \code{minNumberOfEventsPerStage}, \code{maxNumberOfEventsPerStage}, \code{allocationRatioPlanned}, \code{conditionalCriticalValue}. The function has to contain the three-dots argument '...' (see examples). } \section{Piecewise survival time}{ The first element of the vector \code{piecewiseSurvivalTime} must be equal to \code{0}. \code{piecewiseSurvivalTime} can also be a list that combines the definition of the time intervals and hazard rates in the reference group. The definition of the survival time in the treatment group is obtained by the specification of the hazard ratio (see examples for details). } \section{Staggered patient entry}{ \code{accrualTime} is the time period of subjects' accrual in a study. It can be a value that defines the end of accrual or a vector. In this case, \code{accrualTime} can be used to define a non-constant accrual over time. For this, \code{accrualTime} is a vector that defines the accrual intervals. The first element of \code{accrualTime} must be equal to \code{0} and, additionally, \code{accrualIntensity} needs to be specified. 
\code{accrualIntensity} itself is a value or a vector (depending on the length of \code{accrualTime}) that defines the intensity how subjects enter the trial in the intervals defined through \code{accrualTime}. \code{accrualTime} can also be a list that combines the definition of the accrual time and accrual intensity (see below and examples for details). If the length of \code{accrualTime} and the length of \code{accrualIntensity} are the same (i.e., the end of accrual is undefined), \code{maxNumberOfSubjects > 0} needs to be specified and the end of accrual is calculated. In that case, \code{accrualIntensity} is the number of subjects per time unit, i.e., the absolute accrual intensity. If the length of \code{accrualTime} equals the length of \code{accrualIntensity - 1} (i.e., the end of accrual is defined), \code{maxNumberOfSubjects} is calculated if the absolute accrual intensity is given. If all elements in \code{accrualIntensity} are smaller than 1, \code{accrualIntensity} defines the \emph{relative} intensity how subjects enter the trial. For example, \code{accrualIntensity = c(0.1, 0.2)} specifies that in the second accrual interval the intensity is doubled as compared to the first accrual interval. The actual (absolute) accrual intensity is calculated for the calculated or given \code{maxNumberOfSubjects}. Note that the default is \code{accrualIntensity = 0.1} meaning that the \emph{absolute} accrual intensity will be calculated. 
} \section{Simulation Data}{ The summary statistics "Simulated data" contains the following parameters: median \link{range}; mean +/-sd\cr \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable the output of the aggregated simulated data.\cr Example 1: \cr \code{simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 100, plannedEvents = 30)} \cr \code{simulationResults$show(showStatistics = FALSE)}\cr Example 2: \cr \code{simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 100, plannedEvents = 30)} \cr \code{simulationResults$setShowStatistics(FALSE)}\cr \code{simulationResults}\cr \code{\link[=getData]{getData()}} can be used to get the aggregated simulated data from the object as \code{\link[base]{data.frame}}. The data frame contains the following columns: \enumerate{ \item \code{iterationNumber}: The number of the simulation iteration. \item \code{stageNumber}: The stage. \item \code{pi1}: The assumed or derived event rate in the treatment group. \item \code{pi2}: The assumed or derived event rate in the control group. \item \code{hazardRatio}: The hazard ratio under consideration (if available). \item \code{analysisTime}: The analysis time. \item \code{numberOfSubjects}: The number of subjects under consideration when the (interim) analysis takes place. \item \code{eventsPerStage1}: The observed number of events per stage in treatment group 1. \item \code{eventsPerStage2}: The observed number of events per stage in treatment group 2. \item \code{eventsPerStage}: The observed number of events per stage in both treatment groups. \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise. \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise. \item \code{eventsNotAchieved}: 1 if number of events could not be reached with observed number of subjects, 0 otherwise. 
\item \code{testStatistic}: The test statistic that is used for the test decision, depends on which design was chosen (group sequential, inverse normal, or Fisher combination test). \item \code{logRankStatistic}: Z-score statistic which corresponds to a one-sided log-rank test at considered stage. \item \code{hazardRatioEstimateLR}: The estimated hazard ratio, derived from the log-rank statistic. \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise. \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for selected sample size and effect. The effect is either estimated from the data or can be user defined with \code{thetaH1}. } } \section{Raw Data}{ \code{\link[=getRawData]{getRawData()}} can be used to get the simulated raw data from the object as \code{\link[base]{data.frame}}. Note that \code{getSimulationSurvival()} must be called before with \code{maxNumberOfRawDatasetsPerStage} > 0. The data frame contains the following columns: \enumerate{ \item \code{iterationNumber}: The number of the simulation iteration. \item \code{stopStage}: The stage of stopping. \item \code{subjectId}: The subject id (increasing number 1, 2, 3, ...). \item \code{accrualTime}: The accrual time, i.e., the time when the subject entered the trial. \item \code{treatmentGroup}: The treatment group number (1 or 2). \item \code{survivalTime}: The survival time of the subject. \item \code{dropoutTime}: The dropout time of the subject (may be \code{NA}). \item \code{observationTime}: The specific observation time. 
\item \code{timeUnderObservation}: The time under observation is defined as follows:\cr if (event == TRUE) {\cr timeUnderObservation <- survivalTime;\cr } else if (dropoutEvent == TRUE) {\cr timeUnderObservation <- dropoutTime;\cr } else {\cr timeUnderObservation <- observationTime - accrualTime;\cr } \item \code{event}: \code{TRUE} if an event occurred; \code{FALSE} otherwise. \item \code{dropoutEvent}: \code{TRUE} if a dropout event occurred; \code{FALSE} otherwise. } } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \examples{ # Fixed sample size with minimum required definitions, pi1 = (0.3,0.4,0.5,0.6) and # pi2 = 0.3 at event time 12, and accrual time 24 getSimulationSurvival( pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, eventTime = 12, accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200, maxNumberOfIterations = 10 ) \dontrun{ # Increase number of simulation iterations getSimulationSurvival( pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, eventTime = 12, accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200, maxNumberOfIterations = 50 ) # Determine necessary accrual time with default settings if 200 subjects and # 30 subjects per time unit can be recruited getSimulationSurvival( plannedEvents = 40, accrualTime = 0, accrualIntensity = 30, maxNumberOfSubjects = 200, maxNumberOfIterations = 50 ) # Determine necessary accrual time with default settings if 200 subjects and # if the first 6 time units 20 subjects per time unit can be recruited, # then 30 subjects per time unit getSimulationSurvival( plannedEvents = 40, accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200, maxNumberOfIterations = 50 ) # Determine maximum number of Subjects with default settings if the first # 6 time units 20 subjects per time unit can be recruited, and after # 10 time units 30 subjects per time unit getSimulationSurvival( plannedEvents = 40, accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30), maxNumberOfIterations = 50 ) # Specify accrual time as a list at <- list( "0 - <6" = 20, "6 - Inf" = 30 ) getSimulationSurvival( plannedEvents = 40, accrualTime = at, maxNumberOfSubjects = 200, maxNumberOfIterations = 50 ) # Specify accrual time as a list, if maximum number of subjects need to be calculated at <- list( "0 - <6" = 20, "6 - <=10" = 30 ) getSimulationSurvival(plannedEvents = 40, accrualTime = at, maxNumberOfIterations = 50) # Specify effect size for a two-stage group sequential design with # O'Brien & Fleming boundaries. 
Effect size is based on event rates # at specified event time, directionUpper = FALSE needs to be specified # because it should be shown that hazard ratio < 1 designGS <- getDesignGroupSequential(kMax = 2) getSimulationSurvival( design = designGS, pi1 = 0.2, pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 50 ) # As above, but with a three-stage O'Brien and Fleming design with # specified information rates, note that planned events consist of integer values designGS2 <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1)) getSimulationSurvival( design = designGS2, pi1 = 0.2, pi2 = 0.3, eventTime = 24, plannedEvents = round(designGS2$informationRates * 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 50 ) # Effect size is based on event rate at specified event time for the reference # group and hazard ratio, directionUpper = FALSE needs to be specified because # it should be shown that hazard ratio < 1 getSimulationSurvival( design = designGS, hazardRatio = 0.5, pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 50 ) # Effect size is based on hazard rate for the reference group and # hazard ratio, directionUpper = FALSE needs to be specified because # it should be shown that hazard ratio < 1 getSimulationSurvival( design = designGS, hazardRatio = 0.5, lambda2 = 0.02, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 50 ) # Specification of piecewise exponential survival time and hazard ratios, # note that in getSimulationSurvival only one hazard ratio is used # in the case that the survival time is piecewise exponential getSimulationSurvival( design = designGS, piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = 1.5, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 
50 ) pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04 ) getSimulationSurvival( design = designGS, piecewiseSurvivalTime = pws, hazardRatio = c(1.5), plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 50 ) # Specification of piecewise exponential survival time for both treatment arms getSimulationSurvival( design = designGS, piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015, 0.03, 0.06), plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 50 ) # Specification of piecewise exponential survival time as a list, # note that in getSimulationSurvival only one hazard ratio # (not a vector) can be used pws <- list( "0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04 ) getSimulationSurvival( design = designGS, piecewiseSurvivalTime = pws, hazardRatio = 1.5, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 50 ) # Specification of piecewise exponential survival time and delayed effect # (response after 5 time units) getSimulationSurvival( design = designGS, piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.01, 0.02, 0.06), plannedEvents = c(20, 40), maxNumberOfSubjects = 200, maxNumberOfIterations = 50 ) # Specify effect size based on median survival times getSimulationSurvival( median1 = 5, median2 = 3, plannedEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 50 ) # Specify effect size based on median survival # times of Weibull distribution with kappa = 2 getSimulationSurvival( median1 = 5, median2 = 3, kappa = 2, plannedEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 50 ) # Perform recalculation of number of events based on conditional power for a # three-stage design with inverse normal combination test, where the conditional power # is calculated under the specified effect size thetaH1 = 1.3 and up to a four-fold # increase in originally 
planned sample size (number of events) is allowed. # Note that the first value in minNumberOfEventsPerStage and # maxNumberOfEventsPerStage is arbitrary, i.e., it has no effect. designIN <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1)) resultsWithSSR1 <- getSimulationSurvival( design = designIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8, thetaH1 = 1.3, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 50 ) resultsWithSSR1 # If thetaH1 is unspecified, the observed hazard ratio estimate # (calculated from the log-rank statistic) is used for performing the # recalculation of the number of events resultsWithSSR2 <- getSimulationSurvival( design = designIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 50 ) resultsWithSSR2 # Compare it with design without event size recalculation resultsWithoutSSR <- getSimulationSurvival( design = designIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, plannedEvents = c(58, 102, 145), maxNumberOfSubjects = 800, maxNumberOfIterations = 50 ) resultsWithoutSSR$overallReject resultsWithSSR1$overallReject resultsWithSSR2$overallReject # Confirm that event size recalculation increases the Type I error rate, # i.e., you have to use the combination test resultsWithSSRGS <- getSimulationSurvival( design = designGS2, hazardRatio = seq(1), pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 145), minNumberOfEventsPerStage = c(NA, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 50 ) resultsWithSSRGS$overallReject # Set seed to get reproducible results identical( getSimulationSurvival( plannedEvents = 40, maxNumberOfSubjects = 200, seed 
= 99 )$analysisTime, getSimulationSurvival( plannedEvents = 40, maxNumberOfSubjects = 200, seed = 99 )$analysisTime ) # Perform recalculation of number of events based on conditional power as above. # The number of events is recalculated only in the first interim, the recalculated number # is also used for the final stage. Here, we use the user defind calcEventsFunction as # follows (note that the last stage value in minNumberOfEventsPerStage and maxNumberOfEventsPerStage # has no effect): myCalcEventsFunction <- function(..., stage, conditionalPower, estimatedTheta, plannedEvents, eventsOverStages, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, conditionalCriticalValue) { theta <- max(1 + 1e-12, estimatedTheta) if (stage == 2) { requiredStageEvents <- max(0, conditionalCriticalValue + qnorm(conditionalPower))^2 / log(theta)^2 requiredStageEvents <- min( max(minNumberOfEventsPerStage[stage], requiredStageEvents), maxNumberOfEventsPerStage[stage] ) + eventsOverStages[stage - 1] } else { requiredStageEvents <- 2 * eventsOverStages[stage - 1] - eventsOverStages[1] } return(requiredStageEvents) } resultsWithSSR <- getSimulationSurvival( design = designIN, hazardRatio = seq(1, 2.6, 0.5), pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 146), minNumberOfEventsPerStage = c(NA, 44, 4), maxNumberOfEventsPerStage = 4 * c(NA, 44, 4), maxNumberOfSubjects = 800, calcEventsFunction = myCalcEventsFunction, seed = 1234, maxNumberOfIterations = 50 ) } } rpact/man/dataSurvival.Rd0000644000176200001440000000101714313321260015064 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataSurvival} \alias{dataSurvival} \title{One-Arm Dataset of Survival Data} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataSurvival } \description{ A dataset containing the log-rank statistics, events, and allocation ratios of one group. 
Use \code{getDataset(dataSurvival)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. } \keyword{internal} rpact/man/param_alpha.Rd0000644000176200001440000000055614312324046014677 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_alpha} \alias{param_alpha} \title{Parameter Description: Alpha} \arguments{ \item{alpha}{The significance level alpha, default is \code{0.025}. Must be a positive numeric of length 1.} } \description{ Parameter Description: Alpha } \keyword{internal} rpact/man/param_plotPointsEnabled.Rd0000644000176200001440000000061314312324046017232 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_plotPointsEnabled} \alias{param_plotPointsEnabled} \title{Parameter Description: Plot Points Enabled} \arguments{ \item{plotPointsEnabled}{Logical. If \code{TRUE}, additional points will be plotted.} } \description{ Parameter Description: Plot Points Enabled } \keyword{internal} rpact/man/printCitation.Rd0000644000176200001440000000123314400317257015256 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_utilities.R \name{printCitation} \alias{printCitation} \title{Print Citation} \usage{ printCitation(inclusiveR = TRUE, language = "en") } \arguments{ \item{inclusiveR}{If \code{TRUE} (default) the information on how to cite the base R system in publications will be added.} \item{language}{Language code to use for the output, default is "en".} } \description{ How to cite \code{rpact} and \code{R} in publications. } \details{ This function shows how to cite \code{rpact} and \code{R} (\code{inclusiveR = TRUE}) in publications. 
} \examples{ printCitation() } \keyword{internal} rpact/man/names.AnalysisResults.Rd0000644000176200001440000000127114313321257016676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \name{names.AnalysisResults} \alias{names.AnalysisResults} \title{Names of a Analysis Results Object} \usage{ \method{names}{AnalysisResults}(x) } \arguments{ \item{x}{An \code{\link{AnalysisResults}} object created by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.} } \value{ Returns a \code{\link[base]{character}} vector containing the names of the \code{\link{AnalysisResults}} object. } \description{ Function to get the names of an \code{\link{AnalysisResults}} object. } \details{ Returns the names of an analysis results that can be accessed by the user. } \keyword{internal} rpact/man/param_lambda2.Rd0000644000176200001440000000077014312324046015112 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_lambda2} \alias{param_lambda2} \title{Parameter Description: Lambda (2)} \arguments{ \item{lambda2}{The assumed hazard rate in the reference group, there is no default. \code{lambda2} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1.} } \description{ Parameter Description: Lambda (2) } \keyword{internal} rpact/man/StageResultsEnrichmentRates.Rd0000644000176200001440000000315114450467342020077 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsEnrichmentRates} \alias{StageResultsEnrichmentRates} \title{Stage Results Enrichment Rates} \description{ Class for stage results of enrichment rates data. 
} \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of enrichment rates. } \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{pValues}}{The stage-wise p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. Is a numeric vector of length \code{kMax}.} }} \keyword{internal} rpact/man/as.data.frame.ParameterSet.Rd0000644000176200001440000000217314335631006017430 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_parameter_set.R \name{as.data.frame.ParameterSet} \alias{as.data.frame.ParameterSet} \title{Coerce Parameter Set to a Data Frame} \usage{ \method{as.data.frame}{ParameterSet}( x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ... ) } \arguments{ \item{x}{A \code{\link{FieldSet}} object.} \item{niceColumnNamesEnabled}{Logical. 
If \code{TRUE}, nice looking column names will be used; syntactic names (variable names) otherwise (see \code{\link[base]{make.names}}).} \item{includeAllParameters}{Logical. If \code{TRUE}, all available parameters will be included in the data frame; a meaningful parameter selection otherwise, default is \code{FALSE}.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} } \value{ Returns a \code{\link[base]{data.frame}}. } \description{ Returns the \code{ParameterSet} as data frame. } \details{ Coerces the parameter set to a data frame. } \keyword{internal} rpact/man/StageResultsMultiArmMeans.Rd0000644000176200001440000000740314450467342017526 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsMultiArmMeans} \alias{StageResultsMultiArmMeans} \title{Stage Results Multi Arm Means} \description{ Class for stage results of multi arm means data } \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of multi arm means. } \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{pValues}}{The stage-wise p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{effectSizes}}{The stage-wise effect sizes. 
Is a numeric vector of length \code{kMax}.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. Is a numeric vector of length \code{kMax}.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{overallTestStatistics}}{The overall, i.e., cumulated test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{overallStDevs}}{The overall, i.e., cumulative standard deviations. Is a numeric vector of length number of stages times number of groups.} \item{\code{overallPooledStDevs}}{The overall pooled standard deviations. Is a numeric matrix.} \item{\code{overallPValues}}{The overall, i.e., cumulated p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{separatePValues}}{The p-values from the separate stages. Is a numeric matrix.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{singleStepAdjustedPValues}}{The adjusted p-value for testing multiple hypotheses per stage of the trial.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple (i.e., >2) treatment arms or population enrichment designs when testing means. 
Available options for multiple arms: \code{"overallPooled", "pairwisePooled", "notPooled"}. Available options for enrichment designs: \code{"pooled", "pooledFromFull", "notPooled"}.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} }} \keyword{internal} rpact/man/param_maxNumberOfEventsPerStage.Rd0000644000176200001440000000116114335631011020644 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_maxNumberOfEventsPerStage} \alias{param_maxNumberOfEventsPerStage} \title{Parameter Description: Max Number Of Events Per Stage} \arguments{ \item{maxNumberOfEventsPerStage}{When performing a data driven sample size recalculation, the numeric vector \code{maxNumberOfEventsPerStage} with length kMax determines the maximum number of events per stage (i.e., not cumulated), the first element is not taken into account.} } \description{ Parameter Description: Max Number Of Events Per Stage } \keyword{internal} rpact/man/StageResultsMultiArmRates.Rd0000644000176200001440000000626014450467342017541 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_stage_results.R \docType{class} \name{StageResultsMultiArmRates} \alias{StageResultsMultiArmRates} \title{Stage Results Multi Arm Rates} \description{ Class for stage results of multi arm rates data } \details{ This object cannot be created directly; use \code{getStageResults} with suitable arguments to create the stage results of a dataset of multi arm rates. 
} \section{Fields}{ \describe{ \item{\code{stages}}{The stage numbers of the trial. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{pValues}}{The stage-wise p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{effectSizes}}{The stage-wise effect sizes. Is a numeric vector of length \code{kMax}.} \item{\code{testActions}}{The test decisions at each stage of the trial. Is a character vector of length \code{kMax}.} \item{\code{weightsFisher}}{The weights for Fisher's combination test. Is a numeric vector of length \code{kMax}.} \item{\code{weightsInverseNormal}}{The weights for the inverse normal statistic. Is a numeric vector of length \code{kMax}.} \item{\code{combInverseNormal}}{The test statistics over stages for the inverse normal test. Is a numeric vector of length \code{kMax}.} \item{\code{combFisher}}{The test statistics over stages for Fisher's combination test. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{overallTestStatistics}}{The overall, i.e., cumulated test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{overallPValues}}{The overall, i.e., cumulated p-values. Is a numeric vector of length \code{kMax} containing values between 0 and 1.} \item{\code{testStatistics}}{The stage-wise test statistics. Is a numeric vector of length \code{kMax}.} \item{\code{separatePValues}}{The p-values from the separate stages. Is a numeric matrix.} \item{\code{effectSizes}}{The stage-wise effect sizes. 
Is a numeric vector of length \code{kMax}.} \item{\code{singleStepAdjustedPValues}}{The adjusted p-value for testing multiple hypotheses per stage of the trial.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{normalApproximation}}{Describes if a normal approximation was used when calculating p-values. Default for means is \code{FALSE} and \code{TRUE} for rates and hazard ratio. Is a logical vector of length 1.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} }} \keyword{internal} rpact/man/param_dropoutTime.Rd0000644000176200001440000000062114335631010016113 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_dropoutTime} \alias{param_dropoutTime} \title{Parameter Description: Dropout Time} \arguments{ \item{dropoutTime}{The assumed time for drop-out rates in the control and the treatment group, default is \code{12}.} } \description{ Parameter Description: Dropout Time } \keyword{internal} rpact/man/rawDataTwoArmNormal.Rd0000644000176200001440000000204714335631007016320 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{rawDataTwoArmNormal} \alias{rawDataTwoArmNormal} \title{Raw Dataset Of A Two Arm Continuous Outcome With Covariates} \format{ A \code{\link[base]{data.frame}} object. } \usage{ rawDataTwoArmNormal } \description{ An artificial dataset that was randomly generated with simulated normal data. 
The data set has six variables: \enumerate{ \item Subject id \item Stage number \item Group name \item An example outcome in that we are interested in \item The first covariate \emph{gender} \item The second covariate \emph{covariate} } } \details{ See the vignette "Two-arm analysis for continuous data with covariates from raw data" to learn how to \itemize{ \item import raw data from a csv file, \item calculate estimated adjusted (marginal) means (EMMs, least-squares means) for a linear model, and \item perform two-arm interim analyses with these data. } You can use \code{rawDataTwoArmNormal} to reproduce the examples in the vignette. } \keyword{internal} rpact/man/ConditionalPowerResultsMeans.Rd0000644000176200001440000000331514450467342020266 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{ConditionalPowerResultsMeans} \alias{ConditionalPowerResultsMeans} \title{Conditional Power Results Means} \description{ Class for conditional power calculations of means data } \details{ This object cannot be created directly; use \code{\link{getConditionalPower}} with suitable arguments to create the results of a group sequential or a combination test design. } \section{Fields}{ \describe{ \item{\code{nPlanned}}{The sample size planned for each of the subsequent stages. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{seed}}{The seed used for random number generation. 
Is a numeric vector of length 1.} \item{\code{simulated}}{Describes if the power for Fisher's combination test has been simulated. Only applicable when using Fisher designs. Is a logical vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{thetaH1}}{The assumed effect under the alternative hypothesis. For survival designs, refers to the hazard ratio. Is a numeric vector.} \item{\code{assumedStDev}}{The assumed standard deviation(s) for means analysis. Is a numeric vector.} }} \keyword{internal} rpact/man/param_informationRates.Rd0000644000176200001440000000065214335631010017130 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_informationRates} \alias{param_informationRates} \title{Parameter Description: Information Rates} \arguments{ \item{informationRates}{The information rates (that must be fixed prior to the trial), default is \code{(1:kMax) / kMax}.} } \description{ Parameter Description: Information Rates } \keyword{internal} rpact/man/param_beta.Rd0000644000176200001440000000100414335631010014507 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_beta} \alias{param_beta} \title{Parameter Description: Beta} \arguments{ \item{beta}{Type II error rate, necessary for providing sample size calculations (e.g., \code{\link[=getSampleSizeMeans]{getSampleSizeMeans()}}), beta spending function designs, or optimum designs, default is \code{0.20}. 
Must be a positive numeric of length 1.} } \description{ Parameter Description: Beta } \keyword{internal} rpact/man/SummaryFactory.Rd0000644000176200001440000000036114232463333015415 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_summary.R \docType{class} \name{SummaryFactory} \alias{SummaryFactory} \title{Summary Factory} \description{ Basic class for summaries } \keyword{internal} rpact/man/param_stageResults.Rd0000644000176200001440000000062114313321260016264 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_stageResults} \alias{param_stageResults} \title{Parameter Description: Stage Results} \arguments{ \item{stageResults}{The results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}}.} } \description{ Parameter Description: Stage Results } \keyword{internal} rpact/man/ClosedCombinationTestResults.Rd0000644000176200001440000000276214450501036020250 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_results.R \docType{class} \name{ClosedCombinationTestResults} \alias{ClosedCombinationTestResults} \title{Analysis Results Closed Combination Test} \description{ Class for multi-arm analysis results based on a closed combination test. } \details{ This object cannot be created directly; use \code{\link{getAnalysisResults}} with suitable arguments to create the multi-arm analysis results of a closed combination test design. } \section{Fields}{ \describe{ \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{indices}}{Indicates which stages are available for analysis.} \item{\code{adjustedStageWisePValues}}{The multiplicity adjusted p-values from the separate stages. 
Is a numeric matrix.} \item{\code{overallAdjustedTestStatistics}}{The overall adjusted test statistics.} \item{\code{separatePValues}}{The p-values from the separate stages. Is a numeric matrix.} \item{\code{conditionalErrorRate}}{The calculated conditional error rate.} \item{\code{secondStagePValues}}{For conditional Dunnett test, the conditional or unconditional p-value calculated for the second stage.} \item{\code{rejected}}{Indicates whether a hypothesis is rejected or not.} \item{\code{rejectedIntersections}}{The simulated number of rejected arms in the closed testing procedure. Is a logical matrix.} }} \keyword{internal} rpact/man/print.Dataset.Rd0000644000176200001440000000147414335631006015154 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \name{print.Dataset} \alias{print.Dataset} \title{Print Dataset Values} \usage{ \method{print}{Dataset}( x, ..., markdown = FALSE, output = c("list", "long", "wide", "r", "rComplete") ) } \arguments{ \item{x}{A \code{\link{Dataset}} object.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{markdown}{If \code{TRUE}, the output will be created in Markdown.} \item{output}{A character defining the output type, default is "list".} } \description{ \code{print} prints its \code{\link{Dataset}} argument and returns it invisibly (via \code{invisible(x)}). } \details{ Prints the dataset. 
} \keyword{internal} rpact/man/param_maxInformation.Rd0000644000176200001440000000060114232463334016601 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_maxInformation} \alias{param_maxInformation} \title{Parameter Description: Maximum Information} \arguments{ \item{maxInformation}{Positive integer value specifying the maximum information.} } \description{ Parameter Description: Maximum Information } \keyword{internal} rpact/man/param_allocationRatioPlanned_sampleSize.Rd0000644000176200001440000000122114335631010022417 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_allocationRatioPlanned_sampleSize} \alias{param_allocationRatioPlanned_sampleSize} \title{Parameter Description: Allocation Ratio Planned With Optimum Option} \arguments{ \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. 
If \code{allocationRatioPlanned = 0} is entered, the optimal allocation ratio yielding the smallest overall sample size is determined.} } \description{ Parameter Description: Allocation Ratio Planned With Optimum Option } \keyword{internal} rpact/man/getLambdaStepFunction.Rd0000644000176200001440000000244314335631006016653 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_plot.R \name{getLambdaStepFunction} \alias{getLambdaStepFunction} \title{Get Lambda Step Function} \usage{ getLambdaStepFunction(timeValues, ..., piecewiseSurvivalTime, piecewiseLambda) } \arguments{ \item{timeValues}{A numeric vector that specifies the time values for which the lambda step values shall be calculated.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{piecewiseSurvivalTime}{A numeric vector that specifies the time intervals for the piecewise definition of the exponential survival time cumulative distribution function (see details).} \item{piecewiseLambda}{A numeric vector that specifies the assumed hazard rate in the treatment group.} } \value{ A numeric vector containing the lambda step values that corresponds to the specified time values. } \description{ Calculates the lambda step values for a given time vector. } \details{ The first element of the vector \code{piecewiseSurvivalTime} must be equal to \code{0}. This function is used for plotting of sample size survival results (cf., \code{\link[=plot.TrialDesignPlan]{plot}}, \code{type = 13} and \code{type = 14}). } \keyword{internal} rpact/man/dataMultiArmRates.Rd0000644000176200001440000000100514313321260015777 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataMultiArmRates} \alias{dataMultiArmRates} \title{Multi-Arm Dataset of Rates} \format{ A \code{\link[base]{data.frame}} object. 
} \usage{ dataMultiArmRates } \description{ A dataset containing the sample sizes and events of three groups. Use \code{getDataset(dataMultiArmRates)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. } \keyword{internal} rpact/man/param_nPlanned.Rd0000644000176200001440000000145514335631010015345 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_nPlanned} \alias{param_nPlanned} \title{Parameter Description: N Planned} \arguments{ \item{nPlanned}{The additional (i.e., "new" and not cumulative) sample size planned for each of the subsequent stages. The argument must be a vector with length equal to the number of remaining stages and contain the combined sample size from both treatment groups if two groups are considered. For survival outcomes, it should contain the planned number of additional events. For multi-arm designs, it is the per-comparison (combined) sample size. For enrichment designs, it is the (combined) sample size for the considered sub-population.} } \description{ Parameter Description: N Planned } \keyword{internal} rpact/man/DatasetSurvival.Rd0000644000176200001440000000311714450467342015561 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_analysis_dataset.R \docType{class} \name{DatasetSurvival} \alias{DatasetSurvival} \alias{DatasetEnrichmentSurvival-class} \alias{DatasetEnrichmentSurvival} \title{Dataset of Survival Data} \description{ Class for a dataset of survival data. } \details{ This object cannot be created directly; better use \code{\link{getDataset}} with suitable arguments to create a dataset of survival data. } \section{Fields}{ \describe{ \item{\code{groups}}{The group numbers. Is a numeric vector.} \item{\code{stages}}{The stage numbers of the trial. 
Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{events}}{The number of events in each group at each stage. Is a numeric vector of length number of stages times number of groups.} \item{\code{overallEvents}}{The overall, i.e., cumulative events. Is a numeric vector of length number of stages times number of groups containing whole numbers.} \item{\code{allocationRatios}}{The observed allocation ratios. Is a numeric vector of length number of stages times number of groups.} \item{\code{overallAllocationRatios}}{The cumulative allocation ratios. Is a numeric vector of length number of stages times number of groups.} \item{\code{logRanks}}{The logrank test statistics at each stage of the trial. Is a numeric vector of length number of stages times number of groups.} \item{\code{overallLogRanks}}{The overall, i.e., cumulative logrank test statistics. Is a numeric vector of length number of stages times number of groups.} }} \keyword{internal} rpact/man/param_alternative.Rd0000644000176200001440000000077414335631010016127 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_alternative} \alias{param_alternative} \title{Parameter Description: Alternative} \arguments{ \item{alternative}{The alternative hypothesis value for testing means. 
This can be a vector of assumed alternatives, default is \code{seq(0, 1, 0.2)} (power calculations) or \code{seq(0.2, 1, 0.2)} (sample size calculations).} } \description{ Parameter Description: Alternative } \keyword{internal} rpact/man/getAnalysisResults.Rd0000644000176200001440000003676414417202031016304 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getAnalysisResults} \alias{getAnalysisResults} \title{Get Analysis Results} \usage{ getAnalysisResults( design, dataInput, ..., directionUpper = TRUE, thetaH0 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = 1, stage = NA_integer_, maxInformation = NULL, informationEpsilon = NULL ) } \arguments{ \item{design}{The trial design.} \item{dataInput}{The summary data used for calculating the test results. This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival} and should be created with the function \code{\link[=getDataset]{getDataset()}}. For more information see \code{\link[=getDataset]{getDataset()}}.} \item{...}{Further arguments to be passed to methods (cf., separate functions in "See Also" below), e.g., \describe{ \item{\code{thetaH1} and \code{stDevH1} (or \code{assumedStDev} / \code{assumedStDevs}), \code{pi1}, \code{pi2}, or \code{piTreatments}, \code{piControl(s)}}{ The assumed effect size, standard deviation or rates to calculate the conditional power if \code{nPlanned} is specified. For survival designs, \code{thetaH1} refers to the hazard ratio. For one-armed trials with binary outcome, only \code{pi1} can be specified, for two-armed trials with binary outcome, \code{pi1} and \code{pi2} can be specified referring to the assumed treatment and control rate, respectively. In multi-armed or enrichment designs, you can specify a value or a vector with elements referring to the treatment arms or the sub-populations, respectively. 
For testing rates, the parameters to be specified are \code{piTreatments} and \code{piControl} (multi-arm designs) and \code{piTreatments} and \code{piControls} (enrichment designs).\cr If not specified, the conditional power is calculated under the assumption of observed effect sizes, standard deviations, rates, or hazard ratios.} \item{\code{iterations}}{Iterations for simulating the power for Fisher's combination test. If the power for more than one remaining stages is to be determined for Fisher's combination test, it is estimated via simulation with specified \cr \code{iterations}, the default is \code{1000}.} \item{\code{seed}}{Seed for simulating the conditional power for Fisher's combination test. See above, default is a random seed.} \item{\code{normalApproximation}}{The type of computation of the p-values. Default is \code{FALSE} for testing means (i.e., the t test is used) and \code{TRUE} for testing rates and the hazard ratio. For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values. In the survival setting, \code{normalApproximation = FALSE} has no effect.} \item{\code{equalVariances}}{The type of t test. For testing means in two treatment groups, either the t test assuming that the variances are equal or the t test without assuming this, i.e., the test of Welch-Satterthwaite is calculated, default is \code{TRUE}.} \item{\code{intersectionTest}}{Defines the multiple test for the intersection hypotheses in the closed system of hypotheses when testing multiple hypotheses. Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}. 
Four options are available in population enrichment designs: \code{"SpiessensDebois"} (one subset only), \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.} \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple treatment arms (> 2) or population enrichment designs for testing means. For multiple arms, three options are available: \code{"overallPooled"}, \code{"pairwisePooled"}, and \code{"notPooled"}, default is \code{"overallPooled"}. For enrichment designs, the options are: \code{"pooled"}, \code{"pooledFromFull"} (one subset only), and \code{"notPooled"}, default is \code{"pooled"}.} \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. For testing means and rates, also a non-stratified analysis based on overall data can be performed. For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.} }} \item{directionUpper}{Logical. Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. 
} For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{nPlanned}{The additional (i.e., "new" and not cumulative) sample size planned for each of the subsequent stages. The argument must be a vector with length equal to the number of remaining stages and contain the combined sample size from both treatment groups if two groups are considered. For survival outcomes, it should contain the planned number of additional events. For multi-arm designs, it is the per-comparison (combined) sample size. For enrichment designs, it is the (combined) sample size for the considered sub-population.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{stage}{The stage number (optional). Default: total number of existing stages in the data input.} \item{maxInformation}{Positive integer value specifying the maximum information.} \item{informationEpsilon}{Positive integer value specifying the absolute information epsilon, which defines the maximum distance from the observed information to the maximum information that causes the final analysis. Updates at the final analysis in case the observed information at the final analysis is smaller ("under-running") than the planned maximum information \code{maxInformation}, default is 0. Alternatively, a floating-point number > 0 and < 1 can be specified to define a relative information epsilon.} } \value{ Returns an \code{\link{AnalysisResults}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.AnalysisResults]{names}} to obtain the field names, \item \code{\link[=print.ParameterSet]{print()}} to print the object, \item \code{\link[=summary.AnalysisResults]{summary()}} to display a summary of the object, \item \code{\link[=plot.AnalysisResults]{plot()}} to plot the object, \item \code{\link[=as.data.frame.AnalysisResults]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Calculates and returns the analysis results for the specified design and data. } \details{ Given a design and a dataset, at given stage the function calculates the test results (effect sizes, stage-wise test statistics and p-values, overall p-values and test statistics, conditional rejection probability (CRP), conditional power, Repeated Confidence Intervals (RCIs), repeated overall p-values, and final stage p-values, median unbiased effect estimates, and final confidence intervals. For designs with more than two treatments arms (multi-arm designs) or enrichment designs a closed combination test is performed. That is, additionally the statistics to be used in a closed testing procedure are provided. The conditional power is calculated if the planned sample size for the subsequent stages (\code{nPlanned}) is specified. The conditional power is calculated either under the assumption of the observed effect or under the assumption of an assumed effect, that has to be specified (see above).\cr For testing rates in a two-armed trial, pi1 and pi2 typically refer to the rates in the treatment and the control group, respectively. This is not mandatory, however, and so pi1 and pi2 can be interchanged. 
In many-to-one multi-armed trials, piTreatments and piControl refer to the rates in the treatment arms and the one control arm, and so they cannot be interchanged. piTreatments and piControls in enrichment designs can principally be interchanged, but we use the plural form to indicate that the rates can be differently specified for the sub-populations. Median unbiased effect estimates and confidence intervals are calculated if a group sequential design or an inverse normal combination test design was chosen, i.e., it is not applicable for Fisher's p-value combination test design. For the inverse normal combination test design with more than two stages, a warning informs that the validity of the confidence interval is theoretically shown only if no sample size change was performed. A final stage p-value for Fisher's combination test is calculated only if a two-stage design was chosen. For Fisher's combination test, the conditional power for more than one remaining stages is estimated via simulation. Final stage p-values, median unbiased effect estimates, and final confidence intervals are not calculated for multi-arm and enrichment designs. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. 
} \section{Note on the dependency of \code{mnormt}}{ If \code{intersectionTest = "Dunnett"} or \code{intersectionTest = "SpiessensDebois"}, or the design is a conditional Dunnett design and the dataset is a multi-arm or enrichment dataset, \code{rpact} uses the R package \href{https://cran.r-project.org/package=mnormt}{mnormt} to calculate the analysis results. } \examples{ \dontrun{ # Example 1 One-Sample t Test # Perform an analysis within a three-stage group sequential design with # O'Brien & Fleming boundaries and one-sample data with a continuous outcome # where H0: mu = 1.2 is to be tested dsnGS <- getDesignGroupSequential() dataMeans <- getDataset( n = c(30, 30), means = c(1.96, 1.76), stDevs = c(1.92, 2.01)) getAnalysisResults(design = dsnGS, dataInput = dataMeans, thetaH0 = 1.2) # You can obtain the results when performing an inverse normal combination test # with these data by using the commands dsnIN <- getDesignInverseNormal() getAnalysisResults(design = dsnIN, dataInput = dataMeans, thetaH0 = 1.2) # Example 2 Use Function Approach with Time to Event Data # Perform an analysis within a use function approach according to an # O'Brien & Fleming type use function and survival data where # where H0: hazard ratio = 1 is to be tested. The events were observed # over time and maxInformation = 120, informationEpsilon = 5 specifies # that 116 > 120 - 5 observed events defines the final analysis. design <- getDesignGroupSequential(typeOfDesign = "asOF") dataSurvival <- getDataset( cumulativeEvents = c(33, 72, 116), cumulativeLogRanks = c(1.33, 1.88, 1.902)) getAnalysisResults(design, dataInput = dataSurvival, maxInformation = 120, informationEpsilon = 5) # Example 3 Multi-Arm Design # In a four-stage combination test design with O'Brien & Fleming boundaries # at the first stage the second treatment arm was dropped. 
With the Bonferroni # intersection test, the results together with the CRP, conditional power # (assuming a total of 40 subjects for each comparison and effect sizes 0.5 # and 0.8 for treatment arm 1 and 3, respectively, and standard deviation 1.2), # RCIs and p-values of a closed adaptive test procedure are # obtained as follows with the given data (treatment arm 4 refers to the # reference group; displayed with summary and plot commands): data <- getDataset( n1 = c(22, 23), n2 = c(21, NA), n3 = c(20, 25), n4 = c(25, 27), means1 = c(1.63, 1.51), means2 = c(1.4, NA), means3 = c(0.91, 0.95), means4 = c(0.83, 0.75), stds1 = c(1.2, 1.4), stds2 = c(1.3, NA), stds3 = c(1.1, 1.14), stds4 = c(1.02, 1.18)) design <- getDesignInverseNormal(kMax = 4) x <- getAnalysisResults(design, dataInput = data, intersectionTest = "Bonferroni", nPlanned = c(40, 40), thetaH1 = c(0.5, NA, 0.8), assumedStDevs = 1.2) summary(x) if (require(ggplot2)) plot(x, thetaRange = c(0, 0.8)) design <- getDesignConditionalDunnett(secondStageConditioning = FALSE) y <- getAnalysisResults(design, dataInput = data, nPlanned = 40, thetaH1 = c(0.5, NA, 0.8), assumedStDevs = 1.2, stage = 1) summary(y) if (require(ggplot2)) plot(y, thetaRange = c(0, 0.4)) # Example 4 Enrichment Design # Perform an two-stage enrichment design analysis with O'Brien & Fleming boundaries # where one sub-population (S1) and a full population (F) are considered as primary # analysis sets. At interim, S1 is selected for further analysis and the sample # size is increased accordingly. 
With the Spiessens & Debois intersection test, # the results of a closed adaptive test procedure together with the CRP, repeated # RCIs and p-values are obtained as follows with the given data (displayed with # summary and plot commands): design <- getDesignInverseNormal(kMax = 2, typeOfDesign = "OF") dataS1 <- getDataset( means1 = c(13.2, 12.8), means2 = c(11.1, 10.8), stDev1 = c(3.4, 3.3), stDev2 = c(2.9, 3.5), n1 = c(21, 42), n2 = c(19, 39)) dataNotS1 <- getDataset( means1 = c(11.8, NA), means2 = c(10.5, NA), stDev1 = c(3.6, NA), stDev2 = c(2.7, NA), n1 = c(15, NA), n2 = c(13, NA)) dataBoth <- getDataset(S1 = dataS1, R = dataNotS1) x <- getAnalysisResults(design, dataInput = dataBoth, intersectionTest = "SpiessensDebois", varianceOption = "pooledFromFull", stratifiedAnalysis = TRUE) summary(x) if (require(ggplot2)) plot(x, type = 2) } } \seealso{ \code{\link[=getObservedInformationRates]{getObservedInformationRates()}} Other analysis functions: \code{\link{getClosedCombinationTestResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getStageResults}()}, \code{\link{getTestActions}()} } \concept{analysis functions} rpact/man/param_varianceOption.Rd0000644000176200001440000000136214335631011016565 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_varianceOption} \alias{param_varianceOption} \title{Parameter Description: Variance Option} \arguments{ \item{varianceOption}{Defines the way to calculate the variance in multiple treatment arms (> 2) or population enrichment designs for testing means. 
For multiple arms, three options are available: \code{"overallPooled"}, \code{"pairwisePooled"}, and \code{"notPooled"}, default is \code{"overallPooled"}. For enrichment designs, the options are: \code{"pooled"}, \code{"pooledFromFull"} (one subset only), and \code{"notPooled"}, default is \code{"pooled"}.} } \description{ Parameter Description: Variance Option } \keyword{internal} rpact/man/getTestActions.Rd0000644000176200001440000000262514411251744015375 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_analysis_base.R \name{getTestActions} \alias{getTestActions} \title{Get Test Actions} \usage{ getTestActions(stageResults, ...) } \arguments{ \item{stageResults}{The results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}}.} \item{...}{Only available for backward compatibility.} } \value{ Returns a \code{\link[base]{character}} vector of length \code{kMax} containing the test actions of each stage. } \description{ Returns test actions. } \details{ Returns the test actions of the specified design and stage results at the specified stage. 
} \examples{ design <- getDesignInverseNormal(kMax = 2) data <- getDataset( n = c( 20, 30), means = c( 50, 51), stDevs = c(130, 140) ) getTestActions(getStageResults(design, dataInput = data)) } \seealso{ Other analysis functions: \code{\link{getAnalysisResults}()}, \code{\link{getClosedCombinationTestResults}()}, \code{\link{getClosedConditionalDunnettTestResults}()}, \code{\link{getConditionalPower}()}, \code{\link{getConditionalRejectionProbabilities}()}, \code{\link{getFinalConfidenceInterval}()}, \code{\link{getFinalPValue}()}, \code{\link{getRepeatedConfidenceIntervals}()}, \code{\link{getRepeatedPValues}()}, \code{\link{getStageResults}()} } \concept{analysis functions} rpact/man/param_effectMeasure.Rd0000644000176200001440000000105414335631011016360 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_effectMeasure} \alias{param_effectMeasure} \title{Parameter Description: Effect Measure} \arguments{ \item{effectMeasure}{Criterion for treatment arm/population selection, either based on test statistic (\code{"testStatistic"}) or effect estimate (difference for means and rates or ratio for survival) (\code{"effectEstimate"}), default is \code{"effectEstimate"}.} } \description{ Parameter Description: Effect Measure } \keyword{internal} rpact/man/getOutputFormat.Rd0000644000176200001440000000475314372411347015615 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_output_formats.R \name{getOutputFormat} \alias{getOutputFormat} \title{Get Output Format} \usage{ getOutputFormat( parameterName = NA_character_, ..., file = NA_character_, default = FALSE, fields = TRUE ) } \arguments{ \item{parameterName}{The name of the parameter whose output format shall be returned. 
Leave the default \code{NA_character_} if the output format of all parameters shall be returned.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{file}{An optional file name where to write the output formats (see Details for more information).} \item{default}{If \code{TRUE} the default output format of the specified parameter(s) will be returned, default is \code{FALSE}.} \item{fields}{If \code{TRUE} the names of all affected object fields will be displayed, default is \code{TRUE}.} } \value{ A named list of output formats. } \description{ With this function the format of the standard outputs of all \code{rpact} objects can be shown and written to a file. } \details{ Output formats can be written to a text file by specifying a \code{file}. See \code{\link[=setOutputFormat]{setOutputFormat()}}() to learn how to read a formerly saved file. Note that the \code{parameterName} must not match exactly, e.g., for p-values the following parameter names will be recognized amongst others: \enumerate{ \item \code{p value} \item \code{p.values} \item \code{p-value} \item \code{pValue} \item \code{rpact.output.format.p.value} } } \examples{ # show output format of p values getOutputFormat("p.value") \dontrun{ # set new p value output format setOutputFormat("p.value", digits = 5, nsmall = 5) # show sample sizes as smallest integers not less than the not rounded values setOutputFormat("sample size", digits = 0, nsmall = 0, roundFunction = "ceiling") getSampleSizeMeans() # show sample sizes as smallest integers not greater than the not rounded values setOutputFormat("sample size", digits = 0, nsmall = 0, roundFunction = "floor") getSampleSizeMeans() # set new sample size output format without round function setOutputFormat("sample size", digits = 2, nsmall = 2) getSampleSizeMeans() # reset sample size output format to default setOutputFormat("sample size") 
getSampleSizeMeans() getOutputFormat("sample size") } } \seealso{ Other output formats: \code{\link{setOutputFormat}()} } \concept{output formats} rpact/man/param_bindingFutility.Rd0000644000176200001440000000112714335631010016746 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_bindingFutility} \alias{param_bindingFutility} \title{Parameter Description: Binding Futility} \arguments{ \item{bindingFutility}{Logical. If \code{bindingFutility = TRUE} is specified the calculation of the critical values is affected by the futility bounds and the futility threshold is binding in the sense that the study must be stopped if the futility condition was reached (default is \code{FALSE}).} } \description{ Parameter Description: Binding Futility } \keyword{internal} rpact/man/param_userAlphaSpending.Rd0000644000176200001440000000103414335631010017213 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_userAlphaSpending} \alias{param_userAlphaSpending} \title{Parameter Description: User Alpha Spending} \arguments{ \item{userAlphaSpending}{The user defined alpha spending. Numeric vector of length \code{kMax} containing the cumulative alpha-spending (Type I error rate) up to each interim stage: \code{0 <= alpha_1 <= ... 
<= alpha_K <= alpha}.} } \description{ Parameter Description: User Alpha Spending } \keyword{internal} rpact/man/getPowerRates.Rd0000644000176200001440000001554114417202031015220 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_design_sample_size_calculator.R \name{getPowerRates} \alias{getPowerRates} \title{Get Power Rates} \usage{ getPowerRates( design = NULL, ..., groups = 2L, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = seq(0.2, 0.5, 0.1), pi2 = 0.2, directionUpper = NA, maxNumberOfSubjects = NA_real_, allocationRatioPlanned = NA_real_ ) } \arguments{ \item{design}{The trial design. If no trial design is specified, a fixed sample size design is used. In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, and \code{sided} can be directly entered as argument where necessary.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{groups}{The number of treatment groups (1 or 2), default is \code{2}.} \item{riskRatio}{If \code{TRUE}, the power for one-sided testing of H0: \code{pi1 / pi2 = thetaH0} is calculated, default is \code{FALSE}.} \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. 
} For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} \item{pi1}{A numeric value or vector that represents the assumed probability in the active treatment group if two treatment groups are considered, or the alternative probability for a one treatment group design, default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or \code{seq(0.4, 0.6, 0.1)} (sample size calculations).} \item{pi2}{A numeric value that represents the assumed probability in the reference group if two treatment groups are considered, default is \code{0.2}.} \item{directionUpper}{Logical. Specifies the direction of the alternative, only applicable for one-sided testing; default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values.} \item{maxNumberOfSubjects}{\code{maxNumberOfSubjects > 0} needs to be specified. For two treatment arms, it is the maximum number of subjects for both treatment arms.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} } \value{ Returns a \code{\link{TrialDesignPlan}} object. 
The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object, \item \code{\link[=summary.TrialDesignSet]{summary()}} to display a summary of the object, \item \code{\link[=plot.TrialDesignPlan]{plot()}} to plot the object, \item \code{\link[=as.data.frame.TrialDesignPlan]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. } } \description{ Returns the power, stopping probabilities, and expected sample size for testing rates in one or two samples at given sample sizes. } \details{ At given design the function calculates the power, stopping probabilities, and expected sample size, for testing rates for given maximum sample size. The sample sizes over the stages are calculated according to the specified information rate in the design. In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. If a null hypothesis value thetaH0 != 0 for testing the difference of two rates or \code{thetaH0 != 1} for testing the risk ratio is specified, the formulas according to Farrington & Manning (Statistics in Medicine, 1990) are used (only one-sided testing). Critical bounds and stopping for futility bounds are provided at the effect scale (rate, rate difference, or rate ratio, respectively). For the two-sample case, the calculation here is performed at fixed pi2 as given as argument in the function. Note that the power calculation for rates is always based on the normal approximation. } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. 
Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \examples{ # Calculate the power, stopping probabilities, and expected sample size in a # two-armed design at given maximum sample size N = 200 in a three-stage # O'Brien & Fleming design with information rate vector (0.2,0.5,1), # non-binding futility boundaries (0,0), i.e., the study stops for futility # if the p-value exceeds 0.5 at interim, and allocation ratio = 2 for a range # of pi1 values when testing H0: pi1 - pi2 = -0.1: getPowerRates(getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), futilityBounds = c(0, 0)), groups = 2, thetaH0 = -0.1, pi1 = seq(0.3, 0.6, 0.1), directionUpper = FALSE, pi2 = 0.7, allocationRatioPlanned = 2, maxNumberOfSubjects = 200) \dontrun{ # Calculate the power, stopping probabilities, and expected sample size in a single # arm design at given maximum sample size N = 60 in a three-stage two-sided # O'Brien & Fleming design with information rate vector (0.2, 0.5,1) # for a range of pi1 values when testing H0: pi = 0.3: getPowerRates(getDesignGroupSequential(informationRates = c(0.2, 0.5,1), sided = 2), groups = 1, thetaH0 = 0.3, pi1 = seq(0.3, 0.5, 0.05), maxNumberOfSubjects = 60) } } \seealso{ Other power functions: \code{\link{getPowerMeans}()}, \code{\link{getPowerSurvival}()} } \concept{power functions} 
\method{knit_print}{ParameterSet}(x, ...) } \arguments{ \item{x}{A \code{ParameterSet}.} \item{...}{Other arguments (see \code{\link[knitr]{knit_print}}).} } \description{ The function \code{knit_print.ParameterSet} is the default printing function for rpact result objects in knitr. The chunk option \code{render} uses this function by default. To fall back to the normal printing behavior set the chunk option \code{render = normal_print}. For more information see \code{\link[knitr]{knit_print}}. } \details{ Generic function to print a parameter set in Markdown. Use \code{options("rpact.print.heading.base.number" = "NUMBER")} (where \code{NUMBER} is an integer value >= -1) to specify the heading level. The default is \code{options("rpact.print.heading.base.number" = "0")}, i.e., the top headings start with \code{##} in Markdown. \code{options("rpact.print.heading.base.number" = "-1")} means that all headings will be written bold but are not explicit defined as header. } rpact/man/PlotSettings.Rd0000644000176200001440000000321314232463333015066 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_core_plot_settings.R \docType{class} \name{PlotSettings} \alias{PlotSettings} \title{Plot Settings} \description{ Class for plot settings. } \details{ Collects typical plot settings in an object. 
} \section{Fields}{ \describe{ \item{\code{lineSize}}{The line size.} \item{\code{pointSize}}{The point size.} \item{\code{pointColor}}{The point color, e.g., "red" or "blue".} \item{\code{mainTitleFontSize}}{The main tile font size.} \item{\code{axesTextFontSize}}{The text font size.} \item{\code{legendFontSize}}{The legend font size.} \item{\code{scalingFactor}}{The scaling factor.} }} \section{Methods}{ \describe{ \item{\code{adjustLegendFontSize(adjustingValue)}}{Adjusts the legend font size, e.g., run \cr \code{adjustLegendFontSize(-2)} # makes the font size 2 points smaller} \item{\code{enlargeAxisTicks(p)}}{Enlarges the axis ticks} \item{\code{expandAxesRange(p, x = NA_real_, y = NA_real_)}}{Expands the axes range} \item{\code{hideGridLines(p)}}{Hides the grid lines} \item{\code{setAxesAppearance(p)}}{Sets the font size and face of the axes titles and texts} \item{\code{setColorPalette(p, palette, mode = c("colour", "fill", "all"))}}{Sets the color palette} \item{\code{setLegendBorder(p)}}{Sets the legend border} \item{\code{setMainTitle(p, mainTitle, subtitle = NA_character_)}}{Sets the main title} \item{\code{setMarginAroundPlot(p, margin = 0.2)}}{Sets the margin around the plot, e.g., run \cr \code{setMarginAroundPlot(p, .2)} or \cr \code{setMarginAroundPlot(p, c(.1, .2, .1, .2)}} \item{\code{setTheme(p)}}{Sets the theme} }} \keyword{internal} rpact/man/getAvailablePlotTypes.Rd0000644000176200001440000000326114402556623016702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_core_plot.R \name{plotTypes} \alias{plotTypes} \alias{getAvailablePlotTypes} \title{Get Available Plot Types} \usage{ plotTypes( obj, output = c("numeric", "caption", "numcap", "capnum"), numberInCaptionEnabled = FALSE ) getAvailablePlotTypes( obj, output = c("numeric", "caption", "numcap", "capnum"), numberInCaptionEnabled = FALSE ) } \arguments{ \item{obj}{The object for which the plot types shall be identified, e.g. 
produced by \code{\link[=getDesignGroupSequential]{getDesignGroupSequential()}} or \code{\link[=getSampleSizeMeans]{getSampleSizeMeans()}}.} \item{output}{The output type. Can be one of \code{c("numeric", "caption", "numcap", "capnum")}.} \item{numberInCaptionEnabled}{If \code{TRUE}, the number will be added to the caption, default is \code{FALSE}.} } \value{ Returns a list if \code{output} is either \code{capnum} or \code{numcap}, or returns a vector that is of character type for \code{output = "caption"} or of numeric type for \code{output = "numeric"}. } \description{ Function to identify the available plot types of an object. } \details{ \code{plotTypes} and \code{getAvailablePlotTypes()} are equivalent, i.e., \code{plotTypes} is a short form of \code{getAvailablePlotTypes()}. \code{output}: \enumerate{ \item \code{numeric}: numeric output \item \code{caption}: caption as character output \item \code{numcap}: list with number and caption \item \code{capnum}: list with caption and number } } \examples{ design <- getDesignInverseNormal(kMax = 2) getAvailablePlotTypes(design, "numeric") plotTypes(design, "caption") getAvailablePlotTypes(design, "numcap") plotTypes(design, "capnum") } 
Is a numeric vector of length 1.} \item{\code{allocationRatioPlanned}}{The planned allocation ratio (\code{n1 / n2}) for the groups. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. Is a positive numeric vector of length 1.} \item{\code{conditionalPower}}{The conditional power at each stage of the trial. Is a numeric vector of length 1 containing a value between 0 and 1.} \item{\code{iterations}}{The number of iterations used for simulations. Is a numeric vector of length 1 containing a whole number.} \item{\code{futilityPerStage}}{The per-stage probabilities of stopping the trial for futility. Is a numeric matrix.} \item{\code{futilityStop}}{In simulation results data set: indicates whether trial is stopped for futility or not.} \item{\code{directionUpper}}{Specifies the direction of the alternative, only applicable for one-sided testing. Default is \code{TRUE} which means that larger values of the test statistics yield smaller p-values. Is a logical vector of length 1.} \item{\code{plannedEvents}}{Determines the number of cumulated (overall) events in survival designs when the interim stages are planned. For two treatment arms, is the number of events for both treatment arms. For multi-arm designs, refers to the overall number of events for the selected arms plus control. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{minNumberOfEventsPerStage}}{Determines the minimum number of events per stage for data-driven sample size recalculation. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{maxNumberOfEventsPerStage}}{Determines the maximum number of events per stage for data-driven sample size recalculation. Is a numeric vector of length \code{kMax} containing whole numbers.} \item{\code{expectedNumberOfEvents}}{The expected number of events under specified alternative. 
Is a numeric vector.} \item{\code{activeArms}}{The number of active treatment arms to be compared with control. Is a numeric vector of length 1 containing a whole number.} \item{\code{effectMatrix}}{The matrix of effect sizes with \code{activeArms} columns and number of rows reflecting the different situations to consider.} \item{\code{typeOfShape}}{The shape of the dose-response relationship over the treatment groups. Is a character vector of length 1.} \item{\code{omegaMaxVector}}{The range of hazard ratios with highest response for \code{"linear"} and \code{"sigmoidEmax"} model. Is a numeric vector.} \item{\code{gED50}}{The ED50 of the sigmoid Emax model. Only necessary if \code{typeOfShape = "sigmoidEmax"} has been specified. Is a numeric vector of length 1.} \item{\code{slope}}{The slope of the sigmoid Emax model, if \code{typeOfShape = "sigmoidEmax"} Is a numeric vector of length 1.} \item{\code{intersectionTest}}{The multiple test used for intersection hypotheses in closed systems of hypotheses. Is a character vector of length 1.} \item{\code{adaptations}}{Indicates whether or not an adaptation takes place at interim k. Is a logical vector of length \code{kMax} minus 1.} \item{\code{epsilonValue}}{Needs to be specified if \code{typeOfSelection = "epsilon"}. Is a numeric vector of length 1.} \item{\code{rValue}}{Needs to be specified if \code{typeOfSelection = "rBest"}. Is a numeric vector of length 1.} \item{\code{threshold}}{The selection criterion: treatment arm/population is only selected if \code{effectMeasure} exceeds \code{threshold}. Either a single numeric value or a numeric vector of length \code{activeArms} referring to a separate threshold condition for each treatment arm.} \item{\code{selectArmsFunction}}{An optional function that can be entered to define how treatment arms are selected.} \item{\code{correlationComputation}}{If \code{"alternative"}, a correlation matrix according to Deng et al. 
(Biometrics, 2019) accounting for the respective alternative is used for simulating log-rank statistics in the many-to-one design. If \code{"null"}, a constant correlation matrix valid under the null hypothesis is used.} \item{\code{earlyStop}}{The probability of stopping the trial either for efficacy or futility. Is a numeric vector.} \item{\code{selectedArms}}{The selected arms in multi-armed designs.} \item{\code{numberOfActiveArms}}{The number of active arms in a multi-armed design. Is a numeric matrix.} \item{\code{rejectAtLeastOne}}{The probability to reject at least one of the (multiple) hypotheses. Is a numeric vector.} \item{\code{rejectedArmsPerStage}}{The simulated number of rejected arms per stage.} \item{\code{successPerStage}}{The simulated success probabilities per stage where success is defined by the user. Is a numeric matrix.} \item{\code{eventsPerStage}}{The number of events per stage. Is a numeric matrix.} \item{\code{singleNumberOfEventsPerStage}}{In simulation results data set: the number of events per stage that is used for the analysis.} \item{\code{conditionalPowerAchieved}}{The calculated conditional power, under the assumption of observed or assumed effect sizes. Is a numeric matrix.} }} \keyword{internal} rpact/man/summary.TrialDesignSet.Rd0000644000176200001440000000542114335631007017006 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design_set.R \name{summary.TrialDesignSet} \alias{summary.TrialDesignSet} \title{Trial Design Set Summary} \usage{ \method{summary}{TrialDesignSet}(object, ..., type = 1, digits = NA_integer_) } \arguments{ \item{object}{A \code{\link{ParameterSet}} object.} \item{...}{Ensures that all arguments (starting from the "...") are to be named and that a warning will be displayed if unknown arguments are passed.} \item{digits}{Defines how many digits are to be used for numeric values. 
Must be a positive integer of length 1.} } \value{ Returns a \code{\link{SummaryFactory}} object. The following generics (R generic functions) are available for this result object: \itemize{ \item \code{\link[=names.FieldSet]{names()}} to obtain the field names, \item \code{\link[=print.FieldSet]{print()}} to print the object } } \description{ Displays a summary of \code{\link{ParameterSet}} object. } \details{ Summarizes the trial designs. } \section{Summary options}{ The following options can be set globally: \enumerate{ \item \code{rpact.summary.output.size}: one of \code{c("small", "medium", "large")}; defines how many details will be included into the summary; default is \code{"large"}, i.e., all available details are displayed. \item \code{rpact.summary.justify}: one of \code{c("right", "left", "centre")}; shall the values be right-justified (the default), left-justified or centered. \item \code{rpact.summary.width}: defines the maximum number of characters to be used per line (default is \code{83}). \item \code{rpact.summary.intervalFormat}: defines how intervals will be displayed in the summary, default is \code{"[\%s; \%s]"}. \item \code{rpact.summary.digits}: defines how many digits are to be used for numeric values (default is \code{3}). \item \code{rpact.summary.digits.probs}: defines how many digits are to be used for numeric values (default is one more than value of \code{rpact.summary.digits}, i.e., \code{4}). \item \code{rpact.summary.trim.zeroes}: if \code{TRUE} (default) zeroes will always displayed as "0", e.g. "0.000" will become "0". } Example: \code{options("rpact.summary.intervalFormat" = "\%s - \%s")} } \section{How to get help for generic functions}{ Click on the link of a generic in the list above to go directly to the help documentation of the \code{rpact} specific implementation of the generic. 
Note that you can use the R function \code{\link[utils]{methods}} to get all the methods of a generic and to identify the object specific name of it, e.g., use \code{methods("plot")} to get all the methods for the \code{plot} generic. There you can find, e.g., \code{plot.AnalysisResults} and obtain the specific help documentation linked above by typing \code{?plot.AnalysisResults}. } \keyword{internal} rpact/man/getLogLevel.Rd0000644000176200001440000000125714335631006014644 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/f_logger.R \name{getLogLevel} \alias{getLogLevel} \title{Get Log Level} \usage{ getLogLevel() } \value{ Returns a \code{\link[base]{character}} of length 1 specifying the current log level. } \description{ Returns the current \code{rpact} log level. } \details{ This function gets the log level of the \code{rpact} internal log message system. } \examples{ # show current log level getLogLevel() } \seealso{ \itemize{ \item \code{\link[=setLogLevel]{setLogLevel()}} for setting the log level, \item \code{\link[=resetLogLevel]{resetLogLevel()}} for resetting the log level to default. } } \keyword{internal} rpact/man/plot.TrialDesign.Rd0000644000176200001440000001253414436052674015627 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_design.R \name{plot.TrialDesign} \alias{plot.TrialDesign} \alias{plot.TrialDesignCharacteristics} \title{Trial Design Plotting} \usage{ \method{plot}{TrialDesign}( x, y, ..., main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1L, palette = "Set1", theta = seq(-1, 1, 0.01), nMax = NA_integer_, plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, grid = 1, plotSettings = NULL ) \method{plot}{TrialDesignCharacteristics}(x, y, ...) 
} \arguments{ \item{x}{The trial design, obtained from \cr \code{\link[=getDesignGroupSequential]{getDesignGroupSequential()}}, \cr \code{\link[=getDesignInverseNormal]{getDesignInverseNormal()}} or \cr \code{\link[=getDesignFisher]{getDesignFisher()}}.} \item{y}{Not available for this kind of plot (is only defined to be compatible to the generic plot function).} \item{...}{Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{type}{The plot type (default = \code{1}). The following plot types are available: \itemize{ \item \code{1}: creates a 'Boundaries' plot \item \code{3}: creates a 'Stage Levels' plot \item \code{4}: creates a 'Error Spending' plot \item \code{5}: creates a 'Power and Early Stopping' plot \item \code{6}: creates an 'Average Sample Size and Power / Early Stop' plot \item \code{7}: creates an 'Power' plot \item \code{8}: creates an 'Early Stopping' plot \item \code{9}: creates an 'Average Sample Size' plot \item \code{"all"}: creates all available plots and returns it as a grid plot or list }} \item{palette}{The palette, default is \code{"Set1"}.} \item{theta}{A vector of standardized effect sizes (theta values), default is a sequence from -1 to 1.} \item{nMax}{The maximum sample size. Must be a positive integer of length 1.} \item{plotPointsEnabled}{Logical. If \code{TRUE}, additional points will be plotted.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. 
Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{grid}{An integer value specifying the output of multiple plots. By default (\code{1}) a list of \code{ggplot} objects will be returned. If a \code{grid} value > 1 was specified, a grid plot will be returned if the number of plots is <= specified \code{grid} value; a list of \code{ggplot} objects will be returned otherwise. If \code{grid = 0} is specified, all plots will be created using \code{\link[base]{print}} command and a list of \code{ggplot} objects will be returned invisible. 
Note that one of the following packages must be installed to create a grid plot: 'ggpubr', 'gridExtra', or 'cowplot'.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSettings()}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots a trial design. } \details{ Generic function to plot a trial design. Note that \code{\link[=param_nMax]{nMax}} is not an argument that is passed to \code{ggplot2}. Rather, the underlying calculations (e.g. power for different theta's or average sample size) are based on calls to function \code{\link[=getPowerAndAverageSampleNumber]{getPowerAndAverageSampleNumber()}} which has argument \code{\link[=param_nMax]{nMax}}. I.e., \code{\link[=param_nMax]{nMax}} is not an argument to ggplot2 but to \code{\link[=getPowerAndAverageSampleNumber]{getPowerAndAverageSampleNumber()}} which is called prior to plotting. } \examples{ \dontrun{ design <- getDesignInverseNormal( kMax = 3, alpha = 0.025, typeOfDesign = "asKD", gammaA = 2, informationRates = c(0.2, 0.7, 1), typeBetaSpending = "bsOF" ) if (require(ggplot2)) { plot(design) # default: type = 1 } } } \seealso{ \code{\link[=plot.TrialDesignSet]{plot()}} to compare different designs or design parameters visually. } 
Is a numeric vector.} \item{\code{accrualIntensity}}{The absolute accrual intensities. Is a numeric vector of length \code{kMax}.} \item{\code{maxNumberOfSubjects}}{The maximum number of subjects for power calculations. Is a numeric vector.} \item{\code{numberOfSubjects}}{In simulation results data set: The number of subjects under consideration when the interim analysis takes place.} }} \keyword{internal} rpact/man/plot.EventProbabilities.Rd0000644000176200001440000000712714417202031017175 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/class_event_probabilities.R \name{plot.EventProbabilities} \alias{plot.EventProbabilities} \title{Event Probabilities Plotting} \usage{ \method{plot}{EventProbabilities}( x, y, ..., allocationRatioPlanned = x$allocationRatioPlanned, main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1L, legendTitle = NA_character_, palette = "Set1", plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, plotSettings = NULL ) } \arguments{ \item{x}{The object that inherits from \code{\link{EventProbabilities}}.} \item{y}{An optional object that inherits from \code{\link{NumberOfSubjects}}.} \item{...}{Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented for changing x or y axis limits without dropping data observations.} \item{allocationRatioPlanned}{The planned allocation ratio \code{n1 / n2} for a two treatment groups design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. It can be a vector of length kMax, too, for multi-arm and enrichment designs. 
In these cases, a change of allocating subjects to treatment groups over the stages can be assessed.} \item{main}{The main title.} \item{xlab}{The x-axis label.} \item{ylab}{The y-axis label.} \item{type}{The plot type (default = 1). Note that at the moment only one type is available.} \item{legendTitle}{The legend title, default is \code{""}.} \item{palette}{The palette, default is \code{"Set1"}.} \item{plotPointsEnabled}{Logical. If \code{TRUE}, additional points will be plotted.} \item{legendPosition}{The position of the legend. By default (\code{NA_integer_}) the algorithm tries to find a suitable position. Choose one of the following values to specify the position manually: \itemize{ \item \code{-1}: no legend will be shown \item \code{NA}: the algorithm tries to find a suitable position \item \code{0}: legend position outside plot \item \code{1}: legend position left top \item \code{2}: legend position left center \item \code{3}: legend position left bottom \item \code{4}: legend position right top \item \code{5}: legend position right center \item \code{6}: legend position right bottom }} \item{showSource}{Logical. If \code{TRUE}, the parameter names of the object will be printed which were used to create the plot; that may be, e.g., useful to check the values or to create own plots with the base R \code{plot} function. 
Alternatively \code{showSource} can be defined as one of the following character values: \itemize{ \item \code{"commands"}: returns a character vector with plot commands \item \code{"axes"}: returns a list with the axes definitions \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function does not stop if an error occurs) \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and returned as character vector (function stops if an error occurs) } Note: no plot object will be returned if \code{showSource} is a character.} \item{plotSettings}{An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSetting()s}}.} } \value{ Returns a \code{ggplot2} object. } \description{ Plots an object that inherits from class \code{\link{EventProbabilities}}. } \details{ Generic function to plot an event probabilities object. Generic function to plot a parameter set. } rpact/man/param_dataInput.Rd0000644000176200001440000000112714335631010015533 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_dataInput} \alias{param_dataInput} \title{Parameter Description: Data Input} \arguments{ \item{dataInput}{The summary data used for calculating the test results. This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival} and should be created with the function \code{\link[=getDataset]{getDataset()}}. 
For more information see \code{\link[=getDataset]{getDataset()}}.} } \description{ Parameter Description: Data Input } \keyword{internal} rpact/man/param_thetaH0.Rd0000644000176200001440000000210514335631010015074 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_thetaH0} \alias{param_thetaH0} \title{Parameter Description: Theta H0} \arguments{ \item{thetaH0}{The null hypothesis value, default is \code{0} for the normal and the binary case (testing means and rates, respectively), it is \code{1} for the survival case (testing the hazard ratio).\cr\cr For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. That is, in case of (one-sided) testing of \itemize{ \item \emph{means}: a value \code{!= 0} (or a value \code{!= 1} for testing the mean ratio) can be specified. \item \emph{rates}: a value \code{!= 0} (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. } For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for defining the null hypothesis H0: \code{pi = thetaH0}.} } \description{ Parameter Description: Theta H0 } \keyword{internal} rpact/man/dataMeans.Rd0000644000176200001440000000076514313321260014325 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{dataMeans} \alias{dataMeans} \title{One-Arm Dataset of Means} \format{ A \code{\link[base]{data.frame}} object. } \usage{ dataMeans } \description{ A dataset containing the sample sizes, means, and standard deviations of one group. Use \code{getDataset(dataMeans)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. 
} \keyword{internal} rpact/man/param_effectMatrix.Rd0000644000176200001440000000065614335631011016232 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/parameter_descriptions.R \name{param_effectMatrix} \alias{param_effectMatrix} \title{Parameter Description: Effect Matrix} \arguments{ \item{effectMatrix}{Matrix of effect sizes with \code{activeArms} columns and number of rows reflecting the different situations to consider.} } \description{ Parameter Description: Effect Matrix } \keyword{internal} rpact/DESCRIPTION0000644000176200001440000000753314450555553013114 0ustar liggesusersPackage: rpact Title: Confirmatory Adaptive Clinical Trial Design and Analysis Version: 3.4.0 Date: 2023-07-03 Authors@R: c( person( given = "Gernot", family = "Wassmer", email = "gernot.wassmer@rpact.com", comment = c(ORCID = "0000-0001-9397-1794"), role = c("aut") ), person( given = "Friedrich", family = "Pahlke", email = "friedrich.pahlke@rpact.com", comment = c(ORCID = "0000-0003-2105-2582"), role = c("aut", "cre") ), person( given = "Till", family = "Jensen", email = "till.jensen@rpact.com", role = c("ctb") ), person( given = "Stephen", family = "Schueuerhuis", email = "stephen.schueuerhuis@charite.de", role = c("ctb") )) Description: Design and analysis of confirmatory adaptive clinical trials with continuous, binary, and survival endpoints according to the methods described in the monograph by Wassmer and Brannath (2016) . This includes classical group sequential as well as multi-stage adaptive hypotheses tests that are based on the combination testing principle. 
License: LGPL-3 Encoding: UTF-8 LazyData: true URL: https://www.rpact.org, https://www.rpact.com, https://github.com/rpact-com/rpact, https://rpact-com.github.io/rpact/ BugReports: https://github.com/rpact-com/rpact/issues Language: en-US Depends: R (>= 3.6.0) Imports: methods, stats, utils, graphics, tools, rlang, knitr (>= 1.19), Rcpp (>= 1.0.3) LinkingTo: Rcpp Suggests: ggplot2 (>= 2.2.0), testthat (>= 3.0.0), mnormt (>= 1.5-7), rmarkdown (>= 1.10) VignetteBuilder: knitr, rmarkdown RoxygenNote: 7.2.3 Config/testthat/edition: 3 Config/testthat/parallel: true Config/testthat/start-first: *analysis* Collate: 'RcppExports.R' 'f_logger.R' 'f_core_constants.R' 'f_core_utilities.R' 'f_core_assertions.R' 'f_analysis_utilities.R' 'f_parameter_set_utilities.R' 'class_core_parameter_set.R' 'class_core_plot_settings.R' 'f_core_plot.R' 'class_design.R' 'f_object_r_code.R' 'f_analysis_base.R' 'class_analysis_dataset.R' 'class_analysis_stage_results.R' 'class_analysis_results.R' 'class_time.R' 'class_design_set.R' 'f_design_utilities.R' 'class_design_plan.R' 'class_design_power_and_asn.R' 'class_event_probabilities.R' 'f_simulation_utilities.R' 'f_simulation_base_survival.R' 'class_simulation_results.R' 'class_performance_score.R' 'class_summary.R' 'data.R' 'f_analysis_base_means.R' 'f_analysis_base_rates.R' 'f_analysis_base_survival.R' 'f_analysis_enrichment.R' 'f_analysis_enrichment_means.R' 'f_analysis_enrichment_rates.R' 'f_analysis_enrichment_survival.R' 'f_analysis_multiarm.R' 'f_analysis_multiarm_means.R' 'f_analysis_multiarm_rates.R' 'f_analysis_multiarm_survival.R' 'f_core_output_formats.R' 'f_design_fisher_combination_test.R' 'f_design_group_sequential.R' 'f_design_sample_size_calculator.R' 'f_quality_assurance.R' 'f_simulation_base_means.R' 'f_simulation_base_rates.R' 'f_simulation_calc_subjects_function.R' 'f_simulation_enrichment.R' 'f_simulation_enrichment_means.R' 'f_simulation_enrichment_rates.R' 'f_simulation_enrichment_survival.R' 'f_simulation_multiarm.R' 
'f_simulation_multiarm_means.R' 'f_simulation_multiarm_rates.R' 'f_simulation_multiarm_survival.R' 'f_simulation_performance_score.R' 'parameter_descriptions.R' 'pkgname.R' NeedsCompilation: yes Packaged: 2023-07-03 13:54:12 UTC; fried Author: Gernot Wassmer [aut] (), Friedrich Pahlke [aut, cre] (), Till Jensen [ctb], Stephen Schueuerhuis [ctb] Maintainer: Friedrich Pahlke Repository: CRAN Date/Publication: 2023-07-03 14:30:03 UTC rpact/build/0000755000176200001440000000000014450551404012464 5ustar liggesusersrpact/build/vignette.rds0000644000176200001440000000033514450551401015021 0ustar liggesusersuK @, u)]B:tE7|H>y6"7{3 !hbZtB4by !9@ZA L?$x{D&2\2<8c/٪`9ZkWײ^i%M)&͌GFҌO})|.T~7D8Z4y0c1`] trpact/build/partial.rdb0000644000176200001440000000007514450551257014621 0ustar liggesusersb```b`abb`b1 H020piּb C".X7rpact/tests/0000755000176200001440000000000014070776017012536 5ustar liggesusersrpact/tests/testthat/0000755000176200001440000000000014450555552014377 5ustar liggesusersrpact/tests/testthat/test-f_core_plot.R0000644000176200001440000000722714440565454020003 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_core_plot.R ## | Creation date: 06 February 2023, 12:11:55 ## | File version: $Revision: 7065 $ ## | Last changed: $Date: 2023-06-09 11:04:44 +0200 (Fr, 09 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing .reconstructSequenceCommand") test_that("The output is as exptected", { 
expect_equal(.reconstructSequenceCommand(seq(-1, 1, 0.02)), "seq(-1, 1, 0.02)") expect_equal(.reconstructSequenceCommand(c()), NA_character_) expect_equal(.reconstructSequenceCommand(c(1)), "1") expect_equal(.reconstructSequenceCommand(c(1, 2)), "c(1, 2)") expect_equal(.reconstructSequenceCommand(c(1, 2, 3)), "c(1, 2, 3)") expect_equal(.reconstructSequenceCommand(c(1, 2, 3, 4)), "seq(1, 4, 1)") expect_equal(.reconstructSequenceCommand(c(1, 2, 3, 5)), "c(1, 2, 3, 5)") expect_true(grepl(.getRexepSaveCharacter("x$.design"), "x$.design")) expect_true(grepl(.getRexepSaveCharacter("x$.design"), "c(x$.design, xxx)")) expect_false(grepl(.getRexepSaveCharacter("x$.design"), "c(x$design, xxx)")) }) test_that("Internal core plot functions throw errors when arguments are missing or wrong", { expect_equal(.addNumberToPlotCaption(caption = "hello", type = "character"), "hello") expect_error(.getPlotCaption()) expect_error(.getPlotTypeNumber()) expect_error(.getPlotTypeNumber(type = "test")) expect_error(.createPlotResultObject()) expect_error(.createPlotResultObject(list(x = 1), grid = -1)) expect_error(.createPlotResultObject(list(x = 1), grid = 101)) expect_error(.createPlotResultObject(list(x = 1), grid = 101)) expect_error(.printPlotShowSourceSeparator()) expect_error(plotTypes()) expect_error(.isValidVariedParameterVectorForPlotting()) expect_error(.removeInvalidPlotTypes()) expect_error(getAvailablePlotTypes()) expect_error(.getVariedParameterHint()) expect_error(.createValidParameterName()) expect_equal(.createValidParameterName(NULL, "hello"), "hello") expect_equal(.createValidParameterName("HI", "hello"), "HI$hello") expect_null(.showPlotSourceInformation()) expect_error(.testPlotCommand()) expect_error(.getParameterSetAsDataFrame()) expect_error(.getCategories()) expect_error(.getAxisLabel()) expect_equal(.getAxisLabel("heho", NULL), "%heho%") expect_error(.allGroupValuesEqual()) expect_error(.plotParameterSet()) expect_error(.naAndNaNOmit()) 
expect_null(.naAndNaNOmit(NULL)) expect_error(.getScalingFactors()) expect_error(.plotDataFrame()) expect_error(.getPointBorder()) expect_error(.getLegendPosition()) expect_error(.addQnormAlphaLine()) expect_equal(.getLambdaStepFunctionByTime(3, NA, 5), 5) expect_error(.getLambdaStepFunction()) expect_error(getLambdaStepFunction()) expect_type(.getRelativeFigureOutputPath(), "character") expect_error(saveLastPlot()) expect_error(.getGridPlotSettings()) expect_error(.getGridLegendPosition()) expect_error(.formatSubTitleValue()) }) rpact/tests/testthat/test-generic_functions.R0000644000176200001440000001401514370207346021200 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-generic_functions.R ## | Creation date: 06 February 2023, 12:14:59 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Class 'SummaryFactory'") test_that("Testing 'summary.ParameterSet': no errors occur", { .skipTestIfDisabled() design <- getDesignGroupSequential( alpha = 0.05, kMax = 4, sided = 1, typeOfDesign = "WT", deltaWT = 0.1 ) designFisher <- getDesignFisher( kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.5, 0.8, 1), alpha0Vec = rep(0.4, 3) ) designCharacteristics <- getDesignCharacteristics(design) powerAndASN <- getPowerAndAverageSampleNumber(design, theta = 1, nMax = 100) designSet <- getDesignSet(design = design, deltaWT = c(0.3, 0.4)) dataset 
<- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) stageResults <- getStageResults(design, dataset) suppressWarnings(designPlan <- getSampleSizeMeans(design)) simulationResults <- getSimulationSurvival(design, maxNumberOfSubjects = 1200, plannedEvents = c(50, 100, 150, 200), seed = 12345 ) piecewiseSurvivalTime <- getPiecewiseSurvivalTime(list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007 ), hazardRatio = 0.8) accrualTime <- getAccrualTime(list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45 ), maxNumberOfSubjects = 1400) expect_vector(names(design)) expect_vector(names(designFisher)) expect_vector(names(designCharacteristics)) expect_vector(names(powerAndASN)) expect_vector(names(designSet)) expect_vector(names(dataset)) expect_vector(names(stageResults)) expect_vector(names(designPlan)) expect_vector(names(simulationResults)) expect_vector(names(piecewiseSurvivalTime)) expect_vector(names(accrualTime)) expect_output(print(design)) expect_output(print(designFisher)) expect_output(print(designCharacteristics)) expect_output(print(powerAndASN)) expect_output(print(designSet)) expect_output(print(dataset)) expect_output(print(stageResults)) expect_output(print(designPlan)) expect_output(print(simulationResults)) expect_output(print(piecewiseSurvivalTime)) expect_output(print(accrualTime)) expect_output(summary(design)$show()) expect_output(summary(designFisher)$show()) expect_output(summary(designCharacteristics)$show()) expect_output(summary(powerAndASN)) expect_output(print(summary(designSet))) expect_output(summary(dataset)$show()) expect_output(summary(stageResults)) expect_output(summary(designPlan)$show()) expect_output(summary(simulationResults)$show()) expect_output(summary(piecewiseSurvivalTime)) expect_output(summary(accrualTime)) 
expect_named(as.data.frame(design)) expect_named(as.data.frame(designFisher)) expect_named(as.data.frame(designCharacteristics)) expect_named(as.data.frame(powerAndASN)) expect_named(as.data.frame(designSet)) expect_named(as.data.frame(dataset)) expect_named(as.data.frame(stageResults)) expect_named(as.data.frame(designPlan)) expect_named(as.data.frame(simulationResults)) expect_named(as.data.frame(piecewiseSurvivalTime)) expect_named(as.data.frame(accrualTime)) expect_s3_class(as.data.frame(design, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(designFisher, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(designCharacteristics, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(powerAndASN, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(designSet, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(dataset, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(stageResults, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(designPlan, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(simulationResults, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(piecewiseSurvivalTime, niceColumnNamesEnabled = FALSE), "data.frame") expect_s3_class(as.data.frame(accrualTime, niceColumnNamesEnabled = FALSE), "data.frame") expect_type(as.matrix(design), "character") expect_type(as.matrix(designFisher), "character") expect_type(as.matrix(designCharacteristics), "double") expect_type(as.matrix(powerAndASN), "double") expect_type(as.matrix(designSet), "character") expect_type(as.matrix(dataset), "double") expect_type(as.matrix(stageResults), "character") expect_type(as.matrix(designPlan), "double") expect_type(as.matrix(simulationResults), "double") expect_type(as.matrix(piecewiseSurvivalTime), "double") expect_type(as.matrix(accrualTime), 
"double") suppressWarnings(analysisResults <- getAnalysisResults(design, dataset)) expect_vector(names(analysisResults)) expect_output(print(analysisResults)) expect_output(summary(analysisResults)$show()) expect_named(as.data.frame(analysisResults)) expect_s3_class(as.data.frame(analysisResults, niceColumnNamesEnabled = FALSE), "data.frame") expect_type(as.matrix(analysisResults), "character") }) rpact/tests/testthat/test-class_design_plan.R0000644000176200001440000000174314446750002021144 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-class_analysis_dataset.R ## | Creation date: 06 February 2023, 12:04:06 ## | File version: $Revision: 7139 $ ## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Class 'TrialDesignPlan'") test_that("Test design plan classes and utility functions", { expect_error(.addPlotSubTitleItems()) }) rpact/tests/testthat/test-f_simulation_base_means.R0000644000176200001440000045355114440602730022350 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: 
https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_simulation_base_means.R ## | Creation date: 06 February 2023, 12:13:46 ## | File version: $Revision: 7067 $ ## | Last changed: $Date: 2023-06-09 12:58:32 +0200 (Fr, 09 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Simulation Means Function") test_that("'getSimulationMeans': several configurations", { # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationMeans} # @refFS[Formula]{fs:simulationOneArmMeansTestStatistics} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsDiff} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsRatio} # @refFS[Formula]{fs:testStatisticGroupSequentialWeightedAverage} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} maxNumberOfIterations <- 100 seed <- 99123 options(width = 180) maxNumberOfSubjects <- 90 informationRates <- c(0.2, 0.5, 1) plannedSubjects <- round(informationRates * maxNumberOfSubjects) x1 <- getSimulationMeans( design = getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1) ), groups = 2, meanRatio = TRUE, thetaH0 = 0.4, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, seed = seed ) ## Comparison of the results of SimulationResultsMeans object 'x1' with expected results expect_equal(x1$effect, c(0.6, 0.8, 1, 1.2, 1.4, 1.6), tolerance = 1e-07) expect_equal(x1$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x1$iterations[2, ], c(96, 100, 100, 94, 97, 95)) expect_equal(x1$iterations[3, ], c(72, 68, 37, 16, 2, 2)) expect_equal(x1$overallReject, c(0.81, 0.93, 0.99, 0.99, 1, 1), tolerance = 1e-07) expect_equal(x1$rejectPerStage[1, ], c(0, 0, 0, 0.05, 0.03, 0.05), tolerance = 
1e-07) expect_equal(x1$rejectPerStage[2, ], c(0.2, 0.29, 0.62, 0.78, 0.95, 0.93), tolerance = 1e-07) expect_equal(x1$rejectPerStage[3, ], c(0.61, 0.64, 0.37, 0.16, 0.02, 0.02), tolerance = 1e-07) expect_equal(x1$futilityStop, c(0.08, 0.03, 0.01, 0.01, 0, 0), tolerance = 1e-07) expect_equal(x1$futilityPerStage[1, ], c(0.04, 0, 0, 0.01, 0, 0), tolerance = 1e-07) expect_equal(x1$futilityPerStage[2, ], c(0.04, 0.03, 0.01, 0, 0, 0), tolerance = 1e-07) expect_equal(x1$earlyStop, c(0.28, 0.32, 0.63, 0.84, 0.98, 0.98), tolerance = 1e-07) expect_equal(x1$expectedNumberOfSubjects, c(76.32, 75.6, 61.65, 50.58, 45.09, 44.55), tolerance = 1e-07) expect_equal(x1$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x1$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x1$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(0.26405311, 0.35839614, 0.48830732, 0.63603264, 0.77682482, 0.82707873), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[3, ], c(0.60511343, 0.74281632, 0.84083206, 0.87094401, 0.89751119, 0.97110806), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$effect, x1$effect, tolerance = 1e-05) expect_equal(x1CodeBased$iterations, x1$iterations, tolerance = 1e-05) expect_equal(x1CodeBased$overallReject, x1$overallReject, tolerance = 1e-05) expect_equal(x1CodeBased$rejectPerStage, x1$rejectPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$futilityStop, x1$futilityStop, tolerance = 1e-05) expect_equal(x1CodeBased$futilityPerStage, x1$futilityPerStage, tolerance = 1e-05) 
expect_equal(x1CodeBased$earlyStop, x1$earlyStop, tolerance = 1e-05) expect_equal(x1CodeBased$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x1CodeBased$sampleSizes, x1$sampleSizes, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPowerAchieved, x1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x2 <- getSimulationMeans( design = getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1) ), groups = 2, meanRatio = FALSE, thetaH0 = 0.2, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, seed = seed ) ## Comparison of the results of SimulationResultsMeans object 'x2' with expected results expect_equal(x2$effect, c(-0.2, 0, 0.2, 0.4, 0.6, 0.8), tolerance = 1e-07) expect_equal(x2$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x2$iterations[2, ], c(63, 73, 84, 83, 89, 96)) expect_equal(x2$iterations[3, ], c(15, 24, 42, 53, 69, 76)) expect_equal(x2$overallReject, c(0, 0.02, 0.07, 0.18, 0.33, 0.53), tolerance = 1e-07) expect_equal(x2$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x2$rejectPerStage[2, ], c(0, 0, 0.02, 0.03, 0.06, 0.1), tolerance = 1e-07) expect_equal(x2$rejectPerStage[3, ], c(0, 0.02, 0.05, 0.15, 0.27, 0.43), tolerance = 1e-07) expect_equal(x2$futilityStop, c(0.85, 0.76, 0.56, 0.44, 0.25, 0.14), tolerance = 1e-07) expect_equal(x2$futilityPerStage[1, ], c(0.37, 0.27, 0.16, 0.17, 0.11, 0.04), tolerance = 1e-07) expect_equal(x2$futilityPerStage[2, ], c(0.48, 0.49, 0.4, 0.27, 0.14, 0.1), tolerance = 1e-07) expect_equal(x2$earlyStop, c(0.85, 0.76, 0.58, 0.47, 0.31, 0.24), tolerance = 1e-07) expect_equal(x2$expectedNumberOfSubjects, c(41.76, 48.51, 
59.58, 64.26, 73.08, 78.12), tolerance = 1e-07) expect_equal(x2$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x2$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x2$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0.056595809, 0.082243527, 0.1171868, 0.14183443, 0.20192022, 0.18371302), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[3, ], c(0.36165449, 0.31543938, 0.36771185, 0.4758946, 0.54527876, 0.61204049), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$effect, x2$effect, tolerance = 1e-05) expect_equal(x2CodeBased$iterations, x2$iterations, tolerance = 1e-05) expect_equal(x2CodeBased$overallReject, x2$overallReject, tolerance = 1e-05) expect_equal(x2CodeBased$rejectPerStage, x2$rejectPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$futilityStop, x2$futilityStop, tolerance = 1e-05) expect_equal(x2CodeBased$futilityPerStage, x2$futilityPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$earlyStop, x2$earlyStop, tolerance = 1e-05) expect_equal(x2CodeBased$expectedNumberOfSubjects, x2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x2CodeBased$sampleSizes, x2$sampleSizes, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPowerAchieved, x2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x3 <- getSimulationMeans( design = 
getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1) ), groups = 1, thetaH0 = 0.2, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, seed = seed ) ## Comparison of the results of SimulationResultsMeans object 'x3' with expected results expect_equal(x3$effect, c(-0.2, 0, 0.2, 0.4, 0.6, 0.8), tolerance = 1e-07) expect_equal(x3$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x3$iterations[2, ], c(50, 71, 87, 96, 97, 99)) expect_equal(x3$iterations[3, ], c(9, 21, 63, 67, 49, 29)) expect_equal(x3$overallReject, c(0, 0.02, 0.21, 0.59, 0.94, 1), tolerance = 1e-07) expect_equal(x3$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0.01), tolerance = 1e-07) expect_equal(x3$rejectPerStage[2, ], c(0, 0, 0.03, 0.21, 0.47, 0.7), tolerance = 1e-07) expect_equal(x3$rejectPerStage[3, ], c(0, 0.02, 0.18, 0.38, 0.47, 0.29), tolerance = 1e-07) expect_equal(x3$futilityStop, c(0.91, 0.79, 0.34, 0.12, 0.04, 0), tolerance = 1e-07) expect_equal(x3$futilityPerStage[1, ], c(0.5, 0.29, 0.13, 0.04, 0.03, 0), tolerance = 1e-07) expect_equal(x3$futilityPerStage[2, ], c(0.41, 0.5, 0.21, 0.08, 0.01, 0), tolerance = 1e-07) expect_equal(x3$earlyStop, c(0.91, 0.79, 0.37, 0.33, 0.51, 0.71), tolerance = 1e-07) expect_equal(x3$expectedNumberOfSubjects, c(35.55, 46.62, 69.84, 74.07, 66.24, 57.78), tolerance = 1e-07) expect_equal(x3$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x3$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x3$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.047252355, 0.074094582, 0.18424333, 0.30402818, 0.54078356, 0.67131653), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[3, ], c(0.27249296, 0.30454177, 0.45212728, 0.62638376, 0.84307565, 0.91215549), tolerance = 1e-07) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$effect, x3$effect, tolerance = 1e-05) expect_equal(x3CodeBased$iterations, x3$iterations, tolerance = 1e-05) expect_equal(x3CodeBased$overallReject, x3$overallReject, tolerance = 1e-05) expect_equal(x3CodeBased$rejectPerStage, x3$rejectPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$futilityStop, x3$futilityStop, tolerance = 1e-05) expect_equal(x3CodeBased$futilityPerStage, x3$futilityPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$earlyStop, x3$earlyStop, tolerance = 1e-05) expect_equal(x3CodeBased$expectedNumberOfSubjects, x3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x3CodeBased$sampleSizes, x3$sampleSizes, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPowerAchieved, x3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x4 <- getSimulationMeans( design = getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1) ), groups = 2, meanRatio = TRUE, thetaH0 = 1.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE, seed = seed ) ## Comparison of the results of SimulationResultsMeans object 'x4' with expected results expect_equal(x4$effect, c(-0.1, 0.1, 0.3, 0.5, 0.7, 0.9), tolerance = 1e-07) expect_equal(x4$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x4$iterations[2, ], c(76, 71, 52, 52, 45, 23)) expect_equal(x4$iterations[3, ], c(31, 27, 10, 
12, 3, 3)) expect_equal(x4$overallReject, c(0.01, 0.02, 0, 0, 0, 0), tolerance = 1e-07) expect_equal(x4$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x4$rejectPerStage[2, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x4$rejectPerStage[3, ], c(0.01, 0.02, 0, 0, 0, 0), tolerance = 1e-07) expect_equal(x4$futilityStop, c(0.69, 0.73, 0.9, 0.88, 0.97, 0.97), tolerance = 1e-07) expect_equal(x4$futilityPerStage[1, ], c(0.24, 0.29, 0.48, 0.48, 0.55, 0.77), tolerance = 1e-07) expect_equal(x4$futilityPerStage[2, ], c(0.45, 0.44, 0.42, 0.4, 0.42, 0.2), tolerance = 1e-07) expect_equal(x4$earlyStop, c(0.69, 0.73, 0.9, 0.88, 0.97, 0.97), tolerance = 1e-07) expect_equal(x4$expectedNumberOfSubjects, c(52.47, 49.32, 36.54, 37.44, 31.5, 25.56), tolerance = 1e-07) expect_equal(x4$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x4$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x4$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.088210955, 0.073662665, 0.032364394, 0.040456333, 0.047760081, 0.047799584), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[3, ], c(0.34802745, 0.34204022, 0.18915629, 0.18461746, 0.36492317, 0.12863193), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$effect, x4$effect, tolerance = 1e-05) expect_equal(x4CodeBased$iterations, x4$iterations, tolerance = 1e-05) expect_equal(x4CodeBased$overallReject, x4$overallReject, tolerance = 1e-05) expect_equal(x4CodeBased$rejectPerStage, x4$rejectPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$futilityStop, x4$futilityStop, 
tolerance = 1e-05) expect_equal(x4CodeBased$futilityPerStage, x4$futilityPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$earlyStop, x4$earlyStop, tolerance = 1e-05) expect_equal(x4CodeBased$expectedNumberOfSubjects, x4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x4CodeBased$sampleSizes, x4$sampleSizes, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPowerAchieved, x4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x5 <- getSimulationMeans( design = getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1) ), groups = 2, meanRatio = FALSE, thetaH0 = 1.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE, seed = seed ) ## Comparison of the results of SimulationResultsMeans object 'x5' with expected results expect_equal(x5$effect, c(-1.1, -0.9, -0.7, -0.5, -0.3, -0.1), tolerance = 1e-07) expect_equal(x5$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x5$iterations[2, ], c(98, 96, 88, 84, 82, 79)) expect_equal(x5$iterations[3, ], c(77, 74, 69, 58, 54, 43)) expect_equal(x5$overallReject, c(0.78, 0.71, 0.51, 0.27, 0.13, 0.04), tolerance = 1e-07) expect_equal(x5$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x5$rejectPerStage[2, ], c(0.19, 0.14, 0.08, 0.06, 0, 0), tolerance = 1e-07) expect_equal(x5$rejectPerStage[3, ], c(0.59, 0.57, 0.43, 0.21, 0.13, 0.04), tolerance = 1e-07) expect_equal(x5$futilityStop, c(0.04, 0.12, 0.23, 0.36, 0.46, 0.57), tolerance = 1e-07) expect_equal(x5$futilityPerStage[1, ], c(0.02, 0.04, 0.12, 0.16, 0.18, 0.21), tolerance = 1e-07) expect_equal(x5$futilityPerStage[2, ], c(0.02, 0.08, 0.11, 0.2, 0.28, 0.36), tolerance = 1e-07) 
expect_equal(x5$earlyStop, c(0.23, 0.26, 0.31, 0.42, 0.46, 0.57), tolerance = 1e-07) expect_equal(x5$expectedNumberOfSubjects, c(79.11, 77.22, 72.81, 66.78, 64.44, 58.68), tolerance = 1e-07) expect_equal(x5$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x5$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x5$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.33588936, 0.25194744, 0.19824827, 0.19178721, 0.11444971, 0.092566355), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[3, ], c(0.74226501, 0.69902839, 0.55641803, 0.50033698, 0.45636572, 0.33236099), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$effect, x5$effect, tolerance = 1e-05) expect_equal(x5CodeBased$iterations, x5$iterations, tolerance = 1e-05) expect_equal(x5CodeBased$overallReject, x5$overallReject, tolerance = 1e-05) expect_equal(x5CodeBased$rejectPerStage, x5$rejectPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$futilityStop, x5$futilityStop, tolerance = 1e-05) expect_equal(x5CodeBased$futilityPerStage, x5$futilityPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$earlyStop, x5$earlyStop, tolerance = 1e-05) expect_equal(x5CodeBased$expectedNumberOfSubjects, x5$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x5CodeBased$sampleSizes, x5$sampleSizes, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPowerAchieved, x5$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 
0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x6 <- getSimulationMeans( design = getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1) ), groups = 1, thetaH0 = 0.8, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 1.5, directionUpper = FALSE, seed = seed ) ## Comparison of the results of SimulationResultsMeans object 'x6' with expected results expect_equal(x6$effect, c(-0.8, -0.6, -0.4, -0.2, 0, 0.2), tolerance = 1e-07) expect_equal(x6$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x6$iterations[2, ], c(100, 99, 96, 81, 70, 49)) expect_equal(x6$iterations[3, ], c(22, 43, 75, 57, 27, 7)) expect_equal(x6$overallReject, c(1, 0.96, 0.66, 0.26, 0.02, 0), tolerance = 1e-07) expect_equal(x6$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x6$rejectPerStage[2, ], c(0.78, 0.56, 0.13, 0.05, 0, 0), tolerance = 1e-07) expect_equal(x6$rejectPerStage[3, ], c(0.22, 0.4, 0.53, 0.21, 0.02, 0), tolerance = 1e-07) expect_equal(x6$futilityStop, c(0, 0.01, 0.12, 0.38, 0.73, 0.93), tolerance = 1e-07) expect_equal(x6$futilityPerStage[1, ], c(0, 0.01, 0.04, 0.19, 0.3, 0.51), tolerance = 1e-07) expect_equal(x6$futilityPerStage[2, ], c(0, 0, 0.08, 0.19, 0.43, 0.42), tolerance = 1e-07) expect_equal(x6$earlyStop, c(0.78, 0.57, 0.25, 0.43, 0.73, 0.93), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(54.9, 64.08, 77.67, 65.52, 49.05, 34.38), tolerance = 1e-07) expect_equal(x6$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x6$sampleSizes[2, ], c(27, 27, 27, 27, 27, 27)) expect_equal(x6$sampleSizes[3, ], c(45, 45, 45, 45, 45, 45)) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.67267344, 0.52857476, 0.27194206, 0.18361852, 0.064769395, 0.04670856), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[3, ], 
c(0.81011604, 0.77276452, 0.65795757, 0.50391481, 0.35327029, 0.24591214), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$effect, x6$effect, tolerance = 1e-05) expect_equal(x6CodeBased$iterations, x6$iterations, tolerance = 1e-05) expect_equal(x6CodeBased$overallReject, x6$overallReject, tolerance = 1e-05) expect_equal(x6CodeBased$rejectPerStage, x6$rejectPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$futilityStop, x6$futilityStop, tolerance = 1e-05) expect_equal(x6CodeBased$futilityPerStage, x6$futilityPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$earlyStop, x6$earlyStop, tolerance = 1e-05) expect_equal(x6CodeBased$expectedNumberOfSubjects, x6$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x6CodeBased$sampleSizes, x6$sampleSizes, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPowerAchieved, x6$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x7 <- getSimulationMeans( design = getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1) ), groups = 1, thetaH0 = -0.2, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, stDev = 3.5, alternative = seq(-1.2, -0.2, 0.2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10, 10), maxNumberOfSubjectsPerStage = c(NA, 100, 100), directionUpper = FALSE, seed = seed ) ## Comparison of the results of SimulationResultsMeans object 'x7' with expected results expect_equal(x7$effect, c(-1, -0.8, -0.6, 
-0.4, -0.2, 0), tolerance = 1e-07) expect_equal(x7$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x7$iterations[2, ], c(93, 97, 88, 78, 78, 74)) expect_equal(x7$iterations[3, ], c(52, 77, 69, 57, 51, 35)) expect_equal(x7$overallReject, c(0.81, 0.82, 0.59, 0.32, 0.12, 0.03), tolerance = 1e-07) expect_equal(x7$rejectPerStage[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(x7$rejectPerStage[2, ], c(0.4, 0.19, 0.12, 0.07, 0, 0), tolerance = 1e-07) expect_equal(x7$rejectPerStage[3, ], c(0.41, 0.63, 0.47, 0.25, 0.12, 0.03), tolerance = 1e-07) expect_equal(x7$futilityStop, c(0.08, 0.04, 0.19, 0.36, 0.49, 0.65), tolerance = 1e-07) expect_equal(x7$futilityPerStage[1, ], c(0.07, 0.03, 0.12, 0.22, 0.22, 0.26), tolerance = 1e-07) expect_equal(x7$futilityPerStage[2, ], c(0.01, 0.01, 0.07, 0.14, 0.27, 0.39), tolerance = 1e-07) expect_equal(x7$earlyStop, c(0.48, 0.23, 0.31, 0.43, 0.49, 0.65), tolerance = 1e-07) expect_equal(x7$expectedNumberOfSubjects, c(105.75972, 141.87769, 144.85789, 134.64079, 139.03875, 121.42333), tolerance = 1e-07) expect_equal(x7$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x7$sampleSizes[2, ], c(74.918717, 83.151367, 90.734126, 88.517379, 94.605927, 95.502536), tolerance = 1e-07) expect_equal(x7$sampleSizes[3, ], c(34.779445, 56.130993, 68.133125, 83.503922, 92.63947, 93.575595), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(0.48960058, 0.35501907, 0.33230293, 0.3239724, 0.20164899, 0.17099815), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[3, ], c(0.75975737, 0.70067902, 0.61722401, 0.51061814, 0.40378864, 0.28388391), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text 
= getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$effect, x7$effect, tolerance = 1e-05) expect_equal(x7CodeBased$iterations, x7$iterations, tolerance = 1e-05) expect_equal(x7CodeBased$overallReject, x7$overallReject, tolerance = 1e-05) expect_equal(x7CodeBased$rejectPerStage, x7$rejectPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$futilityStop, x7$futilityStop, tolerance = 1e-05) expect_equal(x7CodeBased$futilityPerStage, x7$futilityPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$earlyStop, x7$earlyStop, tolerance = 1e-05) expect_equal(x7CodeBased$expectedNumberOfSubjects, x7$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x7CodeBased$sampleSizes, x7$sampleSizes, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPowerAchieved, x7$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x8 <- getSimulationMeans( design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5)), groups = 2, meanRatio = FALSE, thetaH0 = -0.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, stDev = 3.5, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10, 10), maxNumberOfSubjectsPerStage = c(NA, 100, 100), seed = seed ) ## Comparison of the results of SimulationResultsMeans object 'x8' with expected results expect_equal(x8$effect, c(0.1, 0.3, 0.5, 0.7, 0.9, 1.1), tolerance = 1e-07) expect_equal(x8$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x8$iterations[2, ], c(74, 78, 81, 81, 90, 86)) expect_equal(x8$iterations[3, ], c(30, 33, 52, 55, 67, 65)) expect_equal(x8$overallReject, c(0.04, 0.03, 0.09, 0.19, 0.35, 0.32), tolerance = 1e-07) expect_equal(x8$rejectPerStage[1, ], c(0, 0, 0, 0.01, 0, 0), tolerance = 1e-07) 
expect_equal(x8$rejectPerStage[2, ], c(0.02, 0.01, 0.02, 0.06, 0.1, 0.07), tolerance = 1e-07) expect_equal(x8$rejectPerStage[3, ], c(0.02, 0.02, 0.07, 0.12, 0.25, 0.25), tolerance = 1e-07) expect_equal(x8$futilityStop, c(0.68, 0.66, 0.46, 0.38, 0.23, 0.28), tolerance = 1e-07) expect_equal(x8$futilityPerStage[1, ], c(0.26, 0.22, 0.19, 0.18, 0.1, 0.14), tolerance = 1e-07) expect_equal(x8$futilityPerStage[2, ], c(0.42, 0.44, 0.27, 0.2, 0.13, 0.14), tolerance = 1e-07) expect_equal(x8$earlyStop, c(0.7, 0.67, 0.48, 0.45, 0.33, 0.35), tolerance = 1e-07) expect_equal(x8$expectedNumberOfSubjects, c(111.53284, 119.9607, 137.10925, 136.56279, 151.62676, 145.91552), tolerance = 1e-07) expect_equal(x8$sampleSizes[1, ], c(18, 18, 18, 18, 18, 18)) expect_equal(x8$sampleSizes[2, ], c(89.604753, 93.952606, 89.473054, 86.745314, 84.630171, 89.414885), tolerance = 1e-07) expect_equal(x8$sampleSizes[3, ], c(90.75107, 86.902014, 89.684764, 87.816529, 85.760605, 78.490341), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, ], c(0.22129636, 0.2212372, 0.27604385, 0.2610371, 0.30108411, 0.26964038), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[3, ], c(0.30043836, 0.34051211, 0.31802231, 0.36816554, 0.50585406, 0.52804861), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) expect_equal(x8CodeBased$effect, x8$effect, tolerance = 1e-05) expect_equal(x8CodeBased$iterations, x8$iterations, tolerance = 1e-05) expect_equal(x8CodeBased$overallReject, x8$overallReject, tolerance = 1e-05) expect_equal(x8CodeBased$rejectPerStage, x8$rejectPerStage, tolerance = 1e-05) 
expect_equal(x8CodeBased$futilityStop, x8$futilityStop, tolerance = 1e-05) expect_equal(x8CodeBased$futilityPerStage, x8$futilityPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$earlyStop, x8$earlyStop, tolerance = 1e-05) expect_equal(x8CodeBased$expectedNumberOfSubjects, x8$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x8CodeBased$sampleSizes, x8$sampleSizes, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalPowerAchieved, x8$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x9 <- getSimulationMeans( design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5)), groups = 2, meanRatio = TRUE, thetaH0 = 1.6, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = c(1, 3, 3), stDev = 1.5, alternative = seq(0.8, 1.6, 0.2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10, 10), maxNumberOfSubjectsPerStage = c(NA, 100, 100), directionUpper = FALSE, seed = seed ) ## Comparison of the results of SimulationResultsMeans object 'x9' with expected results expect_equal(x9$effect, c(-0.8, -0.6, -0.4, -0.2, 0), tolerance = 1e-07) expect_equal(x9$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x9$iterations[2, ], c(95, 90, 82, 75, 68)) expect_equal(x9$iterations[3, ], c(73, 68, 53, 48, 26)) expect_equal(x9$overallReject, c(0.55, 0.37, 0.22, 0.1, 0.01), tolerance = 1e-07) expect_equal(x9$rejectPerStage[1, ], c(0, 0, 0, 0, 0)) expect_equal(x9$rejectPerStage[2, ], c(0.13, 0.08, 0.06, 0.04, 0), tolerance = 1e-07) expect_equal(x9$rejectPerStage[3, ], c(0.42, 0.29, 0.16, 0.06, 0.01), tolerance = 1e-07) expect_equal(x9$futilityStop, c(0.14, 0.24, 0.41, 0.48, 0.74), tolerance = 1e-07) expect_equal(x9$futilityPerStage[1, ], c(0.05, 0.1, 0.18, 0.25, 0.32), tolerance = 
1e-07) expect_equal(x9$futilityPerStage[2, ], c(0.09, 0.14, 0.23, 0.23, 0.42), tolerance = 1e-07) expect_equal(x9$earlyStop, c(0.27, 0.32, 0.47, 0.52, 0.74), tolerance = 1e-07) expect_equal(x9$expectedNumberOfSubjects, c(159.13638, 155.22411, 142.49895, 133.05841, 108.89569), tolerance = 1e-07) expect_equal(x9$sampleSizes[1, ], c(18, 18, 18, 18, 18)) expect_equal(x9$sampleSizes[2, ], c(85.987506, 91.370107, 92.601585, 94.55466, 96.567372), tolerance = 1e-07) expect_equal(x9$sampleSizes[3, ], c(81.435959, 80.869134, 91.633298, 91.963359, 97.037972), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x9$conditionalPowerAchieved[2, ], c(0.43130186, 0.31089581, 0.32119313, 0.2350347, 0.16563188), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[3, ], c(0.64594535, 0.57199764, 0.39418023, 0.33812857, 0.31423783), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x9), NA))) expect_output(print(x9)$show()) invisible(capture.output(expect_error(summary(x9), NA))) expect_output(summary(x9)$show()) x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL))) expect_equal(x9CodeBased$effect, x9$effect, tolerance = 1e-05) expect_equal(x9CodeBased$iterations, x9$iterations, tolerance = 1e-05) expect_equal(x9CodeBased$overallReject, x9$overallReject, tolerance = 1e-05) expect_equal(x9CodeBased$rejectPerStage, x9$rejectPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$futilityStop, x9$futilityStop, tolerance = 1e-05) expect_equal(x9CodeBased$futilityPerStage, x9$futilityPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$earlyStop, x9$earlyStop, tolerance = 1e-05) expect_equal(x9CodeBased$expectedNumberOfSubjects, x9$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x9CodeBased$sampleSizes, x9$sampleSizes, tolerance = 1e-05) expect_equal(x9CodeBased$conditionalPowerAchieved, 
x9$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x9), "character")
    df <- as.data.frame(x9)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x9)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# Custom stage-wise sample size recalculation rule, passed to
# getSimulationMeans() below via the 'calcSubjectsFunction' argument.
#
# Arguments (supplied by the simulation engine at each interim):
#   stage                        - current stage number
#   thetaH0                      - effect (here: mean ratio) under H0
#   allocationRatioPlanned       - planned allocation ratio between groups
#   minNumberOfSubjectsPerStage, - per-stage lower/upper bounds used to
#   maxNumberOfSubjectsPerStage    clamp the recalculated size
#   sampleSizesPerStage          - sample sizes of the stages observed so far
#   thetaH1                      - assumed effect for conditional power
#   conditionalPower             - target conditional power
#   conditionalCriticalValue     - critical value conditional on observed data
#
# Returns: the number of subjects for the current stage. For stage 2 the size
# is derived from the conditional-power formula and clamped to
# [minNumberOfSubjectsPerStage[stage], maxNumberOfSubjectsPerStage[stage]];
# for any later stage the previous stage's size is simply carried forward.
calcSubjectsFunctionSimulationBaseMeans <- function(..., stage, thetaH0, allocationRatioPlanned,
        minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage,
        sampleSizesPerStage, thetaH1, conditionalPower, conditionalCriticalValue) {
    if (stage == 2) {
        # Conditional-power based reassessment; max(0, ...) guards against a
        # negative argument to the squared term, max(1e-12, thetaH1) guards
        # against division by zero for a vanishing assumed effect.
        # (Note: a no-op scaling factor 'mult <- 1' was removed here.)
        stageSubjects <- (1 + 1 / allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned)) *
            (max(0, conditionalCriticalValue + stats::qnorm(conditionalPower)))^2 /
            (max(1e-12, thetaH1))^2
        # Clamp the recalculated size to the allowed per-stage range.
        stageSubjects <- min(
            max(minNumberOfSubjectsPerStage[stage], stageSubjects),
            maxNumberOfSubjectsPerStage[stage]
        )
    } else {
        # Stages > 2: reuse the sample size of the preceding stage.
        stageSubjects <- sampleSizesPerStage[stage - 1]
    }
    return(stageSubjects)
}
x10 <- getSimulationMeans(
    design = getDesignInverseNormal(futilityBounds = c(0.5, 0.5)),
    groups = 2, meanRatio = TRUE, thetaH0 = 1.6,
    plannedSubjects = c(80, 160, 240),
    maxNumberOfIterations = maxNumberOfIterations,
    stDev = 1.5, alternative = seq(0.8, 1.6, 0.2),
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(NA, 40, 40),
    maxNumberOfSubjectsPerStage = c(NA, 400, 400),
    allocationRatioPlanned = 3, directionUpper = FALSE, seed = seed,
    calcSubjectsFunction = calcSubjectsFunctionSimulationBaseMeans
)
## Comparison of the results of SimulationResultsMeans object 'x10' with expected results
expect_equal(x10$effect, c(-0.8, -0.6, -0.4, -0.2, 0), tolerance = 1e-07)
expect_equal(x10$iterations[1, ], c(100, 100, 100, 100, 100))
expect_equal(x10$iterations[2, ], c(80, 73, 59, 46, 29))
expect_equal(x10$iterations[3, ], c(47, 49, 53, 37, 23))
expect_equal(x10$overallReject, c(0.71, 0.59, 0.3, 0.16, 0.03), tolerance = 1e-07)
expect_equal(x10$rejectPerStage[1, ], c(0.01, 0, 0, 0, 0), tolerance = 1e-07)
expect_equal(x10$rejectPerStage[2, ], c(0.33, 0.24,
0.05, 0.03, 0.02), tolerance = 1e-07) expect_equal(x10$rejectPerStage[3, ], c(0.37, 0.35, 0.25, 0.13, 0.01), tolerance = 1e-07) expect_equal(x10$futilityStop, c(0.19, 0.27, 0.42, 0.6, 0.75), tolerance = 1e-07) expect_equal(x10$futilityPerStage[1, ], c(0.19, 0.27, 0.41, 0.54, 0.71), tolerance = 1e-07) expect_equal(x10$futilityPerStage[2, ], c(0, 0, 0.01, 0.06, 0.04), tolerance = 1e-07) expect_equal(x10$earlyStop, c(0.53, 0.51, 0.47, 0.63, 0.77), tolerance = 1e-07) expect_equal(x10$expectedNumberOfSubjects, c(275.20455, 279.99813, 331.87372, 312.93302, 202.36219), tolerance = 1e-07) expect_equal(x10$sampleSizes[1, ], c(80, 80, 80, 80, 80)) expect_equal(x10$sampleSizes[2, ], c(160.20991, 162.95615, 228.62104, 285.92049, 236.43279), tolerance = 1e-07) expect_equal(x10$sampleSizes[3, ], c(142.63111, 165.38805, 220.73076, 274.07999, 233.89861), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.61849372, 0.63239423, 0.52503669, 0.48190934, 0.5387573), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[3, ], c(0.77627313, 0.69241344, 0.58084669, 0.41531587, 0.35026151), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x10), NA))) expect_output(print(x10)$show()) invisible(capture.output(expect_error(summary(x10), NA))) expect_output(summary(x10)$show()) x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL))) expect_equal(x10CodeBased$effect, x10$effect, tolerance = 1e-05) expect_equal(x10CodeBased$iterations, x10$iterations, tolerance = 1e-05) expect_equal(x10CodeBased$overallReject, x10$overallReject, tolerance = 1e-05) expect_equal(x10CodeBased$rejectPerStage, x10$rejectPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$futilityStop, x10$futilityStop, tolerance = 1e-05) expect_equal(x10CodeBased$futilityPerStage, x10$futilityPerStage, tolerance 
= 1e-05) expect_equal(x10CodeBased$earlyStop, x10$earlyStop, tolerance = 1e-05) expect_equal(x10CodeBased$expectedNumberOfSubjects, x10$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x10CodeBased$sampleSizes, x10$sampleSizes, tolerance = 1e-05) expect_equal(x10CodeBased$conditionalPowerAchieved, x10$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x10), "character") df <- as.data.frame(x10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_plan_section("Testing Simulation Means Function in a Systematic Way") test_that("'getSimulationMeans': Fisher design with several configurations", { # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationMeans} # @refFS[Formula]{fs:simulationOneArmMeansTestStatistics} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsDiff} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsRatio} # @refFS[Formula]{fs:testStatisticGroupSequentialWeightedAverage} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} x1 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x1' with expected results expect_equal(x1$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x1$iterations[1, ], c(100, 100, 100)) expect_equal(x1$iterations[2, ], c(100, 91, 53)) expect_equal(x1$overallReject, c(0.01, 0.67, 0.93), tolerance = 1e-07) 
expect_equal(x1$rejectPerStage[1, ], c(0, 0.09, 0.47), tolerance = 1e-07) expect_equal(x1$rejectPerStage[2, ], c(0.01, 0.58, 0.46), tolerance = 1e-07) expect_equal(x1$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x1$earlyStop, c(0, 0.09, 0.47), tolerance = 1e-07) expect_equal(x1$expectedNumberOfSubjects, c(100.13629, 75.286263, 37.754027), tolerance = 1e-07) expect_equal(x1$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x1$sampleSizes[2, ], c(90.136293, 71.743146, 52.366088), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(0.20283076, 0.49941507, 0.64819831), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$effect, x1$effect, tolerance = 1e-05) expect_equal(x1CodeBased$iterations, x1$iterations, tolerance = 1e-05) expect_equal(x1CodeBased$overallReject, x1$overallReject, tolerance = 1e-05) expect_equal(x1CodeBased$rejectPerStage, x1$rejectPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$futilityPerStage, x1$futilityPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$earlyStop, x1$earlyStop, tolerance = 1e-05) expect_equal(x1CodeBased$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x1CodeBased$sampleSizes, x1$sampleSizes, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPowerAchieved, x1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x2 <- getSimulationMeans( seed = 
1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x2' with expected results expect_equal(x2$effect, c(-0.8, -0.4, 0), tolerance = 1e-07) expect_equal(x2$iterations[1, ], c(100, 100, 100)) expect_equal(x2$iterations[2, ], c(38, 94, 97)) expect_equal(x2$overallReject, c(0.96, 0.74, 0.06), tolerance = 1e-07) expect_equal(x2$rejectPerStage[1, ], c(0.62, 0.06, 0.03), tolerance = 1e-07) expect_equal(x2$rejectPerStage[2, ], c(0.34, 0.68, 0.03), tolerance = 1e-07) expect_equal(x2$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x2$earlyStop, c(0.62, 0.06, 0.03), tolerance = 1e-07) expect_equal(x2$expectedNumberOfSubjects, c(25.921375, 81.226383, 97.518855), tolerance = 1e-07) expect_equal(x2$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x2$sampleSizes[2, ], c(41.898355, 75.772748, 90.225624), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0.66927179, 0.47487279, 0.2338584), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$effect, x2$effect, tolerance = 1e-05) expect_equal(x2CodeBased$iterations, x2$iterations, tolerance = 1e-05) expect_equal(x2CodeBased$overallReject, x2$overallReject, tolerance = 1e-05) expect_equal(x2CodeBased$rejectPerStage, x2$rejectPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$futilityPerStage, 
x2$futilityPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$earlyStop, x2$earlyStop, tolerance = 1e-05) expect_equal(x2CodeBased$expectedNumberOfSubjects, x2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x2CodeBased$sampleSizes, x2$sampleSizes, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPowerAchieved, x2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x3 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x3' with expected results expect_equal(x3$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x3$iterations[1, ], c(100, 100, 100)) expect_equal(x3$iterations[2, ], c(100, 92, 64)) expect_equal(x3$overallReject, c(0, 0.62, 0.92), tolerance = 1e-07) expect_equal(x3$rejectPerStage[1, ], c(0, 0.08, 0.36), tolerance = 1e-07) expect_equal(x3$rejectPerStage[2, ], c(0, 0.54, 0.56), tolerance = 1e-07) expect_equal(x3$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x3$earlyStop, c(0, 0.08, 0.36), tolerance = 1e-07) expect_equal(x3$expectedNumberOfSubjects, c(101.14709, 82.477228, 37.608934), tolerance = 1e-07) expect_equal(x3$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x3$sampleSizes[2, ], c(91.147091, 78.779596, 43.13896), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.15986579, 0.45599322, 0.69664803), tolerance = 1e-07) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$effect, x3$effect, tolerance = 1e-05) expect_equal(x3CodeBased$iterations, x3$iterations, tolerance = 1e-05) expect_equal(x3CodeBased$overallReject, x3$overallReject, tolerance = 1e-05) expect_equal(x3CodeBased$rejectPerStage, x3$rejectPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$futilityPerStage, x3$futilityPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$earlyStop, x3$earlyStop, tolerance = 1e-05) expect_equal(x3CodeBased$expectedNumberOfSubjects, x3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x3CodeBased$sampleSizes, x3$sampleSizes, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPowerAchieved, x3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x4 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x4' with expected results expect_equal(x4$effect, c(-0.8, -0.4, 0), tolerance = 1e-07) expect_equal(x4$iterations[1, ], c(100, 100, 100)) expect_equal(x4$iterations[2, ], c(65, 91, 100)) expect_equal(x4$overallReject, c(0.91, 0.73, 0.01), tolerance = 1e-07) expect_equal(x4$rejectPerStage[1, ], c(0.35, 
0.09, 0), tolerance = 1e-07) expect_equal(x4$rejectPerStage[2, ], c(0.56, 0.64, 0.01), tolerance = 1e-07) expect_equal(x4$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x4$earlyStop, c(0.35, 0.09, 0), tolerance = 1e-07) expect_equal(x4$expectedNumberOfSubjects, c(38.729726, 74.553457, 106.20499), tolerance = 1e-07) expect_equal(x4$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x4$sampleSizes[2, ], c(44.199579, 70.937865, 96.204991), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.65544931, 0.50900228, 0.13524564), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$effect, x4$effect, tolerance = 1e-05) expect_equal(x4CodeBased$iterations, x4$iterations, tolerance = 1e-05) expect_equal(x4CodeBased$overallReject, x4$overallReject, tolerance = 1e-05) expect_equal(x4CodeBased$rejectPerStage, x4$rejectPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$futilityPerStage, x4$futilityPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$earlyStop, x4$earlyStop, tolerance = 1e-05) expect_equal(x4CodeBased$expectedNumberOfSubjects, x4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x4CodeBased$sampleSizes, x4$sampleSizes, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPowerAchieved, x4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x5 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), 
normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x5' with expected results expect_equal(x5$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x5$iterations[1, ], c(100, 100, 100)) expect_equal(x5$iterations[2, ], c(100, 94, 85)) expect_equal(x5$overallReject, c(0.02, 0.3, 0.65), tolerance = 1e-07) expect_equal(x5$rejectPerStage[1, ], c(0, 0.06, 0.15), tolerance = 1e-07) expect_equal(x5$rejectPerStage[2, ], c(0.02, 0.24, 0.5), tolerance = 1e-07) expect_equal(x5$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x5$earlyStop, c(0, 0.06, 0.15), tolerance = 1e-07) expect_equal(x5$expectedNumberOfSubjects, c(99.262844, 92.628587, 72.466684), tolerance = 1e-07) expect_equal(x5$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x5$sampleSizes[2, ], c(89.262844, 87.902752, 73.490217), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.21679818, 0.32589621, 0.46073426), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$effect, x5$effect, tolerance = 1e-05) expect_equal(x5CodeBased$iterations, x5$iterations, tolerance = 1e-05) expect_equal(x5CodeBased$overallReject, x5$overallReject, tolerance = 1e-05) expect_equal(x5CodeBased$rejectPerStage, x5$rejectPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$futilityPerStage, x5$futilityPerStage, tolerance = 1e-05) 
expect_equal(x5CodeBased$earlyStop, x5$earlyStop, tolerance = 1e-05) expect_equal(x5CodeBased$expectedNumberOfSubjects, x5$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x5CodeBased$sampleSizes, x5$sampleSizes, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPowerAchieved, x5$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x6 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x6' with expected results expect_equal(x6$effect, c(-0.8, -0.4, 0), tolerance = 1e-07) expect_equal(x6$iterations[1, ], c(100, 100, 100)) expect_equal(x6$iterations[2, ], c(85, 94, 97)) expect_equal(x6$overallReject, c(0.73, 0.2, 0.05), tolerance = 1e-07) expect_equal(x6$rejectPerStage[1, ], c(0.15, 0.06, 0.03), tolerance = 1e-07) expect_equal(x6$rejectPerStage[2, ], c(0.58, 0.14, 0.02), tolerance = 1e-07) expect_equal(x6$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x6$earlyStop, c(0.15, 0.06, 0.03), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(62.256855, 90.679118, 97.117191), tolerance = 1e-07) expect_equal(x6$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x6$sampleSizes[2, ], c(61.478653, 85.828849, 89.811537), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.5750772, 0.31560556, 0.25161462), tolerance = 1e-07) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$effect, x6$effect, tolerance = 1e-05) expect_equal(x6CodeBased$iterations, x6$iterations, tolerance = 1e-05) expect_equal(x6CodeBased$overallReject, x6$overallReject, tolerance = 1e-05) expect_equal(x6CodeBased$rejectPerStage, x6$rejectPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$futilityPerStage, x6$futilityPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$earlyStop, x6$earlyStop, tolerance = 1e-05) expect_equal(x6CodeBased$expectedNumberOfSubjects, x6$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x6CodeBased$sampleSizes, x6$sampleSizes, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPowerAchieved, x6$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x7 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x7' with expected results expect_equal(x7$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x7$iterations[1, ], c(100, 100, 100)) expect_equal(x7$iterations[2, ], c(100, 98, 89)) expect_equal(x7$overallReject, c(0, 0.15, 0.75), tolerance = 1e-07) 
expect_equal(x7$rejectPerStage[1, ], c(0, 0.02, 0.11), tolerance = 1e-07) expect_equal(x7$rejectPerStage[2, ], c(0, 0.13, 0.64), tolerance = 1e-07) expect_equal(x7$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x7$earlyStop, c(0, 0.02, 0.11), tolerance = 1e-07) expect_equal(x7$expectedNumberOfSubjects, c(99.499784, 89.67646, 74.321885), tolerance = 1e-07) expect_equal(x7$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x7$sampleSizes[2, ], c(89.499784, 81.30251, 72.27178), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(0.19464679, 0.38425169, 0.50691811), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$effect, x7$effect, tolerance = 1e-05) expect_equal(x7CodeBased$iterations, x7$iterations, tolerance = 1e-05) expect_equal(x7CodeBased$overallReject, x7$overallReject, tolerance = 1e-05) expect_equal(x7CodeBased$rejectPerStage, x7$rejectPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$futilityPerStage, x7$futilityPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$earlyStop, x7$earlyStop, tolerance = 1e-05) expect_equal(x7CodeBased$expectedNumberOfSubjects, x7$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x7CodeBased$sampleSizes, x7$sampleSizes, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPowerAchieved, x7$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x8 <- getSimulationMeans( seed = 1234, 
getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x8' with expected results expect_equal(x8$effect, c(-0.7, -0.4, 0), tolerance = 1e-07) expect_equal(x8$iterations[1, ], c(100, 100, 100)) expect_equal(x8$iterations[2, ], c(92, 96, 100)) expect_equal(x8$overallReject, c(0.6, 0.28, 0.01), tolerance = 1e-07) expect_equal(x8$rejectPerStage[1, ], c(0.08, 0.04, 0), tolerance = 1e-07) expect_equal(x8$rejectPerStage[2, ], c(0.52, 0.24, 0.01), tolerance = 1e-07) expect_equal(x8$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x8$earlyStop, c(0.08, 0.04, 0), tolerance = 1e-07) expect_equal(x8$expectedNumberOfSubjects, c(75.059866, 89.365281, 105.96832), tolerance = 1e-07) expect_equal(x8$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x8$sampleSizes[2, ], c(70.717246, 82.672167, 95.968315), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, ], c(0.47813695, 0.33190551, 0.14267564), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) expect_equal(x8CodeBased$effect, x8$effect, tolerance = 1e-05) expect_equal(x8CodeBased$iterations, x8$iterations, tolerance = 1e-05) expect_equal(x8CodeBased$overallReject, x8$overallReject, tolerance = 1e-05) expect_equal(x8CodeBased$rejectPerStage, x8$rejectPerStage, tolerance = 1e-05) 
expect_equal(x8CodeBased$futilityPerStage, x8$futilityPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$earlyStop, x8$earlyStop, tolerance = 1e-05) expect_equal(x8CodeBased$expectedNumberOfSubjects, x8$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x8CodeBased$sampleSizes, x8$sampleSizes, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalPowerAchieved, x8$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x9 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x9' with expected results expect_equal(x9$effect, c(0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x9$iterations[1, ], c(100, 100, 100)) expect_equal(x9$iterations[2, ], c(99, 94, 80)) expect_equal(x9$overallReject, c(0.06, 0.4, 0.86), tolerance = 1e-07) expect_equal(x9$rejectPerStage[1, ], c(0.01, 0.06, 0.2), tolerance = 1e-07) expect_equal(x9$rejectPerStage[2, ], c(0.05, 0.34, 0.66), tolerance = 1e-07) expect_equal(x9$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x9$earlyStop, c(0.01, 0.06, 0.2), tolerance = 1e-07) expect_equal(x9$expectedNumberOfSubjects, c(96.293417, 87.052198, 59.545442), tolerance = 1e-07) expect_equal(x9$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x9$sampleSizes[2, ], c(87.165067, 81.970424, 61.931803), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) 
expect_equal(x9$conditionalPowerAchieved[2, ], c(0.23503536, 0.37772778, 0.53734864), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x9), NA))) expect_output(print(x9)$show()) invisible(capture.output(expect_error(summary(x9), NA))) expect_output(summary(x9)$show()) x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL))) expect_equal(x9CodeBased$effect, x9$effect, tolerance = 1e-05) expect_equal(x9CodeBased$iterations, x9$iterations, tolerance = 1e-05) expect_equal(x9CodeBased$overallReject, x9$overallReject, tolerance = 1e-05) expect_equal(x9CodeBased$rejectPerStage, x9$rejectPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$futilityPerStage, x9$futilityPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$earlyStop, x9$earlyStop, tolerance = 1e-05) expect_equal(x9CodeBased$expectedNumberOfSubjects, x9$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x9CodeBased$sampleSizes, x9$sampleSizes, tolerance = 1e-05) expect_equal(x9CodeBased$conditionalPowerAchieved, x9$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x9), "character") df <- as.data.frame(x9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x10 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x10' with expected results expect_equal(x10$effect, c(-0.7, -0.4, 0), tolerance = 1e-07) expect_equal(x10$iterations[1, ], c(100, 100, 100)) expect_equal(x10$iterations[2, ], 
c(89, 93, 98)) expect_equal(x10$overallReject, c(0.66, 0.31, 0.04), tolerance = 1e-07) expect_equal(x10$rejectPerStage[1, ], c(0.11, 0.07, 0.02), tolerance = 1e-07) expect_equal(x10$rejectPerStage[2, ], c(0.55, 0.24, 0.02), tolerance = 1e-07) expect_equal(x10$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x10$earlyStop, c(0.11, 0.07, 0.02), tolerance = 1e-07) expect_equal(x10$expectedNumberOfSubjects, c(64.458245, 88.745903, 98.117191), tolerance = 1e-07) expect_equal(x10$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x10$sampleSizes[2, ], c(61.189039, 84.673014, 89.915501), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.53544626, 0.3174792, 0.23558604), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x10), NA))) expect_output(print(x10)$show()) invisible(capture.output(expect_error(summary(x10), NA))) expect_output(summary(x10)$show()) x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL))) expect_equal(x10CodeBased$effect, x10$effect, tolerance = 1e-05) expect_equal(x10CodeBased$iterations, x10$iterations, tolerance = 1e-05) expect_equal(x10CodeBased$overallReject, x10$overallReject, tolerance = 1e-05) expect_equal(x10CodeBased$rejectPerStage, x10$rejectPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$futilityPerStage, x10$futilityPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$earlyStop, x10$earlyStop, tolerance = 1e-05) expect_equal(x10CodeBased$expectedNumberOfSubjects, x10$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x10CodeBased$sampleSizes, x10$sampleSizes, tolerance = 1e-05) expect_equal(x10CodeBased$conditionalPowerAchieved, x10$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x10), "character") df <- as.data.frame(x10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x10) 
expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x11 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x11' with expected results expect_equal(x11$effect, c(0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x11$iterations[1, ], c(100, 100, 100)) expect_equal(x11$iterations[2, ], c(98, 96, 79)) expect_equal(x11$overallReject, c(0.03, 0.32, 0.77), tolerance = 1e-07) expect_equal(x11$rejectPerStage[1, ], c(0.02, 0.04, 0.21), tolerance = 1e-07) expect_equal(x11$rejectPerStage[2, ], c(0.01, 0.28, 0.56), tolerance = 1e-07) expect_equal(x11$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x11$earlyStop, c(0.02, 0.04, 0.21), tolerance = 1e-07) expect_equal(x11$expectedNumberOfSubjects, c(96.685833, 88.962444, 54.461927), tolerance = 1e-07) expect_equal(x11$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x11$sampleSizes[2, ], c(88.454932, 82.252546, 56.28092), tolerance = 1e-07) expect_equal(x11$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x11$conditionalPowerAchieved[2, ], c(0.21899188, 0.34972634, 0.63085287), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x11), NA))) expect_output(print(x11)$show()) invisible(capture.output(expect_error(summary(x11), NA))) expect_output(summary(x11)$show()) x11CodeBased <- eval(parse(text = getObjectRCode(x11, stringWrapParagraphWidth = NULL))) expect_equal(x11CodeBased$effect, x11$effect, tolerance = 1e-05) expect_equal(x11CodeBased$iterations, x11$iterations, tolerance = 1e-05) expect_equal(x11CodeBased$overallReject, 
x11$overallReject, tolerance = 1e-05) expect_equal(x11CodeBased$rejectPerStage, x11$rejectPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$futilityPerStage, x11$futilityPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$earlyStop, x11$earlyStop, tolerance = 1e-05) expect_equal(x11CodeBased$expectedNumberOfSubjects, x11$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x11CodeBased$sampleSizes, x11$sampleSizes, tolerance = 1e-05) expect_equal(x11CodeBased$conditionalPowerAchieved, x11$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x11), "character") df <- as.data.frame(x11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x12 <- getSimulationMeans( seed = 1234, getDesignFisher(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x12' with expected results expect_equal(x12$effect, c(-0.7, -0.4, 0), tolerance = 1e-07) expect_equal(x12$iterations[1, ], c(100, 100, 100)) expect_equal(x12$iterations[2, ], c(92, 96, 100)) expect_equal(x12$overallReject, c(0.6, 0.28, 0.01), tolerance = 1e-07) expect_equal(x12$rejectPerStage[1, ], c(0.08, 0.04, 0), tolerance = 1e-07) expect_equal(x12$rejectPerStage[2, ], c(0.52, 0.24, 0.01), tolerance = 1e-07) expect_equal(x12$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x12$earlyStop, c(0.08, 0.04, 0), tolerance = 1e-07) expect_equal(x12$expectedNumberOfSubjects, c(75.059866, 89.365281, 105.96832), tolerance = 1e-07) expect_equal(x12$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x12$sampleSizes[2, ], c(70.717246, 82.672167, 95.968315), 
tolerance = 1e-07) expect_equal(x12$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x12$conditionalPowerAchieved[2, ], c(0.47813695, 0.33190551, 0.14267564), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x12), NA))) expect_output(print(x12)$show()) invisible(capture.output(expect_error(summary(x12), NA))) expect_output(summary(x12)$show()) x12CodeBased <- eval(parse(text = getObjectRCode(x12, stringWrapParagraphWidth = NULL))) expect_equal(x12CodeBased$effect, x12$effect, tolerance = 1e-05) expect_equal(x12CodeBased$iterations, x12$iterations, tolerance = 1e-05) expect_equal(x12CodeBased$overallReject, x12$overallReject, tolerance = 1e-05) expect_equal(x12CodeBased$rejectPerStage, x12$rejectPerStage, tolerance = 1e-05) expect_equal(x12CodeBased$futilityPerStage, x12$futilityPerStage, tolerance = 1e-05) expect_equal(x12CodeBased$earlyStop, x12$earlyStop, tolerance = 1e-05) expect_equal(x12CodeBased$expectedNumberOfSubjects, x12$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x12CodeBased$sampleSizes, x12$sampleSizes, tolerance = 1e-05) expect_equal(x12CodeBased$conditionalPowerAchieved, x12$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x12), "character") df <- as.data.frame(x12) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x12) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMeans': inverse normal design with several configurations", { # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationMeans} # @refFS[Formula]{fs:simulationOneArmMeansTestStatistics} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsDiff} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsRatio} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} x1 <- 
getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x1' with expected results expect_equal(x1$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x1$iterations[1, ], c(100, 100, 100)) expect_equal(x1$iterations[2, ], c(100, 99, 93)) expect_equal(x1$overallReject, c(0.01, 0.62, 0.84), tolerance = 1e-07) expect_equal(x1$rejectPerStage[1, ], c(0, 0.01, 0.07), tolerance = 1e-07) expect_equal(x1$rejectPerStage[2, ], c(0.01, 0.61, 0.77), tolerance = 1e-07) expect_equal(x1$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x1$earlyStop, c(0, 0.01, 0.07), tolerance = 1e-07) expect_equal(x1$expectedNumberOfSubjects, c(97.726214, 61.386317, 35.456429), tolerance = 1e-07) expect_equal(x1$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x1$sampleSizes[2, ], c(87.726214, 51.905371, 27.372504), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(0.21948944, 0.59945542, 0.74761937), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$effect, x1$effect, tolerance = 1e-05) expect_equal(x1CodeBased$iterations, x1$iterations, tolerance = 1e-05) expect_equal(x1CodeBased$overallReject, x1$overallReject, tolerance = 1e-05) expect_equal(x1CodeBased$rejectPerStage, x1$rejectPerStage, tolerance = 1e-05) 
expect_equal(x1CodeBased$futilityPerStage, x1$futilityPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$earlyStop, x1$earlyStop, tolerance = 1e-05) expect_equal(x1CodeBased$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x1CodeBased$sampleSizes, x1$sampleSizes, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPowerAchieved, x1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x2 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x2' with expected results expect_equal(x2$effect, c(-0.8, -0.4, 0), tolerance = 1e-07) expect_equal(x2$iterations[1, ], c(100, 100, 100)) expect_equal(x2$iterations[2, ], c(92, 98, 100)) expect_equal(x2$overallReject, c(0.88, 0.7, 0.05), tolerance = 1e-07) expect_equal(x2$rejectPerStage[1, ], c(0.08, 0.02, 0), tolerance = 1e-07) expect_equal(x2$rejectPerStage[2, ], c(0.8, 0.68, 0.05), tolerance = 1e-07) expect_equal(x2$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x2$earlyStop, c(0.08, 0.02, 0), tolerance = 1e-07) expect_equal(x2$expectedNumberOfSubjects, c(30.529806, 74.585778, 94.761842), tolerance = 1e-07) expect_equal(x2$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x2$sampleSizes[2, ], c(22.315007, 65.903855, 84.761842), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) 
expect_equal(x2$conditionalPowerAchieved[2, ], c(0.78002751, 0.51035441, 0.27866093), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$effect, x2$effect, tolerance = 1e-05) expect_equal(x2CodeBased$iterations, x2$iterations, tolerance = 1e-05) expect_equal(x2CodeBased$overallReject, x2$overallReject, tolerance = 1e-05) expect_equal(x2CodeBased$rejectPerStage, x2$rejectPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$futilityPerStage, x2$futilityPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$earlyStop, x2$earlyStop, tolerance = 1e-05) expect_equal(x2CodeBased$expectedNumberOfSubjects, x2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x2CodeBased$sampleSizes, x2$sampleSizes, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPowerAchieved, x2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x3 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x3' with expected results expect_equal(x3$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x3$iterations[1, ], c(100, 100, 100)) expect_equal(x3$iterations[2, ], c(100, 100, 98)) 
expect_equal(x3$overallReject, c(0.01, 0.58, 0.86), tolerance = 1e-07) expect_equal(x3$rejectPerStage[1, ], c(0, 0, 0.02), tolerance = 1e-07) expect_equal(x3$rejectPerStage[2, ], c(0.01, 0.58, 0.84), tolerance = 1e-07) expect_equal(x3$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x3$earlyStop, c(0, 0, 0.02), tolerance = 1e-07) expect_equal(x3$expectedNumberOfSubjects, c(99.571933, 69.623473, 35.859349), tolerance = 1e-07) expect_equal(x3$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x3$sampleSizes[2, ], c(89.571933, 59.623473, 26.38709), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.17531256, 0.54483038, 0.79325539), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$effect, x3$effect, tolerance = 1e-05) expect_equal(x3CodeBased$iterations, x3$iterations, tolerance = 1e-05) expect_equal(x3CodeBased$overallReject, x3$overallReject, tolerance = 1e-05) expect_equal(x3CodeBased$rejectPerStage, x3$rejectPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$futilityPerStage, x3$futilityPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$earlyStop, x3$earlyStop, tolerance = 1e-05) expect_equal(x3CodeBased$expectedNumberOfSubjects, x3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x3CodeBased$sampleSizes, x3$sampleSizes, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPowerAchieved, x3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) 
> 0) } x4 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x4' with expected results expect_equal(x4$effect, c(-0.8, -0.4, 0), tolerance = 1e-07) expect_equal(x4$iterations[1, ], c(100, 100, 100)) expect_equal(x4$iterations[2, ], c(97, 100, 100)) expect_equal(x4$overallReject, c(0.83, 0.69, 0.01), tolerance = 1e-07) expect_equal(x4$rejectPerStage[1, ], c(0.03, 0, 0), tolerance = 1e-07) expect_equal(x4$rejectPerStage[2, ], c(0.8, 0.69, 0.01), tolerance = 1e-07) expect_equal(x4$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x4$earlyStop, c(0.03, 0, 0), tolerance = 1e-07) expect_equal(x4$expectedNumberOfSubjects, c(34.808208, 66.656932, 104.30185), tolerance = 1e-07) expect_equal(x4$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x4$sampleSizes[2, ], c(25.575472, 56.656932, 94.301853), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.78184392, 0.58902983, 0.17869551), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$effect, x4$effect, tolerance = 1e-05) expect_equal(x4CodeBased$iterations, x4$iterations, tolerance = 1e-05) expect_equal(x4CodeBased$overallReject, x4$overallReject, tolerance = 1e-05) expect_equal(x4CodeBased$rejectPerStage, x4$rejectPerStage, tolerance = 1e-05) 
expect_equal(x4CodeBased$futilityPerStage, x4$futilityPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$earlyStop, x4$earlyStop, tolerance = 1e-05) expect_equal(x4CodeBased$expectedNumberOfSubjects, x4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x4CodeBased$sampleSizes, x4$sampleSizes, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPowerAchieved, x4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x5 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x5' with expected results expect_equal(x5$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x5$iterations[1, ], c(100, 100, 100)) expect_equal(x5$iterations[2, ], c(100, 100, 100)) expect_equal(x5$overallReject, c(0.02, 0.29, 0.63), tolerance = 1e-07) expect_equal(x5$rejectPerStage[1, ], c(0, 0, 0)) expect_equal(x5$rejectPerStage[2, ], c(0.02, 0.29, 0.63), tolerance = 1e-07) expect_equal(x5$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x5$earlyStop, c(0, 0, 0)) expect_equal(x5$expectedNumberOfSubjects, c(96.372889, 89.619156, 71.907268), tolerance = 1e-07) expect_equal(x5$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x5$sampleSizes[2, ], c(86.372889, 79.619156, 61.907268), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.23254121, 0.37156759, 0.50696796), 
tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$effect, x5$effect, tolerance = 1e-05) expect_equal(x5CodeBased$iterations, x5$iterations, tolerance = 1e-05) expect_equal(x5CodeBased$overallReject, x5$overallReject, tolerance = 1e-05) expect_equal(x5CodeBased$rejectPerStage, x5$rejectPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$futilityPerStage, x5$futilityPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$earlyStop, x5$earlyStop, tolerance = 1e-05) expect_equal(x5CodeBased$expectedNumberOfSubjects, x5$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x5CodeBased$sampleSizes, x5$sampleSizes, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPowerAchieved, x5$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x6 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x6' with expected results expect_equal(x6$effect, c(-0.8, -0.4, 0), tolerance = 1e-07) expect_equal(x6$iterations[1, ], c(100, 100, 100)) expect_equal(x6$iterations[2, ], c(98, 98, 100)) expect_equal(x6$overallReject, c(0.71, 0.28, 0.05), tolerance = 1e-07) 
expect_equal(x6$rejectPerStage[1, ], c(0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x6$rejectPerStage[2, ], c(0.69, 0.26, 0.05), tolerance = 1e-07) expect_equal(x6$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x6$earlyStop, c(0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(61.262488, 89.754099, 94.761842), tolerance = 1e-07) expect_equal(x6$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x6$sampleSizes[2, ], c(52.308661, 81.381734, 84.761842), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.57015973, 0.32347024, 0.27139992), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$effect, x6$effect, tolerance = 1e-05) expect_equal(x6CodeBased$iterations, x6$iterations, tolerance = 1e-05) expect_equal(x6CodeBased$overallReject, x6$overallReject, tolerance = 1e-05) expect_equal(x6CodeBased$rejectPerStage, x6$rejectPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$futilityPerStage, x6$futilityPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$earlyStop, x6$earlyStop, tolerance = 1e-05) expect_equal(x6CodeBased$expectedNumberOfSubjects, x6$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x6CodeBased$sampleSizes, x6$sampleSizes, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPowerAchieved, x6$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x7 <- getSimulationMeans( seed = 1234, 
getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x7' with expected results expect_equal(x7$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x7$iterations[1, ], c(100, 100, 100)) expect_equal(x7$iterations[2, ], c(100, 100, 99)) expect_equal(x7$overallReject, c(0.01, 0.2, 0.7), tolerance = 1e-07) expect_equal(x7$rejectPerStage[1, ], c(0, 0, 0.01), tolerance = 1e-07) expect_equal(x7$rejectPerStage[2, ], c(0.01, 0.2, 0.69), tolerance = 1e-07) expect_equal(x7$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x7$earlyStop, c(0, 0, 0.01), tolerance = 1e-07) expect_equal(x7$expectedNumberOfSubjects, c(99.874349, 85.385224, 62.337209), tolerance = 1e-07) expect_equal(x7$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x7$sampleSizes[2, ], c(89.874349, 75.385224, 52.865867), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(0.19979349, 0.40152955, 0.59344307), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$effect, x7$effect, tolerance = 1e-05) expect_equal(x7CodeBased$iterations, x7$iterations, tolerance = 1e-05) expect_equal(x7CodeBased$overallReject, x7$overallReject, tolerance = 1e-05) expect_equal(x7CodeBased$rejectPerStage, x7$rejectPerStage, tolerance = 1e-05) 
expect_equal(x7CodeBased$futilityPerStage, x7$futilityPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$earlyStop, x7$earlyStop, tolerance = 1e-05) expect_equal(x7CodeBased$expectedNumberOfSubjects, x7$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x7CodeBased$sampleSizes, x7$sampleSizes, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPowerAchieved, x7$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x8 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x8' with expected results expect_equal(x8$effect, c(-0.7, -0.4, 0), tolerance = 1e-07) expect_equal(x8$iterations[1, ], c(100, 100, 100)) expect_equal(x8$iterations[2, ], c(99, 100, 100)) expect_equal(x8$overallReject, c(0.57, 0.35, 0.01), tolerance = 1e-07) expect_equal(x8$rejectPerStage[1, ], c(0.01, 0, 0), tolerance = 1e-07) expect_equal(x8$rejectPerStage[2, ], c(0.56, 0.35, 0.01), tolerance = 1e-07) expect_equal(x8$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x8$earlyStop, c(0.01, 0, 0), tolerance = 1e-07) expect_equal(x8$expectedNumberOfSubjects, c(65.632546, 86.865451, 105.50507), tolerance = 1e-07) expect_equal(x8$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x8$sampleSizes[2, ], c(56.194491, 76.865451, 95.50507), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, 
], c(0.52087291, 0.38731069, 0.15906003), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) expect_equal(x8CodeBased$effect, x8$effect, tolerance = 1e-05) expect_equal(x8CodeBased$iterations, x8$iterations, tolerance = 1e-05) expect_equal(x8CodeBased$overallReject, x8$overallReject, tolerance = 1e-05) expect_equal(x8CodeBased$rejectPerStage, x8$rejectPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$futilityPerStage, x8$futilityPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$earlyStop, x8$earlyStop, tolerance = 1e-05) expect_equal(x8CodeBased$expectedNumberOfSubjects, x8$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x8CodeBased$sampleSizes, x8$sampleSizes, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalPowerAchieved, x8$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x9 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x9' with expected results expect_equal(x9$effect, c(0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x9$iterations[1, ], c(100, 100, 100)) expect_equal(x9$iterations[2, ], c(100, 99, 98)) 
expect_equal(x9$overallReject, c(0.04, 0.36, 0.79), tolerance = 1e-07) expect_equal(x9$rejectPerStage[1, ], c(0, 0.01, 0.02), tolerance = 1e-07) expect_equal(x9$rejectPerStage[2, ], c(0.04, 0.35, 0.77), tolerance = 1e-07) expect_equal(x9$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x9$earlyStop, c(0, 0.01, 0.02), tolerance = 1e-07) expect_equal(x9$expectedNumberOfSubjects, c(93.166381, 72.993336, 56.443486), tolerance = 1e-07) expect_equal(x9$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x9$sampleSizes[2, ], c(83.166381, 63.629633, 47.391312), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x9$conditionalPowerAchieved[2, ], c(0.26023971, 0.52016331, 0.61018937), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x9), NA))) expect_output(print(x9)$show()) invisible(capture.output(expect_error(summary(x9), NA))) expect_output(summary(x9)$show()) x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL))) expect_equal(x9CodeBased$effect, x9$effect, tolerance = 1e-05) expect_equal(x9CodeBased$iterations, x9$iterations, tolerance = 1e-05) expect_equal(x9CodeBased$overallReject, x9$overallReject, tolerance = 1e-05) expect_equal(x9CodeBased$rejectPerStage, x9$rejectPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$futilityPerStage, x9$futilityPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$earlyStop, x9$earlyStop, tolerance = 1e-05) expect_equal(x9CodeBased$expectedNumberOfSubjects, x9$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x9CodeBased$sampleSizes, x9$sampleSizes, tolerance = 1e-05) expect_equal(x9CodeBased$conditionalPowerAchieved, x9$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x9), "character") df <- as.data.frame(x9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && 
ncol(mtx) > 0) } x10 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x10' with expected results expect_equal(x10$effect, c(-0.7, -0.4, 0), tolerance = 1e-07) expect_equal(x10$iterations[1, ], c(100, 100, 100)) expect_equal(x10$iterations[2, ], c(98, 98, 100)) expect_equal(x10$overallReject, c(0.71, 0.32, 0.05), tolerance = 1e-07) expect_equal(x10$rejectPerStage[1, ], c(0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x10$rejectPerStage[2, ], c(0.69, 0.3, 0.05), tolerance = 1e-07) expect_equal(x10$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x10$earlyStop, c(0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x10$expectedNumberOfSubjects, c(62.435526, 88.169977, 94.761842), tolerance = 1e-07) expect_equal(x10$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x10$sampleSizes[2, ], c(53.505639, 79.765282, 84.761842), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.54108822, 0.32455187, 0.25936079), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x10), NA))) expect_output(print(x10)$show()) invisible(capture.output(expect_error(summary(x10), NA))) expect_output(summary(x10)$show()) x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL))) expect_equal(x10CodeBased$effect, x10$effect, tolerance = 1e-05) expect_equal(x10CodeBased$iterations, x10$iterations, tolerance = 1e-05) expect_equal(x10CodeBased$overallReject, x10$overallReject, tolerance = 1e-05) 
expect_equal(x10CodeBased$rejectPerStage, x10$rejectPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$futilityPerStage, x10$futilityPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$earlyStop, x10$earlyStop, tolerance = 1e-05) expect_equal(x10CodeBased$expectedNumberOfSubjects, x10$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x10CodeBased$sampleSizes, x10$sampleSizes, tolerance = 1e-05) expect_equal(x10CodeBased$conditionalPowerAchieved, x10$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x10), "character") df <- as.data.frame(x10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x11 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x11' with expected results expect_equal(x11$effect, c(0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x11$iterations[1, ], c(100, 100, 100)) expect_equal(x11$iterations[2, ], c(100, 100, 98)) expect_equal(x11$overallReject, c(0.04, 0.33, 0.76), tolerance = 1e-07) expect_equal(x11$rejectPerStage[1, ], c(0, 0, 0.02), tolerance = 1e-07) expect_equal(x11$rejectPerStage[2, ], c(0.04, 0.33, 0.74), tolerance = 1e-07) expect_equal(x11$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x11$earlyStop, c(0, 0, 0.02), tolerance = 1e-07) expect_equal(x11$expectedNumberOfSubjects, c(97.820553, 79.30135, 45.942964), tolerance = 1e-07) expect_equal(x11$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x11$sampleSizes[2, ], c(87.820553, 69.30135, 36.676494), tolerance = 1e-07) 
expect_equal(x11$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x11$conditionalPowerAchieved[2, ], c(0.24110629, 0.45389272, 0.70091861), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x11), NA))) expect_output(print(x11)$show()) invisible(capture.output(expect_error(summary(x11), NA))) expect_output(summary(x11)$show()) x11CodeBased <- eval(parse(text = getObjectRCode(x11, stringWrapParagraphWidth = NULL))) expect_equal(x11CodeBased$effect, x11$effect, tolerance = 1e-05) expect_equal(x11CodeBased$iterations, x11$iterations, tolerance = 1e-05) expect_equal(x11CodeBased$overallReject, x11$overallReject, tolerance = 1e-05) expect_equal(x11CodeBased$rejectPerStage, x11$rejectPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$futilityPerStage, x11$futilityPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$earlyStop, x11$earlyStop, tolerance = 1e-05) expect_equal(x11CodeBased$expectedNumberOfSubjects, x11$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x11CodeBased$sampleSizes, x11$sampleSizes, tolerance = 1e-05) expect_equal(x11CodeBased$conditionalPowerAchieved, x11$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x11), "character") df <- as.data.frame(x11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x12 <- getSimulationMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x12' with expected results expect_equal(x12$effect, c(-0.7, 
-0.4, 0), tolerance = 1e-07) expect_equal(x12$iterations[1, ], c(100, 100, 100)) expect_equal(x12$iterations[2, ], c(99, 100, 100)) expect_equal(x12$overallReject, c(0.57, 0.35, 0.01), tolerance = 1e-07) expect_equal(x12$rejectPerStage[1, ], c(0.01, 0, 0), tolerance = 1e-07) expect_equal(x12$rejectPerStage[2, ], c(0.56, 0.35, 0.01), tolerance = 1e-07) expect_equal(x12$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x12$earlyStop, c(0.01, 0, 0), tolerance = 1e-07) expect_equal(x12$expectedNumberOfSubjects, c(65.632546, 86.865451, 105.50507), tolerance = 1e-07) expect_equal(x12$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x12$sampleSizes[2, ], c(56.194491, 76.865451, 95.50507), tolerance = 1e-07) expect_equal(x12$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x12$conditionalPowerAchieved[2, ], c(0.52087291, 0.38731069, 0.15906003), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x12), NA))) expect_output(print(x12)$show()) invisible(capture.output(expect_error(summary(x12), NA))) expect_output(summary(x12)$show()) x12CodeBased <- eval(parse(text = getObjectRCode(x12, stringWrapParagraphWidth = NULL))) expect_equal(x12CodeBased$effect, x12$effect, tolerance = 1e-05) expect_equal(x12CodeBased$iterations, x12$iterations, tolerance = 1e-05) expect_equal(x12CodeBased$overallReject, x12$overallReject, tolerance = 1e-05) expect_equal(x12CodeBased$rejectPerStage, x12$rejectPerStage, tolerance = 1e-05) expect_equal(x12CodeBased$futilityPerStage, x12$futilityPerStage, tolerance = 1e-05) expect_equal(x12CodeBased$earlyStop, x12$earlyStop, tolerance = 1e-05) expect_equal(x12CodeBased$expectedNumberOfSubjects, x12$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x12CodeBased$sampleSizes, x12$sampleSizes, tolerance = 1e-05) expect_equal(x12CodeBased$conditionalPowerAchieved, x12$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x12), "character") df <- 
as.data.frame(x12) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x12) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMeans': group sequential design with several configurations", { # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationMeans} # @refFS[Formula]{fs:simulationOneArmMeansTestStatistics} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsDiff} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsRatio} # @refFS[Formula]{fs:testStatisticGroupSequentialWeightedAverage} x1 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x1' with expected results expect_equal(x1$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x1$iterations[1, ], c(100, 100, 100)) expect_equal(x1$iterations[2, ], c(100, 99, 93)) expect_equal(x1$overallReject, c(0.02, 0.71, 0.93), tolerance = 1e-07) expect_equal(x1$rejectPerStage[1, ], c(0, 0.01, 0.07), tolerance = 1e-07) expect_equal(x1$rejectPerStage[2, ], c(0.02, 0.7, 0.86), tolerance = 1e-07) expect_equal(x1$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x1$earlyStop, c(0, 0.01, 0.07), tolerance = 1e-07) expect_equal(x1$expectedNumberOfSubjects, c(97.726214, 61.386317, 35.456429), tolerance = 1e-07) expect_equal(x1$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x1$sampleSizes[2, ], c(87.726214, 51.905371, 27.372504), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) 
expect_equal(x1$conditionalPowerAchieved[2, ], c(0.21948944, 0.59945542, 0.74761937), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$effect, x1$effect, tolerance = 1e-05) expect_equal(x1CodeBased$iterations, x1$iterations, tolerance = 1e-05) expect_equal(x1CodeBased$overallReject, x1$overallReject, tolerance = 1e-05) expect_equal(x1CodeBased$rejectPerStage, x1$rejectPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$futilityPerStage, x1$futilityPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$earlyStop, x1$earlyStop, tolerance = 1e-05) expect_equal(x1CodeBased$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x1CodeBased$sampleSizes, x1$sampleSizes, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPowerAchieved, x1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x2 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x2' with expected results expect_equal(x2$effect, c(-0.8, -0.4, 0), tolerance = 1e-07) expect_equal(x2$iterations[1, ], c(100, 100, 100)) expect_equal(x2$iterations[2, 
], c(92, 98, 100)) expect_equal(x2$overallReject, c(0.94, 0.81, 0.07), tolerance = 1e-07) expect_equal(x2$rejectPerStage[1, ], c(0.08, 0.02, 0), tolerance = 1e-07) expect_equal(x2$rejectPerStage[2, ], c(0.86, 0.79, 0.07), tolerance = 1e-07) expect_equal(x2$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x2$earlyStop, c(0.08, 0.02, 0), tolerance = 1e-07) expect_equal(x2$expectedNumberOfSubjects, c(30.529806, 74.585778, 94.761842), tolerance = 1e-07) expect_equal(x2$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x2$sampleSizes[2, ], c(22.315007, 65.903855, 84.761842), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0.78002751, 0.51035441, 0.27866093), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$effect, x2$effect, tolerance = 1e-05) expect_equal(x2CodeBased$iterations, x2$iterations, tolerance = 1e-05) expect_equal(x2CodeBased$overallReject, x2$overallReject, tolerance = 1e-05) expect_equal(x2CodeBased$rejectPerStage, x2$rejectPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$futilityPerStage, x2$futilityPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$earlyStop, x2$earlyStop, tolerance = 1e-05) expect_equal(x2CodeBased$expectedNumberOfSubjects, x2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x2CodeBased$sampleSizes, x2$sampleSizes, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPowerAchieved, x2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) 
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x3 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x3' with expected results expect_equal(x3$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x3$iterations[1, ], c(100, 100, 100)) expect_equal(x3$iterations[2, ], c(100, 100, 98)) expect_equal(x3$overallReject, c(0.01, 0.68, 0.94), tolerance = 1e-07) expect_equal(x3$rejectPerStage[1, ], c(0, 0, 0.02), tolerance = 1e-07) expect_equal(x3$rejectPerStage[2, ], c(0.01, 0.68, 0.92), tolerance = 1e-07) expect_equal(x3$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x3$earlyStop, c(0, 0, 0.02), tolerance = 1e-07) expect_equal(x3$expectedNumberOfSubjects, c(99.571933, 69.623473, 35.859349), tolerance = 1e-07) expect_equal(x3$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x3$sampleSizes[2, ], c(89.571933, 59.623473, 26.38709), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.17531256, 0.54483038, 0.79325539), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$effect, x3$effect, tolerance = 1e-05) expect_equal(x3CodeBased$iterations, x3$iterations, tolerance = 1e-05) expect_equal(x3CodeBased$overallReject, x3$overallReject, tolerance = 1e-05) 
expect_equal(x3CodeBased$rejectPerStage, x3$rejectPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$futilityPerStage, x3$futilityPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$earlyStop, x3$earlyStop, tolerance = 1e-05) expect_equal(x3CodeBased$expectedNumberOfSubjects, x3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x3CodeBased$sampleSizes, x3$sampleSizes, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPowerAchieved, x3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x4 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 1, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x4' with expected results expect_equal(x4$effect, c(-0.8, -0.4, 0), tolerance = 1e-07) expect_equal(x4$iterations[1, ], c(100, 100, 100)) expect_equal(x4$iterations[2, ], c(97, 100, 100)) expect_equal(x4$overallReject, c(0.92, 0.78, 0.02), tolerance = 1e-07) expect_equal(x4$rejectPerStage[1, ], c(0.03, 0, 0), tolerance = 1e-07) expect_equal(x4$rejectPerStage[2, ], c(0.89, 0.78, 0.02), tolerance = 1e-07) expect_equal(x4$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x4$earlyStop, c(0.03, 0, 0), tolerance = 1e-07) expect_equal(x4$expectedNumberOfSubjects, c(34.808208, 66.656932, 104.30185), tolerance = 1e-07) expect_equal(x4$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x4$sampleSizes[2, ], c(25.575472, 56.656932, 94.301853), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, 
NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.78184392, 0.58902983, 0.17869551), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$effect, x4$effect, tolerance = 1e-05) expect_equal(x4CodeBased$iterations, x4$iterations, tolerance = 1e-05) expect_equal(x4CodeBased$overallReject, x4$overallReject, tolerance = 1e-05) expect_equal(x4CodeBased$rejectPerStage, x4$rejectPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$futilityPerStage, x4$futilityPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$earlyStop, x4$earlyStop, tolerance = 1e-05) expect_equal(x4CodeBased$expectedNumberOfSubjects, x4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x4CodeBased$sampleSizes, x4$sampleSizes, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPowerAchieved, x4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x5 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x5' with expected results expect_equal(x5$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x5$iterations[1, ], c(100, 100, 100)) 
expect_equal(x5$iterations[2, ], c(100, 100, 100)) expect_equal(x5$overallReject, c(0.03, 0.36, 0.74), tolerance = 1e-07) expect_equal(x5$rejectPerStage[1, ], c(0, 0, 0)) expect_equal(x5$rejectPerStage[2, ], c(0.03, 0.36, 0.74), tolerance = 1e-07) expect_equal(x5$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x5$earlyStop, c(0, 0, 0)) expect_equal(x5$expectedNumberOfSubjects, c(96.372889, 89.619156, 71.907268), tolerance = 1e-07) expect_equal(x5$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x5$sampleSizes[2, ], c(86.372889, 79.619156, 61.907268), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.23254121, 0.37156759, 0.50696796), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$effect, x5$effect, tolerance = 1e-05) expect_equal(x5CodeBased$iterations, x5$iterations, tolerance = 1e-05) expect_equal(x5CodeBased$overallReject, x5$overallReject, tolerance = 1e-05) expect_equal(x5CodeBased$rejectPerStage, x5$rejectPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$futilityPerStage, x5$futilityPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$earlyStop, x5$earlyStop, tolerance = 1e-05) expect_equal(x5CodeBased$expectedNumberOfSubjects, x5$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x5CodeBased$sampleSizes, x5$sampleSizes, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPowerAchieved, x5$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && 
ncol(mtx) > 0) } x6 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x6' with expected results expect_equal(x6$effect, c(-0.8, -0.4, 0), tolerance = 1e-07) expect_equal(x6$iterations[1, ], c(100, 100, 100)) expect_equal(x6$iterations[2, ], c(98, 98, 100)) expect_equal(x6$overallReject, c(0.79, 0.36, 0.06), tolerance = 1e-07) expect_equal(x6$rejectPerStage[1, ], c(0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x6$rejectPerStage[2, ], c(0.77, 0.34, 0.06), tolerance = 1e-07) expect_equal(x6$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x6$earlyStop, c(0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(61.262488, 89.754099, 94.761842), tolerance = 1e-07) expect_equal(x6$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x6$sampleSizes[2, ], c(52.308661, 81.381734, 84.761842), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.57015973, 0.32347024, 0.27139992), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$effect, x6$effect, tolerance = 1e-05) expect_equal(x6CodeBased$iterations, x6$iterations, tolerance = 1e-05) expect_equal(x6CodeBased$overallReject, x6$overallReject, tolerance = 1e-05) expect_equal(x6CodeBased$rejectPerStage, 
x6$rejectPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$futilityPerStage, x6$futilityPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$earlyStop, x6$earlyStop, tolerance = 1e-05) expect_equal(x6CodeBased$expectedNumberOfSubjects, x6$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x6CodeBased$sampleSizes, x6$sampleSizes, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPowerAchieved, x6$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x7 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = FALSE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x7' with expected results expect_equal(x7$effect, c(-0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x7$iterations[1, ], c(100, 100, 100)) expect_equal(x7$iterations[2, ], c(100, 100, 99)) expect_equal(x7$overallReject, c(0.01, 0.23, 0.83), tolerance = 1e-07) expect_equal(x7$rejectPerStage[1, ], c(0, 0, 0.01), tolerance = 1e-07) expect_equal(x7$rejectPerStage[2, ], c(0.01, 0.23, 0.82), tolerance = 1e-07) expect_equal(x7$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x7$earlyStop, c(0, 0, 0.01), tolerance = 1e-07) expect_equal(x7$expectedNumberOfSubjects, c(99.874349, 85.385224, 62.337209), tolerance = 1e-07) expect_equal(x7$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x7$sampleSizes[2, ], c(89.874349, 75.385224, 52.865867), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) 
expect_equal(x7$conditionalPowerAchieved[2, ], c(0.19979349, 0.40152955, 0.59344307), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$effect, x7$effect, tolerance = 1e-05) expect_equal(x7CodeBased$iterations, x7$iterations, tolerance = 1e-05) expect_equal(x7CodeBased$overallReject, x7$overallReject, tolerance = 1e-05) expect_equal(x7CodeBased$rejectPerStage, x7$rejectPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$futilityPerStage, x7$futilityPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$earlyStop, x7$earlyStop, tolerance = 1e-05) expect_equal(x7CodeBased$expectedNumberOfSubjects, x7$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x7CodeBased$sampleSizes, x7$sampleSizes, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPowerAchieved, x7$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x8 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x8' with expected results expect_equal(x8$effect, c(-0.7, -0.4, 0), tolerance = 1e-07) expect_equal(x8$iterations[1, ], c(100, 100, 100)) expect_equal(x8$iterations[2, 
], c(99, 100, 100)) expect_equal(x8$overallReject, c(0.72, 0.45, 0.01), tolerance = 1e-07) expect_equal(x8$rejectPerStage[1, ], c(0.01, 0, 0), tolerance = 1e-07) expect_equal(x8$rejectPerStage[2, ], c(0.71, 0.45, 0.01), tolerance = 1e-07) expect_equal(x8$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x8$earlyStop, c(0.01, 0, 0), tolerance = 1e-07) expect_equal(x8$expectedNumberOfSubjects, c(65.632546, 86.865451, 105.50507), tolerance = 1e-07) expect_equal(x8$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x8$sampleSizes[2, ], c(56.194491, 76.865451, 95.50507), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, ], c(0.52087291, 0.38731069, 0.15906003), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) expect_equal(x8CodeBased$effect, x8$effect, tolerance = 1e-05) expect_equal(x8CodeBased$iterations, x8$iterations, tolerance = 1e-05) expect_equal(x8CodeBased$overallReject, x8$overallReject, tolerance = 1e-05) expect_equal(x8CodeBased$rejectPerStage, x8$rejectPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$futilityPerStage, x8$futilityPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$earlyStop, x8$earlyStop, tolerance = 1e-05) expect_equal(x8CodeBased$expectedNumberOfSubjects, x8$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x8CodeBased$sampleSizes, x8$sampleSizes, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalPowerAchieved, x8$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) 
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x9 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x9' with expected results expect_equal(x9$effect, c(0.05, 0.35, 0.75), tolerance = 1e-07) expect_equal(x9$iterations[1, ], c(100, 100, 100)) expect_equal(x9$iterations[2, ], c(100, 99, 98)) expect_equal(x9$overallReject, c(0.09, 0.44, 0.85), tolerance = 1e-07) expect_equal(x9$rejectPerStage[1, ], c(0, 0.01, 0.02), tolerance = 1e-07) expect_equal(x9$rejectPerStage[2, ], c(0.09, 0.43, 0.83), tolerance = 1e-07) expect_equal(x9$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x9$earlyStop, c(0, 0.01, 0.02), tolerance = 1e-07) expect_equal(x9$expectedNumberOfSubjects, c(93.166381, 72.993336, 56.443486), tolerance = 1e-07) expect_equal(x9$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x9$sampleSizes[2, ], c(83.166381, 63.629633, 47.391312), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x9$conditionalPowerAchieved[2, ], c(0.26023971, 0.52016331, 0.61018937), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x9), NA))) expect_output(print(x9)$show()) invisible(capture.output(expect_error(summary(x9), NA))) expect_output(summary(x9)$show()) x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL))) expect_equal(x9CodeBased$effect, x9$effect, tolerance = 1e-05) expect_equal(x9CodeBased$iterations, x9$iterations, tolerance = 1e-05) expect_equal(x9CodeBased$overallReject, x9$overallReject, tolerance = 1e-05) 
expect_equal(x9CodeBased$rejectPerStage, x9$rejectPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$futilityPerStage, x9$futilityPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$earlyStop, x9$earlyStop, tolerance = 1e-05) expect_equal(x9CodeBased$expectedNumberOfSubjects, x9$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x9CodeBased$sampleSizes, x9$sampleSizes, tolerance = 1e-05) expect_equal(x9CodeBased$conditionalPowerAchieved, x9$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x9), "character") df <- as.data.frame(x9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x10 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = TRUE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x10' with expected results expect_equal(x10$effect, c(-0.7, -0.4, 0), tolerance = 1e-07) expect_equal(x10$iterations[1, ], c(100, 100, 100)) expect_equal(x10$iterations[2, ], c(98, 98, 100)) expect_equal(x10$overallReject, c(0.76, 0.42, 0.06), tolerance = 1e-07) expect_equal(x10$rejectPerStage[1, ], c(0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x10$rejectPerStage[2, ], c(0.74, 0.4, 0.06), tolerance = 1e-07) expect_equal(x10$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x10$earlyStop, c(0.02, 0.02, 0), tolerance = 1e-07) expect_equal(x10$expectedNumberOfSubjects, c(62.435526, 88.169977, 94.761842), tolerance = 1e-07) expect_equal(x10$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x10$sampleSizes[2, ], c(53.505639, 79.765282, 84.761842), tolerance = 1e-07) 
expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.54108822, 0.32455187, 0.25936079), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x10), NA))) expect_output(print(x10)$show()) invisible(capture.output(expect_error(summary(x10), NA))) expect_output(summary(x10)$show()) x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL))) expect_equal(x10CodeBased$effect, x10$effect, tolerance = 1e-05) expect_equal(x10CodeBased$iterations, x10$iterations, tolerance = 1e-05) expect_equal(x10CodeBased$overallReject, x10$overallReject, tolerance = 1e-05) expect_equal(x10CodeBased$rejectPerStage, x10$rejectPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$futilityPerStage, x10$futilityPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$earlyStop, x10$earlyStop, tolerance = 1e-05) expect_equal(x10CodeBased$expectedNumberOfSubjects, x10$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x10CodeBased$sampleSizes, x10$sampleSizes, tolerance = 1e-05) expect_equal(x10CodeBased$conditionalPowerAchieved, x10$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x10), "character") df <- as.data.frame(x10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x11 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = TRUE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.05 ) ## Comparison of the results of SimulationResultsMeans object 'x11' with expected results expect_equal(x11$effect, c(0.05, 
0.35, 0.75), tolerance = 1e-07) expect_equal(x11$iterations[1, ], c(100, 100, 100)) expect_equal(x11$iterations[2, ], c(100, 100, 98)) expect_equal(x11$overallReject, c(0.12, 0.39, 0.87), tolerance = 1e-07) expect_equal(x11$rejectPerStage[1, ], c(0, 0, 0.02), tolerance = 1e-07) expect_equal(x11$rejectPerStage[2, ], c(0.12, 0.39, 0.85), tolerance = 1e-07) expect_equal(x11$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x11$earlyStop, c(0, 0, 0.02), tolerance = 1e-07) expect_equal(x11$expectedNumberOfSubjects, c(97.820553, 79.30135, 45.942964), tolerance = 1e-07) expect_equal(x11$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x11$sampleSizes[2, ], c(87.820553, 69.30135, 36.676494), tolerance = 1e-07) expect_equal(x11$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x11$conditionalPowerAchieved[2, ], c(0.24110629, 0.45389272, 0.70091861), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x11), NA))) expect_output(print(x11)$show()) invisible(capture.output(expect_error(summary(x11), NA))) expect_output(summary(x11)$show()) x11CodeBased <- eval(parse(text = getObjectRCode(x11, stringWrapParagraphWidth = NULL))) expect_equal(x11CodeBased$effect, x11$effect, tolerance = 1e-05) expect_equal(x11CodeBased$iterations, x11$iterations, tolerance = 1e-05) expect_equal(x11CodeBased$overallReject, x11$overallReject, tolerance = 1e-05) expect_equal(x11CodeBased$rejectPerStage, x11$rejectPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$futilityPerStage, x11$futilityPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$earlyStop, x11$earlyStop, tolerance = 1e-05) expect_equal(x11CodeBased$expectedNumberOfSubjects, x11$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x11CodeBased$sampleSizes, x11$sampleSizes, tolerance = 1e-05) expect_equal(x11CodeBased$conditionalPowerAchieved, x11$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x11), "character") df <- 
as.data.frame(x11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x12 <- getSimulationMeans( seed = 1234, getDesignGroupSequential(informationRates = c(0.3333, 1)), normalApproximation = FALSE, groups = 2, plannedSubjects = c(10, 30), alternative = c(0.1, 0.4, 0.8), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4), maxNumberOfSubjectsPerStage = c(10, 100), stDev = 1.2, directionUpper = FALSE, meanRatio = TRUE, maxNumberOfIterations = 100, thetaH0 = 0.8 ) ## Comparison of the results of SimulationResultsMeans object 'x12' with expected results expect_equal(x12$effect, c(-0.7, -0.4, 0), tolerance = 1e-07) expect_equal(x12$iterations[1, ], c(100, 100, 100)) expect_equal(x12$iterations[2, ], c(99, 100, 100)) expect_equal(x12$overallReject, c(0.72, 0.45, 0.01), tolerance = 1e-07) expect_equal(x12$rejectPerStage[1, ], c(0.01, 0, 0), tolerance = 1e-07) expect_equal(x12$rejectPerStage[2, ], c(0.71, 0.45, 0.01), tolerance = 1e-07) expect_equal(x12$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x12$earlyStop, c(0.01, 0, 0), tolerance = 1e-07) expect_equal(x12$expectedNumberOfSubjects, c(65.632546, 86.865451, 105.50507), tolerance = 1e-07) expect_equal(x12$sampleSizes[1, ], c(10, 10, 10)) expect_equal(x12$sampleSizes[2, ], c(56.194491, 76.865451, 95.50507), tolerance = 1e-07) expect_equal(x12$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x12$conditionalPowerAchieved[2, ], c(0.52087291, 0.38731069, 0.15906003), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x12), NA))) expect_output(print(x12)$show()) invisible(capture.output(expect_error(summary(x12), NA))) expect_output(summary(x12)$show()) x12CodeBased <- eval(parse(text = getObjectRCode(x12, stringWrapParagraphWidth = NULL))) expect_equal(x12CodeBased$effect, x12$effect, tolerance = 1e-05) 
expect_equal(x12CodeBased$iterations, x12$iterations, tolerance = 1e-05) expect_equal(x12CodeBased$overallReject, x12$overallReject, tolerance = 1e-05) expect_equal(x12CodeBased$rejectPerStage, x12$rejectPerStage, tolerance = 1e-05) expect_equal(x12CodeBased$futilityPerStage, x12$futilityPerStage, tolerance = 1e-05) expect_equal(x12CodeBased$earlyStop, x12$earlyStop, tolerance = 1e-05) expect_equal(x12CodeBased$expectedNumberOfSubjects, x12$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x12CodeBased$sampleSizes, x12$sampleSizes, tolerance = 1e-05) expect_equal(x12CodeBased$conditionalPowerAchieved, x12$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x12), "character") df <- as.data.frame(x12) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x12) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMeans': comparison with getPowerMeans() results", { # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationMeans} # @refFS[Formula]{fs:simulationOneArmMeansTestStatistics} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsDiff} # @refFS[Formula]{fs:simulationTwoArmMeansTestStatisticsRatio} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} .skipTestIfDisabled() x1 <- getSimulationMeans( seed = 1234, design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 2, meanRatio = TRUE, thetaH0 = 0.4, plannedSubjects = c(40, 100, 200), maxNumberOfIterations = 1000, allocationRatioPlanned = 3, stDev = 1.5 ) y1 <- getPowerMeans( design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 2, meanRatio = TRUE, thetaH0 = 0.4, maxNumberOfSubjects = 200, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE ) expectedNumberOfSubjectsDiff <- 
round((x1$expectedNumberOfSubjects - y1$expectedNumberOfSubjects) / 200, 4) ## Comparison of the results of numeric object 'expectedNumberOfSubjectsDiff' with expected results expect_equal(expectedNumberOfSubjectsDiff, c(0.0027, 0.0092, 0.0016, -0.0071, 0.0018, 0.0013), tolerance = 1e-07) overallRejectDiff1 <- round(x1$overallReject - y1$overallReject, 4) ## Comparison of the results of numeric object 'overallRejectDiff1' with expected results expect_equal(overallRejectDiff1, c(-0.0018, 0.0015, 2e-04, 0, 0, 0), tolerance = 1e-07) futilityStopDiff1 <- round(x1$futilityStop - y1$futilityStop, 4) ## Comparison of the results of numeric object 'futilityStopDiff1' with expected results expect_equal(futilityStopDiff1, c(0.003, -0.0012, -2e-04, 0, 0, 0), tolerance = 1e-07) x2 <- getSimulationMeans( seed = 1234, design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 2, meanRatio = FALSE, thetaH0 = 0.2, plannedSubjects = c(40, 100, 200), maxNumberOfIterations = 1000, allocationRatioPlanned = 3, stDev = 1.5 ) y2 <- getPowerMeans( design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 2, meanRatio = FALSE, thetaH0 = 0.2, maxNumberOfSubjects = 200, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE ) expectedNumberOfSubjectsDiff <- round((x2$expectedNumberOfSubjects - y2$expectedNumberOfSubjects) / 200, 4) ## Comparison of the results of numeric object 'expectedNumberOfSubjectsDiff' with expected results expect_equal(expectedNumberOfSubjectsDiff, c(-0.0117, 0.0015, -4e-04, 4e-04, -0.0018, 0.0065), tolerance = 1e-07) overallRejectDiff2 <- round(x2$overallReject - y2$overallReject, 4) ## Comparison of the results of numeric object 'overallRejectDiff2' with expected results expect_equal(overallRejectDiff2, c(-0.0016, 0.0111, 0.0023, 0.0198, 0.0107, -0.0071), tolerance = 1e-07) futilityStopDiff2 <- round(x2$futilityStop - y2$futilityStop, 4) ## Comparison of 
the results of numeric object 'futilityStopDiff2' with expected results expect_equal(futilityStopDiff2, c(0.0132, -0.0034, 0.0147, -3e-04, 0.0035, 0.0013), tolerance = 1e-07) x4 <- getSimulationMeans( seed = 1234, design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 1, thetaH0 = 0.2, plannedSubjects = c(40, 100, 200), maxNumberOfIterations = 1000, stDev = 1.5 ) y4 <- getPowerMeans( design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 1, thetaH0 = 0.2, maxNumberOfSubjects = 200, stDev = 1.5, normalApproximation = TRUE ) expectedNumberOfSubjectsDiff <- round((x4$expectedNumberOfSubjects - y4$expectedNumberOfSubjects) / 200, 4) ## Comparison of the results of numeric object 'expectedNumberOfSubjectsDiff' with expected results expect_equal(expectedNumberOfSubjectsDiff, c(-0.0038, 0.0042, 0.0102, -0.0074, -0.002, -0.0036), tolerance = 1e-07) overallRejectDiff4 <- round(x4$overallReject - y4$overallReject, 4) ## Comparison of the results of numeric object 'overallRejectDiff4' with expected results expect_equal(overallRejectDiff4, c(-1e-04, 0.0121, -0.0064, 0.0131, -0.0015, 1e-04), tolerance = 1e-07) futilityStopDiff4 <- round(x4$futilityStop - y4$futilityStop, 4) ## Comparison of the results of numeric object 'futilityStopDiff4' with expected results expect_equal(futilityStopDiff4, c(0.0013, -0.0094, -0.0191, -0.007, 0.0016, -1e-04), tolerance = 1e-07) x5 <- getSimulationMeans( seed = 1234, design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 2, meanRatio = TRUE, thetaH0 = 1.1, plannedSubjects = c(40, 100, 200), maxNumberOfIterations = 1000, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE ) y5 <- getPowerMeans( design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 2, meanRatio = TRUE, thetaH0 = 1.1, maxNumberOfSubjects = 200, 
allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE, directionUpper = FALSE ) expectedNumberOfSubjectsDiff <- round((x5$expectedNumberOfSubjects - y5$expectedNumberOfSubjects) / 200, 4) ## Comparison of the results of numeric object 'expectedNumberOfSubjectsDiff' with expected results expect_equal(expectedNumberOfSubjectsDiff, c(0.008, -0.0088, 0.0023, -0.001, -0.0062, -0.0039), tolerance = 1e-07) overallRejectDiff5 <- round(x5$overallReject - y5$overallReject, 4) ## Comparison of the results of numeric object 'overallRejectDiff5' with expected results expect_equal(overallRejectDiff5, c(0, -0.0019, -9e-04, -1e-04, 0, 0), tolerance = 1e-07) futilityStopDiff5 <- round(x5$futilityStop - y5$futilityStop, 4) ## Comparison of the results of numeric object 'futilityStopDiff5' with expected results expect_equal(futilityStopDiff5, c(-0.0164, 0.0103, 0.0038, 0.0057, 0.0018, 6e-04), tolerance = 1e-07) x6 <- getSimulationMeans( seed = 1234, design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 2, meanRatio = FALSE, thetaH0 = 1.1, plannedSubjects = c(40, 100, 200), maxNumberOfIterations = 1000, allocationRatioPlanned = 3, stDev = 1.5, directionUpper = FALSE ) y6 <- getPowerMeans( design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 2, meanRatio = FALSE, thetaH0 = 1.1, maxNumberOfSubjects = 200, allocationRatioPlanned = 3, stDev = 1.5, normalApproximation = TRUE, directionUpper = FALSE ) expectedNumberOfSubjectsDiff <- round((x6$expectedNumberOfSubjects - y6$expectedNumberOfSubjects) / 200, 4) ## Comparison of the results of numeric object 'expectedNumberOfSubjectsDiff' with expected results expect_equal(expectedNumberOfSubjectsDiff, c(0.0029, -0.0013, 0.0079, 0.023, -0.003, -0.0132), tolerance = 1e-07) overallRejectDiff6 <- round(x6$overallReject - y6$overallReject, 4) ## Comparison of the results of numeric object 'overallRejectDiff6' with expected 
results expect_equal(overallRejectDiff6, c(0.0036, 0.003, -0.0112, -0.0033, -0.0108, -0.0031), tolerance = 1e-07) futilityStopDiff6 <- round(x6$futilityStop - y6$futilityStop, 4) ## Comparison of the results of numeric object 'futilityStopDiff6' with expected results expect_equal(futilityStopDiff6, c(-0.004, 2e-04, 0.0083, -0.0213, -4e-04, 0.0232), tolerance = 1e-07) x7 <- getSimulationMeans( seed = 1234, design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 1, thetaH0 = 0.8, plannedSubjects = c(40, 100, 200), maxNumberOfIterations = 1000, stDev = 1.5, directionUpper = FALSE ) y7 <- getPowerMeans( design = getDesignInverseNormal(futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.5, 1)), groups = 1, thetaH0 = 0.8, maxNumberOfSubjects = 200, stDev = 1.5, normalApproximation = TRUE, directionUpper = FALSE ) expectedNumberOfSubjectsDiff <- round((x7$expectedNumberOfSubjects - y7$expectedNumberOfSubjects) / 200, 4) ## Comparison of the results of numeric object 'expectedNumberOfSubjectsDiff' with expected results expect_equal(expectedNumberOfSubjectsDiff, c(0.0012, 6e-04, -0.0061, -3e-04, 0.0091, 0.0036), tolerance = 1e-07) overallRejectDiff7 <- round(x7$overallReject - y7$overallReject, 4) ## Comparison of the results of numeric object 'overallRejectDiff7' with expected results expect_equal(overallRejectDiff7, c(1e-04, 5e-04, -9e-04, -0.0224, -9e-04, -1e-04), tolerance = 1e-07) futilityStopDiff7 <- round(x7$futilityStop - y7$futilityStop, 4) ## Comparison of the results of numeric object 'futilityStopDiff7' with expected results expect_equal(futilityStopDiff7, c(-1e-04, -4e-04, -0.003, 0.0059, -4e-04, 0.0033), tolerance = 1e-07) }) test_that("Internal simulation base means functions throw errors when arguments are missing or wrong", { expect_error(.getSimulationMeansStageSubjects()) }) rpact/tests/testthat/test-class_summary.R0000644000176200001440000006257214370207346020371 0ustar liggesusers## | ## | *Unit 
tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-class_summary.R ## | Creation date: 06 February 2023, 12:04:16 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Class 'SummaryFactory'") test_that("Testing 'summary.ParameterSet': no errors occur", { .skipTestIfDisabled() # @refFS[Function]{fs:outputOfGenericFunctions} invisible(capture.output(expect_error(summary(getDesignGroupSequential( beta = 0.05, typeOfDesign = "asKD", gammaA = 1, typeBetaSpending = "bsOF" )), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 4, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 4, sided = 2), digits = 0), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(kMax = 1, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(futilityBounds = c(-6, 0))), NA))) invisible(capture.output(expect_error(summary(getDesignGroupSequential(futilityBounds = c(-6, 0)), digits = 5), NA))) invisible(capture.output(expect_error(summary(getDataset( n = c(13, 25), means = c(242, 222), stDevs = c(244, 221) )), NA))) invisible(capture.output(expect_error(summary(getDataset( n = c(13), means = c(242), stDevs = c(244) )), NA))) 
invisible(capture.output(expect_error(summary(getDataset( n1 = c(13, 25), n2 = c(15, NA), n3 = c(14, 27), n4 = c(12, 29), means1 = c(242, 222), means2 = c(188, NA), means3 = c(267, 277), means4 = c(92, 122), stDevs1 = c(244, 221), stDevs2 = c(212, NA), stDevs3 = c(256, 232), stDevs4 = c(215, 227) )), NA))) invisible(capture.output(expect_error(summary(getDataset( n1 = c(11, 13, 12, 13), n2 = c(8, 10, 9, 11), n3 = c(7, 10, 8, 9), events1 = c(10, 10, 12, 12), events2 = c(3, 5, 5, 6), events3 = c(2, 4, 3, 5) )), NA))) invisible(capture.output(expect_error(summary(getDataset( events1 = c(25, 32), events2 = c(18, NA), events3 = c(22, 36), logRanks1 = c(2.2, 1.8), logRanks2 = c(1.99, NA), logRanks3 = c(2.32, 2.11) )), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(futilityBounds = c(0, 1))), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 4, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 4, sided = 2), digits = 0), NA))) invisible(capture.output(expect_error(summary(getDesignInverseNormal(kMax = 1, sided = 2)), NA))) invisible(capture.output(expect_error(summary(getDesignFisher()), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(alpha0Vec = c(0.1, 0.2))), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 1)), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 4), digits = 5), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 4), digits = 0), NA))) invisible(capture.output(expect_error(summary(getDesignFisher(kMax = 1)), NA))) ## test design plans - means invisible(capture.output(expect_error(summary(getSampleSizeMeans(sided = 2, alternative = -0.5)), NA))) 
invisible(capture.output(expect_warning(summary(getSampleSizeMeans(sided = 2), alternative = -0.5)))) # warning expected invisible(capture.output(expect_error(summary(getPowerMeans( sided = 1, alternative = c(-0.5, -0.3), maxNumberOfSubjects = 100, directionUpper = FALSE )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 2.5 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 1, groups = 1 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( thetaH0 = 0, sided = 2, stDev = 1, groups = 1 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, allocationRatioPlanned = 0 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, groups = 1 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( getDesignGroupSequential(futilityBounds = c(1, 2)) )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( getDesignGroupSequential(futilityBounds = c(1, 2)) ), digits = 0), NA))) invisible(capture.output(expect_error(summary(getPowerMeans( getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, alternative = 1 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( getDesignGroupSequential(kMax = 4, sided = 2) )), NA))) invisible(capture.output(expect_error(summary(getPowerMeans( getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100 )), NA))) invisible(capture.output(expect_error(summary(getPowerMeans( getDesignGroupSequential(kMax = 1, sided = 2), maxNumberOfSubjects = 100 )), NA))) 
invisible(capture.output(expect_error(summary(getSampleSizeMeans( getDesignGroupSequential(futilityBounds = c(1, 2)) )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( getDesignGroupSequential(futilityBounds = c(1, 2)) ), digits = 4), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( getDesignGroupSequential(futilityBounds = c(1, 2)) ), digits = 3), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( getDesignGroupSequential(futilityBounds = c(1, 2)) ), digits = 2), NA))) invisible(capture.output(expect_error(summary(getSampleSizeMeans( getDesignGroupSequential(futilityBounds = c(1, 2)) ), digits = -1), NA))) ## test design plans - rates invisible(capture.output(expect_error(summary(getSampleSizeRates(pi2 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.45)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates( groups = 2, thetaH0 = 0.45, allocationRatioPlanned = 0 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates( getDesignGroupSequential(futilityBounds = c(1, 2)) )), NA))) invisible(capture.output(expect_error(summary(getPowerRates( getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates( getDesignGroupSequential(kMax = 4, sided = 2) )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates( getDesignGroupSequential(kMax = 4, sided = 2), groups = 1, thetaH0 = 0.3 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates( getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = c(0.4, 0.5) )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates( getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = 0.4 )), 
NA))) invisible(capture.output(expect_error(summary(getSampleSizeRates( getDesignGroupSequential(kMax = 1, sided = 2), groups = 2, thetaH0 = 0, pi1 = 0.25 )), NA))) invisible(capture.output(expect_error(summary(getPowerRates( getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100 )), NA))) ## test design plans - survival invisible(capture.output(expect_error(summary(getSampleSizeSurvival()), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = 1.2)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = c(1.2, 2))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(pi2 = 0.3, hazardRatio = 1.2)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(pi1 = 0.1, pi2 = 0.3)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(lambda2 = 0.03, lambda1 = c(0.040))), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(piecewiseSurvivalTime = list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007 ), hazardRatio = 1.2)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival( getDesignGroupSequential(futilityBounds = c(1, 2)) )), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival( getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, maxNumberOfEvents = 60 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival( getDesignGroupSequential(kMax = 4, sided = 2) )), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival( getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100, maxNumberOfEvents = 60 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival(sided = 2, lambda2 = log(2) / 6, lambda1 = log(2) / 8)), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival(sided = 2, 
maxNumberOfSubjects = 200, maxNumberOfEvents = 40, lambda2 = log(2) / 6, lambda1 = log(2) / 8)), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival( getDesignGroupSequential(sided = 2), lambda2 = log(2) / 6, hazardRatio = c(0.55), accrualTime = c(0, 10), accrualIntensity = 20 )), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival( getDesignGroupSequential(kMax = 2), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30 )), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival( getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = c(log(2) / 50, log(2) / 60), dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30 )), NA))) invisible(capture.output(expect_error(summary(getPowerSurvival( getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, hazardRatio = c(0.7, 0.8), directionUpper = FALSE, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30 )), NA))) design1 <- getDesignGroupSequential( sided = 2, alpha = 0.05, beta = 0.2, informationRates = c(0.6, 1), typeOfDesign = "asOF", twoSidedPower = FALSE ) invisible(capture.output(expect_error(summary(getSampleSizeSurvival( design1, lambda2 = log(2) / 60, hazardRatio = 0.74, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12 )), NA))) invisible(capture.output(expect_error(summary(getSampleSizeSurvival( design1, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12 )), NA))) 
invisible(capture.output(expect_error(summary(getSampleSizeSurvival( getDesignGroupSequential(kMax = 4, sided = 2) )), NA))) ## simulations design2 <- getDesignInverseNormal( alpha = 0.05, kMax = 4, futilityBounds = c(0, 0, 0), sided = 1, typeOfDesign = "WT", deltaWT = 0.1 ) invisible(capture.output(expect_error(summary(getSimulationSurvival(design2, lambda2 = log(2) / 60, lambda1 = c(log(2) / 80), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345, directionUpper = FALSE )), NA))) invisible(capture.output(expect_error(summary(getSimulationSurvival(design2, lambda2 = log(2) / 60, hazardRatio = c(1.2, 1.4), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345 )), NA))) design3 <- getDesignGroupSequential(typeOfDesign = "P", futilityBounds = c(1, 1)) invisible(capture.output(expect_error(summary(getSampleSizeMeans(design3)), NA))) invisible(capture.output(expect_error(summary(getSimulationMeans(design3, stDev = 4, plannedSubjects = (1:3) * 200, alternative = c(1, 2))), NA))) invisible(capture.output(expect_error(summary(getSimulationRates(design3, plannedSubjects = (1:3) * 200, pi1 = c(0.3, 0.4), maxNumberOfIterations = 1000, minNumberOfSubjectsPerStage = c(NA, 40, 40), maxNumberOfSubjectsPerStage = c(NA, 40, 400), conditionalPower = 0.8 )), NA))) invisible(capture.output(expect_error(summary(getSimulationMeans( getDesignGroupSequential(kMax = 1), stDev = 4, plannedSubjects = 200, alternative = c(1) )), NA))) }) test_that("Testing 'summary.ParameterSet': output will be produced", { .skipTestIfDisabled() # @refFS[Function]{fs:outputOfGenericFunctions} expect_output(summary(getDesignGroupSequential(beta = 0.05, typeOfDesign = "asKD", gammaA = 1, typeBetaSpending = "bsOF"))$show()) expect_output(summary(getDesignGroupSequential(kMax = 1))$show()) expect_output(summary(getDesignGroupSequential(kMax = 4, sided = 2))$show()) expect_output(summary(getDesignGroupSequential(kMax = 4, sided = 2), digits = 0)$show()) 
expect_output(summary(getDesignGroupSequential(kMax = 1, sided = 2))$show()) expect_output(summary(getDesignGroupSequential(futilityBounds = c(-6, 0)))$show()) expect_output(summary(getDesignGroupSequential(futilityBounds = c(-6, 0)), digits = 5)$show()) expect_output(summary(getDataset( n = c(13, 25), means = c(242, 222), stDevs = c(244, 221) ))$show()) expect_output(summary(getDataset( n = c(13), means = c(242), stDevs = c(244) ))$show()) expect_output(summary(getDataset( n1 = c(13, 25), n2 = c(15, NA), n3 = c(14, 27), n4 = c(12, 29), means1 = c(242, 222), means2 = c(188, NA), means3 = c(267, 277), means4 = c(92, 122), stDevs1 = c(244, 221), stDevs2 = c(212, NA), stDevs3 = c(256, 232), stDevs4 = c(215, 227) ))$show()) expect_output(summary(getDataset( n1 = c(11, 13, 12, 13), n2 = c(8, 10, 9, 11), n3 = c(7, 10, 8, 9), events1 = c(10, 10, 12, 12), events2 = c(3, 5, 5, 6), events3 = c(2, 4, 3, 5) ))$show()) expect_output(summary(getDataset( events1 = c(25, 32), events2 = c(18, NA), events3 = c(22, 36), logRanks1 = c(2.2, 1.8), logRanks2 = c(1.99, NA), logRanks3 = c(2.32, 2.11) ))$show()) expect_output(summary(getDesignInverseNormal(kMax = 1))$show()) expect_output(summary(getDesignInverseNormal(futilityBounds = c(0, 1)))$show()) expect_output(summary(getDesignInverseNormal(kMax = 1))$show()) expect_output(summary(getDesignInverseNormal(kMax = 4, sided = 2))$show()) expect_output(summary(getDesignInverseNormal(kMax = 4, sided = 2), digits = 0)$show()) expect_output(summary(getDesignInverseNormal(kMax = 1, sided = 2))$show()) expect_output(summary(getDesignFisher())$show()) expect_output(summary(getDesignFisher(alpha0Vec = c(0.1, 0.2)))$show()) expect_output(summary(getDesignFisher(kMax = 1))$show()) expect_output(summary(getDesignFisher(kMax = 4), digits = 5)$show()) expect_output(summary(getDesignFisher(kMax = 4), digits = 0)$show()) expect_output(summary(getDesignFisher(kMax = 1))$show()) ## test design plans - means expect_output(summary(getSampleSizeMeans(sided = 
2, alternative = -0.5))$show()) expect_output(summary(getPowerMeans(sided = 1, alternative = c(-0.5, -0.3), maxNumberOfSubjects = 100, directionUpper = FALSE))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 2.5))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 0.5, sided = 1, stDev = 1, groups = 1))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, sided = 2, stDev = 1, groups = 1))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, allocationRatioPlanned = 0))$show()) expect_output(summary(getSampleSizeMeans(thetaH0 = 0, alternative = 1.2, sided = 2, stDev = 5, groups = 1))$show()) expect_output(summary(getSampleSizeMeans( getDesignGroupSequential(futilityBounds = c(1, 2)) ))$show()) expect_output(summary(getSampleSizeMeans( getDesignGroupSequential(futilityBounds = c(1, 2)) ), digits = 0)$show()) expect_output(summary(getPowerMeans( getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, alternative = 1 ))$show()) expect_output(summary(getSampleSizeMeans( getDesignGroupSequential(kMax = 4, sided = 2) ))$show()) expect_output(summary(getPowerMeans( getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100 ))$show()) expect_output(summary(getPowerMeans( getDesignGroupSequential(kMax = 1, sided = 2), maxNumberOfSubjects = 100 ))$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))))$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 4)$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 3)$show()) expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = 2)$show()) 
expect_output(summary(getSampleSizeMeans(getDesignGroupSequential(futilityBounds = c(1, 2))), digits = -1)$show()) ## test design plans - rates expect_output(summary(getSampleSizeRates(pi2 = 0.3))$show()) expect_output(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.3))$show()) expect_output(summary(getSampleSizeRates(groups = 1, thetaH0 = 0.45))$show()) expect_output(summary(getSampleSizeRates(groups = 2, thetaH0 = 0.45, allocationRatioPlanned = 0))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(futilityBounds = c(1, 2))))$show()) expect_output(summary(getPowerRates(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2)))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 4, sided = 2), groups = 1, thetaH0 = 0.3))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential(kMax = 1, sided = 2), groups = 1, thetaH0 = 0.2, pi1 = c(0.4, 0.5) ))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential( kMax = 1, sided = 2 ), groups = 1, thetaH0 = 0.2, pi1 = 0.4))$show()) expect_output(summary(getSampleSizeRates(getDesignGroupSequential( kMax = 1, sided = 2 ), groups = 2, thetaH0 = 0, pi1 = 0.25))$show()) expect_output(summary(getPowerRates(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100))$show()) ## test design plans - survival expect_output(summary(getSampleSizeSurvival())$show()) expect_output(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = 1.2))$show()) expect_output(summary(getSampleSizeSurvival(lambda2 = 0.3, hazardRatio = c(1.2, 2)))$show()) expect_output(summary(getSampleSizeSurvival(pi2 = 0.3, hazardRatio = 1.2))$show()) expect_output(summary(getSampleSizeSurvival(pi1 = 0.1, pi2 = 0.3))$show()) expect_output(summary(getSampleSizeSurvival(lambda2 = 0.03, lambda1 = c(0.040)))$show()) 
expect_output(summary(getSampleSizeSurvival(piecewiseSurvivalTime = list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007 ), hazardRatio = 1.2))$show()) expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(futilityBounds = c(1, 2))))$show()) expect_output(summary(getPowerSurvival(getDesignGroupSequential(futilityBounds = c(1, 2)), maxNumberOfSubjects = 100, maxNumberOfEvents = 60 ))$show()) expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2)))$show()) expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 4, sided = 2), maxNumberOfSubjects = 100, maxNumberOfEvents = 60 ))$show()) expect_output(summary(getSampleSizeSurvival(sided = 2, lambda2 = log(2) / 6, lambda1 = log(2) / 8))$show()) expect_output(summary(getPowerSurvival( sided = 2, maxNumberOfSubjects = 200, maxNumberOfEvents = 40, lambda2 = log(2) / 6, lambda1 = log(2) / 8 ))$show()) expect_warning(expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(sided = 2), lambda2 = log(2) / 6, hazardRatio = c(0.55), accrualTime = c(0, 10), accrualIntensity = 60 ))$show()), "Accrual duration longer than maximal study duration (time to maximal number of events); followUpTime = -2.959", fixed = TRUE ) expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 2), maxNumberOfEvents = 150, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30 ))$show()) expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 400, lambda2 = log(2) / 60, lambda1 = c(log(2) / 50, log(2) / 60), dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30 ))$show()) expect_output(summary(getPowerSurvival(getDesignGroupSequential(kMax = 3), maxNumberOfEvents = 200, maxNumberOfSubjects = 
400, lambda2 = log(2) / 60, hazardRatio = c(0.7, 0.8), directionUpper = FALSE, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30 ))$show()) design1 <- getDesignGroupSequential( sided = 2, alpha = 0.05, beta = 0.2, informationRates = c(0.6, 1), typeOfDesign = "asOF", twoSidedPower = FALSE ) expect_output(summary(getSampleSizeSurvival( design1, lambda2 = log(2) / 60, hazardRatio = 0.74, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12 ))$show()) expect_output(summary(getSampleSizeSurvival( design1, lambda2 = log(2) / 60, lambda1 = log(2) / 50, dropoutRate1 = 0.025, dropoutRate2 = 0.025, dropoutTime = 12, accrualTime = 0, accrualIntensity = 30, followUpTime = 12 ))$show()) expect_output(summary(getSampleSizeSurvival(getDesignGroupSequential(kMax = 4, sided = 2)))$show()) ## simulations design2 <- getDesignInverseNormal( alpha = 0.05, kMax = 4, futilityBounds = c(0, 0, 0), sided = 1, typeOfDesign = "WT", deltaWT = 0.1 ) expect_output(summary(getSimulationSurvival(design2, lambda2 = log(2) / 60, lambda1 = c(log(2) / 80), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345, directionUpper = FALSE ))$show()) expect_output(summary(getSimulationSurvival(design2, lambda2 = log(2) / 60, hazardRatio = c(1.2, 1.4), maxNumberOfSubjects = 1000, plannedEvents = c(50, 100, 150, 200), seed = 12345 ))$show()) design3 <- getDesignGroupSequential(typeOfDesign = "P", futilityBounds = c(1, 1)) expect_output(summary(getSampleSizeMeans(design3))$show()) expect_output(summary(getSimulationMeans(design3, stDev = 4, plannedSubjects = (1:3) * 200, alternative = c(1, 2)))$show()) expect_output(summary(getSimulationRates(design3, plannedSubjects = (1:3) * 200, pi1 = c(0.3, 0.4), maxNumberOfIterations = 1000, minNumberOfSubjectsPerStage = c(NA, 40, 40), maxNumberOfSubjectsPerStage = c(NA, 40, 400), conditionalPower = 0.8 ))$show()) 
expect_output(summary(getSimulationMeans(getDesignGroupSequential(kMax = 1), stDev = 4, plannedSubjects = 200, alternative = 1 ))$show()) }) rpact/tests/testthat/test-f_analysis_utilities.R0000644000176200001440000001033714370207346021722 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_analysis_utilities.R ## | Creation date: 06 February 2023, 12:11:54 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Function Get Observed Information Rates") test_that("'getObservedInformationRates': final-stage", { data1 <- getDataset(overallN = c(22, 45), overallEvents = c(11, 28)) # @refFS[Formula]{fs:getObservedInformationRates} # @refFS[Formula]{fs:getObservedInformationRates:finalStageReached} result1 <- getObservedInformationRates(data1, maxInformation = 45) ## Comparison of the results of list object 'result1' with expected results expect_equal(result1$absoluteInformations, c(22, 45)) expect_equal(result1$maxInformation, 45) expect_equal(result1$informationRates, c(0.48888889, 1), tolerance = 1e-07) expect_equal(result1$status, "final-stage") }) test_that("'getObservedInformationRates': over-running", { data2 <- getDataset(overallN = c(22, 45), overallEvents = c(11, 28)) # @refFS[Formula]{fs:getObservedInformationRates} # @refFS[Formula]{fs:getObservedInformationRates:overRunning} result2 <- getObservedInformationRates(data2, 
maxInformation = 44) ## Comparison of the results of list object 'result2' with expected results expect_equal(result2$absoluteInformations, c(22, 45)) expect_equal(result2$maxInformation, 45) expect_equal(result2$informationRates, c(0.48888889, 1), tolerance = 1e-07) expect_equal(result2$status, "over-running") }) test_that("'getObservedInformationRates': interim-stage", { data3 <- getDataset(overallN = c(22, 45), overallEvents = c(11, 28)) # @refFS[Formula]{fs:getObservedInformationRates} # @refFS[Formula]{fs:getObservedInformationRates:interimStage} result3 <- getObservedInformationRates(data3, maxInformation = 46) ## Comparison of the results of list object 'result3' with expected results expect_equal(result3$absoluteInformations, c(22, 45)) expect_equal(result3$maxInformation, 46) expect_equal(result3$informationRates, c(0.47826087, 0.97826087, 1), tolerance = 1e-07) expect_equal(result3$status, "interim-stage") }) test_that("'getObservedInformationRates': under-running with absolute information epsilon", { data4 <- getDataset(overallN = c(22, 45), overallEvents = c(11, 28)) # @refFS[Formula]{fs:getObservedInformationRates} # @refFS[Formula]{fs:getObservedInformationRates:underRunning} result4 <- getObservedInformationRates(data4, maxInformation = 46, informationEpsilon = 1) ## Comparison of the results of list object 'result4' with expected results expect_equal(result4$absoluteInformations, c(22, 45)) expect_equal(result4$maxInformation, 45) expect_equal(result4$informationEpsilon, 1) expect_equal(result4$informationRates, c(0.48888889, 1), tolerance = 1e-07) expect_equal(result4$status, "under-running") }) test_that("'getObservedInformationRates': under-running with relative information epsilon", { data5 <- getDataset(overallN = c(22, 45), overallEvents = c(11, 28)) # @refFS[Formula]{fs:getObservedInformationRates} # @refFS[Formula]{fs:getObservedInformationRates:underRunningRelative} result5 <- getObservedInformationRates(data5, maxInformation = 46, 
informationEpsilon = 0.03) ## Comparison of the results of list object 'result5' with expected results expect_equal(result5$absoluteInformations, c(22, 45)) expect_equal(result5$maxInformation, 45) expect_equal(result5$informationEpsilon, 0.03, tolerance = 1e-07) expect_equal(result5$informationRates, c(0.48888889, 1), tolerance = 1e-07) expect_equal(result5$status, "under-running") }) rpact/tests/testthat/helper-class_analysis_dataset.R0000644000176200001440000000275414277150417022522 0ustar liggesusers## | ## | *Unit tests helper functions* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 6117 $ ## | Last changed: $Date: 2022-05-04 15:55:23 +0200 (Mi, 04 Mai 2022) $ ## | Last changed by: $Author: pahlke $ ## | getMultipleStageResultsForDataset <- function(dataset, thetaH0 = NA_real_) { stage <- dataset$getNumberOfStages() kMax <- stage + 1 design1 <- getDesignGroupSequential(kMax = kMax) design2 <- getDesignInverseNormal(kMax = kMax) design3 <- getDesignFisher(kMax = kMax) stageResults1 <- getStageResults(design = design1, dataInput = dataset, stage = stage, thetaH0 = thetaH0) stageResults2 <- getStageResults(design = design2, dataInput = dataset, stage = stage, thetaH0 = thetaH0) stageResults3 <- getStageResults(design = design3, dataInput = dataset, stage = stage, thetaH0 = thetaH0) return(list( stageResults1 = stageResults1, stageResults2 = stageResults2, stageResults3 = stageResults3 )) } 
rpact/tests/testthat/test-f_design_power_calculator.R0000644000176200001440000102257314372422771022713 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_design_power_calculator.R ## | Creation date: 13 February 2023, 12:02:47 ## | File version: $Revision: 6810 $ ## | Last changed: $Date: 2023-02-13 12:58:47 +0100 (Mo, 13 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Power Calculation of Testing Means for Different Designs and Arguments") test_that("'getPowerMeans': Power calculation of means in one sample for one-sided group sequential design", { designGS1 <- getDesignGroupSequential( informationRates = c(0.3, 0.7, 1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5, 0.5), typeOfDesign = "WT", deltaWT = 0.22 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = c(-1, 1.2, 1.4), directionUpper = TRUE, maxNumberOfSubjects = 50 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, c(-1.5, 0.7, 0.9), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(1.2624119e-07, 0.79805947, 0.93305789), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(1.2596734e-07, 0.17254516, 0.28730882), 
tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(2.7189458e-10, 0.43368823, 0.5145435), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(1.9550892e-12, 0.19182608, 0.13120557), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.99999942, 0.078678761, 0.02585129), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.99114779, 0.032857727, 0.013099441), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.008851635, 0.045821034, 0.01275185), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.99999955, 0.68491215, 0.82770361), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(15.177049, 35.61826, 31.576281), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8259013, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.1288256, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.97002208, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.2359398, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.67059547, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 1, thetaH0 = -0.5, stDev = 2, normalApproximation = FALSE, alternative = c(-1.2, -1), directionUpper = FALSE, maxNumberOfSubjects = 50 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, c(-0.7, -0.5), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.79805947, 0.56526867), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.17254516, 0.092241599), tolerance = 1e-07) 
expect_equal(powerResult$rejectPerStage[2, ], c(0.43368823, 0.28692789), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19182608, 0.18609918), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.078678761, 0.19394481), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.032857727, 0.072497778), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.045821034, 0.12144703), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.68491215, 0.5731143), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(35.61826, 38.108498), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$criticalValuesEffectScale[1, ], -1.8259013, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.1288256, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -0.97002208, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.2359398, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.67059547, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 1.2, maxNumberOfSubjects = 50 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, 0.7, tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.80544254, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.17645213), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.43857394), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], 
c("stage = 3" = 0.19041646), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.075570189, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.031759279), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.04381091), tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.69059627, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 35.476828, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.6797184, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.1091952, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.96124634, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.24180111, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.66903085, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterOneSampleMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = -0.5, stDev = 2, normalApproximation = TRUE, alternative = -1.2, directionUpper = FALSE, maxNumberOfSubjects = 50 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, -0.7, tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.37256342, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.0540554), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.17942496), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13908306), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.32503231, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 
0.11944374), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.20558857), tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.55851267, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 38.152327, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$criticalValuesEffectScale[1, ], -2.8594368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.7183904, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -1.4224927, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.01639778, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.8380617, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getPowerMeans': Power calculation of means in one sample for two-sided group sequential design", {
    .skipTestIfDisabled()

    # Two-sided three-stage Wang-Tsiatis design (deltaWT = 0.12) with
    # unequally spaced information rates; alpha = 0.12, beta = 0.15.
    designGS2 <- getDesignGroupSequential(
        informationRates = c(0.34, 0.66, 1), alpha = 0.12,
        sided = 2, beta = 0.15, typeOfDesign = "WT", deltaWT = 0.12
    )

    # Case 1: normalApproximation = FALSE, positive alternative.
    # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
    # @refFS[Formula]{fs:PowerGroupSequentialTwoSided}
    # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean}
    powerResult <- getPowerMeans(designGS2,
        groups = 1, thetaH0 = 0.5, stDev = 2,
        normalApproximation = FALSE, alternative = 1.2,
        maxNumberOfSubjects = 50
    )

    ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results
    expect_equal(powerResult$effect, 0.7, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.79752024, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.14049601), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.38370336), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.27332087), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.52419937, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, 38.840675, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 17)
    expect_equal(powerResult$numberOfSubjects[2, ], 33)
    expect_equal(powerResult$numberOfSubjects[3, ], 50)
    expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -0.86833341, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.20368487, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.020865698, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.8683334, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 1.2036849, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.9791343, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.01229935, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.051692876, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.096614336, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print/summary must run without error; then round-trip the object
        # through its own generated R code and compare the key results.
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Case 2: sign-mirrored counterpart of case 1 (thetaH0 and alternative
    # negated) -- power values match case 1; effect-scale boundaries flip sign.
    # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
    # @refFS[Formula]{fs:PowerGroupSequentialTwoSided}
    # @refFS[Formula]{fs:AdjShiftParameterOneSampleMean}
    powerResult <- getPowerMeans(designGS2,
        groups = 1, thetaH0 = -0.5, stDev = 2,
        normalApproximation = FALSE, alternative = -1.2,
        maxNumberOfSubjects = 50
    )

    ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results
    expect_equal(powerResult$effect, -0.7, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.79752024, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.14049601), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.38370336), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.27332087), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.52419937, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, 38.840675, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 17)
    expect_equal(powerResult$numberOfSubjects[2, ], 33)
    expect_equal(powerResult$numberOfSubjects[3, ], 50)
    expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.8683334, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -1.2036849, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.9791343, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.86833341, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.20368487, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], -0.020865698, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.01229935, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.051692876, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.096614336, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # Round-trip check as in case 1.
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Case 3: as case 1 but with normalApproximation = TRUE.
    # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
    # @refFS[Formula]{fs:PowerGroupSequentialTwoSided}
    # @refFS[Formula]{fs:ShiftParameterOneSampleMean}
    powerResult <- getPowerMeans(designGS2,
        groups = 1, thetaH0 = 0.5, stDev = 2,
        normalApproximation = TRUE, alternative = 1.2,
        maxNumberOfSubjects = 50
    )

    ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results
    expect_equal(powerResult$effect, 0.7, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.80597731, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.14453229), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.38954071), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.27190431), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.534073, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, 38.608242, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 17)
    expect_equal(powerResult$numberOfSubjects[2, ], 33)
    expect_equal(powerResult$numberOfSubjects[3, ], 50)
    expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -0.71434543, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.17739974, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.03005862, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.7143454, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 1.1773997, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.96994138, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.01229935, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.051692876, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.096614336, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # Round-trip check as in case 1.
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Case 4: sign-mirrored counterpart of case 3 -- power values match
    # case 3; effect-scale boundaries flip sign.
    # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
    # @refFS[Formula]{fs:PowerGroupSequentialTwoSided}
    # @refFS[Formula]{fs:ShiftParameterOneSampleMean}
    powerResult <- getPowerMeans(designGS2,
        groups = 1, thetaH0 = -0.5, stDev = 2,
        normalApproximation = TRUE, alternative = -1.2,
        maxNumberOfSubjects = 50
    )

    ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results
    expect_equal(powerResult$effect, -0.7, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.80597731, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.14453229), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.38954071), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.27190431), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.534073, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, 38.608242, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 17)
    expect_equal(powerResult$numberOfSubjects[2, ], 33)
    expect_equal(powerResult$numberOfSubjects[3, ], 50)
    expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.7143454, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -1.1773997, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.96994138, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.71434543, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.17739974, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], -0.03005862, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.01229935, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.051692876, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.096614336, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # Round-trip check as in case 1.
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getPowerMeans': Power calculation of mean difference in two samples for one-sided group sequential design", {
    .skipTestIfDisabled()

    designGS1 <- getDesignGroupSequential(
        informationRates = c(0.3, 0.7, 1), sided = 1,
        alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5, 0.5),
        typeOfDesign = "WT", deltaWT = 0.22
    )

    # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
    # @refFS[Formula]{fs:PowerGroupSequentialOneSided}
    # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff}
    powerResult <- getPowerMeans(designGS1,
        groups = 2, thetaH0 = 0.5, stDev = 1.5, meanRatio = FALSE,
        normalApproximation = FALSE, alternative = 1.8, directionUpper = TRUE,
        maxNumberOfSubjects = 50, allocationRatioPlanned =
3 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, 1.3, tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84205533, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.19830007), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46269628), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.18105899), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.060564406, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.026384529), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.034179878), tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72156075, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.682897, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.8183805, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.5902217, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.3144249, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.04183972, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.79556274, tolerance = 1e-07) 
expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") 
expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = -0.5, stDev = 1.5, meanRatio = FALSE, normalApproximation = FALSE, alternative = -1.8, directionUpper = FALSE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, -1.3, tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84205533, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.19830007), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46269628), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.18105899), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.060564406, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.026384529), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.034179878), tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72156075, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.682897, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) 
expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -2.8183805, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.5902217, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -1.3144249, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.04183972, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.79556274, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, 
powerResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 1.5, meanRatio = FALSE, normalApproximation = TRUE, alternative = 1.8, maxNumberOfSubjects = 50, allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, 1.3, tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84894434, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.20296684), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46718133), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17879617), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.057814211, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.025383492), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.032430719), tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72796238, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.513558, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) 
expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.5433322, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.555157, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.2989021, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.0527864, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.79277002, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = -0.5, stDev = 1.5, meanRatio = FALSE, normalApproximation = TRUE, alternative = -1.8, directionUpper = FALSE, maxNumberOfSubjects = 50, allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, -1.3, tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.84894434, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.20296684), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.46718133), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17879617), 
tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.057814211, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.025383492), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.032430719), tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.72796238, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 34.513558, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], -2.5433322, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], -1.555157, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], -1.2989021, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.0527864, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.79277002, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, 
stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanRatio} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.8, stDev = 1.5, meanRatio = TRUE, normalApproximation = FALSE, alternative = 1.8, directionUpper = TRUE, maxNumberOfSubjects = 50, 
allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, 1) expect_equal(powerResult$overallReject, 0.77427796, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.16086364), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.41797637), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.19543795), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.08888951, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.036438496), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.052451014), tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.66772952, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 36.038015, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.7808252, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.7314858, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.495845, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.40854768, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.0525289, tolerance = 1e-07) 
expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") 
expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean} powerResult <- getPowerMeans(designGS1, groups = 2, thetaH0 = 0.8, stDev = 1.5, meanRatio = TRUE, normalApproximation = TRUE, alternative = 1.8, maxNumberOfSubjects = 50, allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results expect_equal(powerResult$effect, 1) expect_equal(powerResult$overallReject, 0.7820561, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.16454336), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.42310788), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.19440486), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.085516174, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.035259709), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.050256465), tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.67316741, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 35.906427, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 15) expect_equal(powerResult$numberOfSubjects[2, ], 35) expect_equal(powerResult$numberOfSubjects[3, ], 50) expect_equal(powerResult$numberOfSubjects1[1, ], 11.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[2, ], 26.25, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 37.5, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 3.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[2, ], 8.75, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 12.5, tolerance = 1e-07) 
expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.5458238, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.7015266, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.4825823, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.41790054, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.0501428, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05) 
# Tail of the preceding one-sided getPowerMeans test: remaining comparisons of
# the object reconstructed from its generated R code against the original
# result, followed by data.frame/matrix conversion smoke tests.
expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05)
expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05)
expect_type(names(powerResult), "character")
df <- as.data.frame(powerResult)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(powerResult)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

# Power of the two-sample comparison of means under a two-sided, three-stage
# Wang-Tsiatis group sequential design (deltaWT = 0.22, alpha = 0.4,
# beta = 0.1) with unequal allocation (allocationRatioPlanned = 0.7).
# Four scenarios are checked: exact t-test and normal approximation, each with
# a positive and a negative alternative; because the design is two-sided, the
# expected operating characteristics are identical for +/- 'alternative'
# (only the sign of 'effect' flips).
test_that("'getPowerMeans': Power calculation of mean difference in two samples for two-sided group sequential design", {
    .skipTestIfDisabled()

    designGS2 <- getDesignGroupSequential(
        informationRates = c(0.3, 0.7, 1), alpha = 0.4, sided = 2,
        beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22
    )

    # Scenario 1: exact t-test (normalApproximation = FALSE), alternative = 1.2
    # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
    # @refFS[Formula]{fs:PowerGroupSequentialTwoSided}
    # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff}
    powerResult <- getPowerMeans(designGS2,
        groups = 2, stDev = 2,
        normalApproximation = FALSE, alternative = 1.2,
        maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7
    )

    ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results
    expect_equal(powerResult$effect, 1.2, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.87442088, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35754296), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37848399), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13839393), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.73602695, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, 31.808737, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 15)
    expect_equal(powerResult$numberOfSubjects[2, ], 35)
    expect_equal(powerResult$numberOfSubjects[3, ], 50)
    expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.6972761, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.83631454, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62866109, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.6972761, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.83631454, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62866109, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # Extended checks: print/summary must not error, and the object must
        # survive a round trip through its generated R code representation.
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Scenario 2: exact t-test, alternative = -1.2; expected values mirror
    # scenario 1 (two-sided symmetry), only 'effect' changes sign.
    # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
    # @refFS[Formula]{fs:PowerGroupSequentialTwoSided}
    # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesMeanDiff}
    powerResult <- getPowerMeans(designGS2,
        groups = 2, stDev = 2,
        normalApproximation = FALSE, alternative = -1.2,
        maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7
    )

    ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results
    expect_equal(powerResult$effect, -1.2, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.87442088, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35754296), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37848399), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13839393), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.73602695, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, 31.808737, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 15)
    expect_equal(powerResult$numberOfSubjects[2, ], 35)
    expect_equal(powerResult$numberOfSubjects[3, ], 50)
    expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.6972761, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.83631454, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62866109, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.6972761, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.83631454, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62866109, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # Extended checks: print/summary must not error, and the object must
        # survive a round trip through its generated R code representation.
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Scenario 3: normal approximation (explicit thetaH0 = 0), alternative = 1.2
    # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
    # @refFS[Formula]{fs:PowerGroupSequentialTwoSided}
    # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean}
    powerResult <- getPowerMeans(designGS2,
        groups = 2, thetaH0 = 0, stDev = 2,
        normalApproximation = TRUE, alternative = 1.2,
        maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7
    )

    ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results
    expect_equal(powerResult$effect, 1.2, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.87592587, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35907583), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37896773), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13788231), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.73804356, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, 31.74783, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 15)
    expect_equal(powerResult$numberOfSubjects[2, ], 35)
    expect_equal(powerResult$numberOfSubjects[3, ], 50)
    expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.5897396, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.82092617, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62155644, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.5897396, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.82092617, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62155644, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # Extended checks: print/summary must not error, and the object must
        # survive a round trip through its generated R code representation.
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Scenario 4: normal approximation (default thetaH0), alternative = -1.2;
    # expected values mirror scenario 3, only 'effect' changes sign.
    # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
    # @refFS[Formula]{fs:PowerGroupSequentialTwoSided}
    # @refFS[Formula]{fs:ShiftParameterTwoSamplesMean}
    powerResult <- getPowerMeans(designGS2,
        groups = 2, stDev = 2,
        normalApproximation = TRUE, alternative = -1.2,
        maxNumberOfSubjects = 50, allocationRatioPlanned = 0.7
    )

    ## Comparison of the results of TrialDesignPlanMeans object 'powerResult' with expected results
    expect_equal(powerResult$effect, -1.2, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.87592587, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.35907583), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.37896773), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.13788231), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.73804356, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, 31.74783, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 15)
    expect_equal(powerResult$numberOfSubjects[2, ], 35)
    expect_equal(powerResult$numberOfSubjects[3, ], 50)
    expect_equal(powerResult$numberOfSubjects1[1, ], 6.1764706, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects1[2, ], 14.411765, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects1[3, ], 20.588235, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[1, ], 8.8235294, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[2, ], 20.588235, tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects2[3, ], 29.411765, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -1.5897396, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.82092617, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.62155644, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 1.5897396, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.82092617, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.62155644, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # Extended checks: print/summary must not error, and the object must
        # survive a round trip through its generated R code representation.
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_plan_section("Testing the Power Calculation of Testing Rates for Different Designs and Arguments")
test_that("'getPowerRates': Power calculation of rate in one sample for one-sided group sequential design", {
    .skipTestIfDisabled()

    designGS1 <- getDesignGroupSequential(
        informationRates = c(0.3, 0.7, 1), sided = 1,
        alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5, 0.5),
        typeOfDesign = "WT", deltaWT = 0.22
    )

    # @refFS[Tab.]{fs:tab:output:getSampleSizeRates}
    # @refFS[Formula]{fs:PowerGroupSequentialOneSided}
    # @refFS[Formula]{fs:AdjShiftParameterOneSampleRate}
    powerResult <- getPowerRates(designGS1,
        groups = 1, thetaH0 = 0.4, pi1 = c(0.2, 0.3, 0.4),
        directionUpper = FALSE, maxNumberOfSubjects = 40
    )

    ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results
    expect_equal(powerResult$effect, c(-0.2, -0.1, 0), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, c(26.793099, 30.568926, 25.859698), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, c(0.8850078, 0.38742607, 0.067448723), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c(0.23143452, 0.056551742, 0.011170644), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c(0.48990786, 0.18729986, 0.030436001), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c(0.16366541, 0.14357447, 0.025842077), tolerance = 1e-07)
    expect_equal(powerResult$futilityStop, c(0.043768704, 0.31327331, 0.71047424), tolerance = 1e-07)
    expect_equal(powerResult$futilityPerStage[1, ], c(0.020163481, 0.11504671, 0.30853754), tolerance = 1e-07)
    expect_equal(powerResult$futilityPerStage[2, ], c(0.023605223, 0.1982266, 0.40193671), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, c(0.76511109, 0.55712491, 0.75208089), tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 12)
    expect_equal(powerResult$numberOfSubjects[2, ], 28)
    expect_equal(powerResult$numberOfSubjects[3, ], 40)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.076920806, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.23316503, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.27368249, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.47071068, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.353709, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getSampleSizeRates}
    # @refFS[Formula]{fs:PowerGroupSequentialOneSided}
    # @refFS[Formula]{fs:AdjShiftParameterOneSampleRate}
    # NOTE(review): the original call passed an empty argument ('directionUpper = ,'),
    # silently relying on the default; the expected values below correspond to testing
    # in the upper direction, so state directionUpper = TRUE explicitly (consistent
    # with the sibling two-sample tests in this file).
    powerResult <- getPowerRates(designGS1,
        groups = 1, thetaH0 = 0.4, pi1 = c(0.4, 0.5, 0.6),
        directionUpper = TRUE, maxNumberOfSubjects = 40
    )

    ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results
    expect_equal(powerResult$effect, c(0, 0.1, 0.2), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, c(25.859698, 30.585503, 27.927522), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, c(0.067448723, 0.39348465, 0.83236985), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.057586328, 0.19206788), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.19052871, 0.45635017), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.14536961, 0.1839518), tolerance = 1e-07)
    expect_equal(powerResult$futilityStop, c(0.71047424, 0.30857493, 0.064469377), tolerance = 1e-07)
    expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.11330227, 0.027796437), tolerance = 1e-07)
    expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.19527267, 0.03667294), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, c(0.75208089, 0.55668998, 0.71288743), tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 12)
    expect_equal(powerResult$numberOfSubjects[2, ], 28)
    expect_equal(powerResult$numberOfSubjects[3, ], 40)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.72307919, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.56683497, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.52631751, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.32928932, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.446291, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getPowerRates': Power calculation of rate in one sample for two-sided group sequential design", {
    .skipTestIfDisabled()

    designGS2 <- getDesignGroupSequential(
        informationRates = c(0.3, 0.7, 1), alpha = 0.4, sided = 2,
        beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22
    )

    # @refFS[Tab.]{fs:tab:output:getSampleSizeRates}
    # @refFS[Formula]{fs:PowerGroupSequentialTwoSided}
    # @refFS[Formula]{fs:AdjShiftParameterOneSampleRate}
    powerResult <- getPowerRates(designGS2,
        groups = 1, thetaH0 = 0.4, pi1 = seq(0.2, 0.6, 0.1), maxNumberOfSubjects = 40
    )

    ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results
    expect_equal(powerResult$effect, c(-0.2, -0.1, 0, 0.1, 0.2), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, c(20.319274, 30.129425, 34.422159, 30.357182, 22.419855), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, c(0.97746912, 0.67692518, 0.4, 0.66457209, 0.94801088), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c(0.54595705, 0.22704321, 0.1297467, 0.22142183, 0.46151826), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c(0.36616073, 0.29278043, 0.16207777, 0.28691724, 0.38813612), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c(0.065351333, 0.15710154, 0.10817552, 0.15623302, 0.098356497), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, c(0.91211779, 0.51982364, 0.29182448, 0.50833906, 0.84965439), tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 12)
    expect_equal(powerResult$numberOfSubjects[2, ], 28)
    expect_equal(powerResult$numberOfSubjects[3, ], 40)
    expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.18573229, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.28935423, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.3162256, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.61426771, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.51064577, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.4837744, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getPowerRates': Power calculation of rate in two samples for one-sided group sequential design, riskRatio = FALSE", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:output:getSampleSizeRates}
    # @refFS[Formula]{fs:PowerGroupSequentialOneSided}
    # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff}
    # @refFS[Formula]{fs:EstimatesDiffFarringtonManning}
    designGS1 <- getDesignGroupSequential(
        informationRates = c(0.3, 0.7, 1), sided = 1,
        alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5, 0.5),
        typeOfDesign = "WT", deltaWT = 0.22
    )

    # @refFS[Tab.]{fs:tab:output:getSampleSizeRates}
    # @refFS[Formula]{fs:PowerGroupSequentialOneSided}
    # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff}
    # @refFS[Formula]{fs:EstimatesDiffFarringtonManning}
    powerResult <- getPowerRates(designGS1,
        groups = 2, thetaH0 = 0.1, pi2 = 0.4, pi1 = c(0.1, 0.2, 0.3),
        directionUpper = FALSE, maxNumberOfSubjects = 40, allocationRatioPlanned = 3
    )

    ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results
    expect_equal(powerResult$effect, c(-0.4, -0.3, -0.2), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, c(27.333747, 30.142404, 30.525807), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, c(0.86217083, 0.63525529, 0.37370586), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c(0.21254585, 0.11056737, 0.054245237), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c(0.47569558, 0.32910884, 0.18002797), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c(0.17392941, 0.19557908, 0.13943265), tolerance = 1e-07)
    expect_equal(powerResult$futilityStop, c(0.05259588, 0.1553509, 0.32411639), tolerance = 1e-07)
    expect_equal(powerResult$futilityPerStage[1, ], c(0.023466961, 0.059262043, 0.11909962), tolerance = 1e-07)
    expect_equal(powerResult$futilityPerStage[2, ], c(0.029128919, 0.096088854, 0.20501677), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, c(0.74083731, 0.59502711, 0.5583896), tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 12)
    expect_equal(powerResult$numberOfSubjects[2, ], 28)
    expect_equal(powerResult$numberOfSubjects[3, ], 40)
    expect_equal(powerResult$numberOfSubjects1[1, ], 9)
    expect_equal(powerResult$numberOfSubjects1[2, ], 21)
    expect_equal(powerResult$numberOfSubjects1[3, ], 30)
    expect_equal(powerResult$numberOfSubjects2[1, ], 3)
    expect_equal(powerResult$numberOfSubjects2[2, ], 7)
    expect_equal(powerResult$numberOfSubjects2[3, ], 10)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], -0.3905544, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], -0.21681979, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[3, ], -0.15504053, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.26517501, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.00361566, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getSampleSizeRates}
    # @refFS[Formula]{fs:PowerGroupSequentialOneSided}
    # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff}
    # @refFS[Formula]{fs:EstimatesDiffFarringtonManning}
    powerResult <- getPowerRates(designGS1,
        groups = 2, thetaH0 = -0.1, pi2 = 0.4, pi1 = c(0.2, 0.3, 0.4, 0.5),
        directionUpper = TRUE, maxNumberOfSubjects = 80, allocationRatioPlanned = 3
    )

    ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results
    expect_equal(powerResult$effect, c(-0.1, -2.7755576e-17, 0.1, 0.2), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, c(42.4454, 51.719397, 58.823585, 61.315141), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, c(0.011153335, 0.067448723, 0.22125497, 0.49276327), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c(0.0028716829, 0.011170644, 0.031364648, 0.076178456), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c(0.0049229598, 0.030436001, 0.1027412, 0.24505539), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[3, ], c(0.0033586921, 0.025842077, 0.087149125, 0.17152942), tolerance = 1e-07)
    expect_equal(powerResult$futilityStop, c(0.89841517, 0.71047424, 0.46922933, 0.23841544), tolerance = 1e-07)
    expect_equal(powerResult$futilityPerStage[1, ], c(0.49105221, 0.30853754, 0.17789692, 0.08798644), tolerance = 1e-07)
    expect_equal(powerResult$futilityPerStage[2, ], c(0.40736296, 0.40193671, 0.29133241, 0.150429), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, c(0.90620981, 0.75208089, 0.60333518, 0.55964928), tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[1, ], 24)
    expect_equal(powerResult$numberOfSubjects[2, ], 56)
    expect_equal(powerResult$numberOfSubjects[3, ], 80)
    expect_equal(powerResult$numberOfSubjects1[1, ], 18)
    expect_equal(powerResult$numberOfSubjects1[2, ], 42)
    expect_equal(powerResult$numberOfSubjects1[3, ], 60)
    expect_equal(powerResult$numberOfSubjects2[1, ], 6)
    expect_equal(powerResult$numberOfSubjects2[2, ], 14)
    expect_equal(powerResult$numberOfSubjects2[3, ], 20)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.38186802, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.17360028, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.10931124, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsEffectScale[1, ], -0.20652185, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsEffectScale[2, ], -0.02383242, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07)
    expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})
test_that("'getPowerRates': Power calculation of rate in two samples for one-sided group sequential design, riskRatio = TRUE", { .skipTestIfDisabled() designGS1 <- getDesignGroupSequential( informationRates = c(0.3, 0.7, 1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5, 0.5), typeOfDesign = "WT", deltaWT = 0.22 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateRatio} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = 0.8, pi2 = 0.5, pi1 = c(0.1, 0.2, 0.3), riskRatio = TRUE, directionUpper = FALSE, maxNumberOfSubjects = 40, allocationRatioPlanned = 5 ) ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results expect_equal(powerResult$effect, c(-0.6, -0.4, -0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(29.869153, 30.545915, 28.722194), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.67404635, 0.37979679, 0.17337279), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.12233203, 0.055263055, 0.02493902), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.35325438, 0.1832494, 0.079687483), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19845995, 0.14128433, 0.068746287), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.13554504, 0.31926733, 0.52845861), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.052497346, 0.11728241, 0.20511002), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.083047698, 0.20198492, 0.32334859), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.61113145, 0.55777979, 0.63308512), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) 
expect_equal(powerResult$numberOfSubjects1[1, ], 10) expect_equal(powerResult$numberOfSubjects1[2, ], 23.333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 33.333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 2) expect_equal(powerResult$numberOfSubjects2[2, ], 4.6666667, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 6.6666667, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], NA_real_) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.19789883, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.30397209, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 1.1132916, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.59448494, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateRatio} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} powerResult <- getPowerRates(designGS1, groups = 2, thetaH0 = 0.8, pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), riskRatio = TRUE, directionUpper = TRUE, maxNumberOfSubjects = 80, allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results expect_equal(powerResult$effect, c(0.2, 0.45, 0.7), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(58.50994, 61.208415, 55.770675), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.20890064, 0.52512104, 0.83467468), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.029681783, 0.083038809, 0.19351805), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.096741134, 0.26351903, 0.45786385), tolerance = 
1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.082477726, 0.17856321, 0.18329277), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.48366053, 0.21795048, 0.063536004), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.18431999, 0.080816996, 0.027459911), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.29934054, 0.13713348, 0.036076093), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.61008345, 0.56450831, 0.71491791), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 24) expect_equal(powerResult$numberOfSubjects[2, ], 56) expect_equal(powerResult$numberOfSubjects[3, ], 80) expect_equal(powerResult$numberOfSubjects1[1, ], 18) expect_equal(powerResult$numberOfSubjects1[2, ], 42) expect_equal(powerResult$numberOfSubjects1[3, ], 60) expect_equal(powerResult$numberOfSubjects2[1, ], 6) expect_equal(powerResult$numberOfSubjects2[2, ], 14) expect_equal(powerResult$numberOfSubjects2[3, ], 20) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8651141, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.3871263, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.2471692, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.57000905, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.96223105, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) 
expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getPowerRates': Power calculation of rate in two samples for two-sided group sequential design", { .skipTestIfDisabled() designGS2 <- getDesignGroupSequential( informationRates = c(0.3, 0.7, 1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.22 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # 
@refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateDiff} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} powerResult <- getPowerRates(designGS2, groups = 2, pi2 = 0.5, pi1 = c(0.1, 0.2, 0.3), riskRatio = FALSE, maxNumberOfSubjects = 40, allocationRatioPlanned = 0.5 ) ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results expect_equal(powerResult$effect, c(-0.4, -0.3, -0.2), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(20.586564, 26.282925, 30.696455), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.9745822, 0.84688722, 0.64568809), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.53456929, 0.33187612, 0.2131539), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.37045799, 0.36871195, 0.27793629), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.06955493, 0.14629915, 0.1545979), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.90502727, 0.70058807, 0.49109019), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 12) expect_equal(powerResult$numberOfSubjects[2, ], 28) expect_equal(powerResult$numberOfSubjects[3, ], 40) expect_equal(powerResult$numberOfSubjects1[1, ], 4) expect_equal(powerResult$numberOfSubjects1[2, ], 9.3333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects1[3, ], 13.333333, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[1, ], 8) expect_equal(powerResult$numberOfSubjects2[2, ], 18.666667, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects2[3, ], 26.666667, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], -0.44319209, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], -0.2365574, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], -0.18006528, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 0.44319209, tolerance = 1e-07) 
expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 0.2365574, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 0.18006528, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05) 
expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:AdjShiftParameterTwoSamplesRateRatio} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} powerResult <- getPowerRates(designGS2, groups = 2, pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), riskRatio = TRUE, maxNumberOfSubjects = 80, allocationRatioPlanned = 7 ) ## Comparison of the results of TrialDesignPlanRates object 'powerResult' with expected results expect_equal(powerResult$effect, c(0, 0.25, 0.5), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(68.844318, 66.97762, 61.620959), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.4, 0.46817413, 0.63921164), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.1297467, 0.14947843, 0.21040306), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.16207777, 0.19381617, 0.27485292), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.10817552, 0.12487952, 0.15395566), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.29182448, 0.3432946, 0.48525598), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[1, ], 24) expect_equal(powerResult$numberOfSubjects[2, ], 56) expect_equal(powerResult$numberOfSubjects[3, ], 80) expect_equal(powerResult$numberOfSubjects1[1, ], 21) expect_equal(powerResult$numberOfSubjects1[2, ], 49) expect_equal(powerResult$numberOfSubjects1[3, ], 70) expect_equal(powerResult$numberOfSubjects2[1, ], 3) expect_equal(powerResult$numberOfSubjects2[2, ], 7) expect_equal(powerResult$numberOfSubjects2[3, ], 10) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.22081341, tolerance = 1e-07) 
expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.49677588, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5992042, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 2.0083461, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 1.5897897, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.4538504, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.1297467, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.23204368, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.27946463, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$effect, powerResult$effect, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects1, powerResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects2, powerResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 
1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_plan_section("Testing the Power Calculation of Survival Designs for Different Designs and Arguments") test_that("'getPowerSurvival': Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(maxNumberOfEvents = 40, maxNumberOfSubjects = 200) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(6.1115255, 3.442577, 1.6316894, 0.30440109), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(powerResult$overallReject, c(0.025, 
0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, 
powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getPowerSurvival': Power calculation of survival designs for one-sided group sequential design", { .skipTestIfDisabled() designGS1 <- getDesignGroupSequential( informationRates = c(0.3, 0.7, 1), sided = 1, alpha = 0.07, beta = 0.1, futilityBounds = c(-0.5, 0.5), typeOfDesign = "WT", deltaWT = 0.22 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 80, maxNumberOfEvents = 45, allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(32.565971, 24, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.021284401, 0.028881133, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.3569154, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 6.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(41.628872, 30.417026, 22.638977), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(29.092161, 33.496718, 
34.368969), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25463139, 0.54601962), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.036015488, 0.087726198), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.11913846, 0.27563412), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.099477436, 0.1826593), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.43269831, 0.2052719), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.16216653, 0.076412449), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.27053178, 0.12885945), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58785226, 0.56863222), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.123713, 13.244904, 11.839868), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(32.118539, 26.401459, 22.217088), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(32.017976, 30.394846, 25.872188), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 13.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 31.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 45) expect_equal(powerResult$numberOfSubjects[1, ], c(80, 80, 78.932452), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(80, 80, 80)) expect_equal(powerResult$numberOfSubjects[3, ], c(80, 80, 80)) expect_equal(powerResult$expectedNumberOfSubjects, c(80, 80, 79.824774), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 4.203458, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 2.0990582, 
tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.7531447, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.73032205, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.2284311, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, 
powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, typeOfComputation = "Freedman", pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 80, maxNumberOfEvents = 45, allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' 
with expected results expect_equal(powerResult$median1, c(32.565971, 24, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.021284401, 0.028881133, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.3569154, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 6.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(41.628872, 30.417026, 22.638977), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(29.092161, 33.256688, 34.504982), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.23410594, 0.44983629), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.033136424, 0.067729226), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.10902189, 0.22109606), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.091947627, 0.16101101), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.45476178, 0.26727979), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.1715797, 0.098248524), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.28318207, 0.16903127), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.59692009, 0.55610508), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.123713, 13.244904, 11.839868), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(32.118539, 26.401459, 22.217088), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(32.017976, 30.163653, 26.008714), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) 
expect_equal(powerResult$eventsPerStage[1, ], 13.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 31.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 45) expect_equal(powerResult$numberOfSubjects[1, ], c(80, 80, 78.932452), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(80, 80, 80)) expect_equal(powerResult$numberOfSubjects[3, ], c(80, 80, 80)) expect_equal(powerResult$expectedNumberOfSubjects, c(80, 80, 79.822811), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 4.203458, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 2.0990582, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.7531447, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.73032205, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.2284311, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # 
@refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, typeOfComputation = "HsiehFreedman", pi2 = 0.4, pi1 = c(0.4, 0.5, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 80, maxNumberOfEvents = 45, allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(32.565971, 24, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.021284401, 0.028881133, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.3569154, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 6.6666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(41.628872, 30.417026, 22.638977), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(29.092161, 33.473935, 34.421802), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25255296, 0.52822452), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.03572104, 0.083721511), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.11810922, 0.2653086), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.098722701, 0.17919441), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.43487767, 0.2160418), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.16308496, 0.080152238), tolerance = 1e-07) 
expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.27179271, 0.13588956), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58870793, 0.56507191), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.123713, 13.244904, 11.839868), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(32.118539, 26.401459, 22.217088), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(32.017976, 30.372933, 25.919163), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(53.628872, 42.417026, 34.638977), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 13.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 31.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 45) expect_equal(powerResult$numberOfSubjects[1, ], c(80, 80, 78.932452), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(80, 80, 80)) expect_equal(powerResult$numberOfSubjects[3, ], c(80, 80, 80)) expect_equal(powerResult$expectedNumberOfSubjects, c(80, 80, 79.825057), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 4.203458, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 2.0990582, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.7531447, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.73032205, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.2284311, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) 
invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, 
powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, lambda2 = 0.04, thetaH0 = 1.25, hazardRatio = 0.8, directionUpper = FALSE, maxNumberOfSubjects = 200, maxNumberOfEvents = 65, allocationRatioPlanned = 3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, 21.660849, tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, 0.032, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, 5.7883102, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, 49.818428, tolerance = 1e-07) expect_equal(powerResult$overallReject, 0.49283375, tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.076192913), tolerance = 1e-07) 
expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.24509523), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c("stage = 3" = 0.17154561), tolerance = 1e-07) expect_equal(powerResult$futilityStop, 0.2383697, tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c("stage = 1" = 0.087970326), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c("stage = 2" = 0.15039938), tolerance = 1e-07) expect_equal(powerResult$earlyStop, 0.55965784, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 8.7091306, tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], 13.807185, tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], 17.78831, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 14.723329, tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, 17.78831, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 19.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 45.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 65) expect_equal(powerResult$numberOfSubjects[1, ], 145.15218, tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], 200) expect_equal(powerResult$numberOfSubjects[3, ], 200) expect_equal(powerResult$expectedNumberOfSubjects, 190.996, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.37847558, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.67448058, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 0.78350426, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 1.623577, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 1.0533329, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, lambda2 = 0.04, thetaH0 = 0.8, hazardRatio = seq(0.8, 1.4, 0.2), directionUpper = TRUE, maxNumberOfSubjects = 200, maxNumberOfEvents = 65, allocationRatioPlanned = 1 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(21.660849, 17.32868, 14.440566, 12.377628), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.032, 0.04, 0.048, 0.056), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(5.1617391, 4.0656056, 3.2120436, 2.5256004), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(42.02201, 48.445748, 49.742518, 47.47852), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25860493, 0.52208361, 0.74266051), tolerance = 1e-07) 
expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.03658032, 0.082375002, 0.14710823), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.12110923, 0.26177073, 0.39724295), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.10091538, 0.17793787, 0.19830932), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.42856456, 0.21982747, 0.10295201), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.1604311, 0.08147133, 0.041317452), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.26813346, 0.13835614, 0.061634556), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58625412, 0.5639732, 0.6473032), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4767885, 8.0592408, 7.7076518, 7.4060255), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.399188, 12.692623, 12.137705, 11.68467), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.161739, 16.065606, 15.212044, 14.5256), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(12.758265, 13.175351, 12.752351, 11.880451), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.161739, 16.065606, 15.212044, 14.5256), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 19.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 45.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 65) expect_equal(powerResult$numberOfSubjects[1, ], c(141.27981, 134.32068, 128.46086, 123.43376), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200, 194.7445), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(200, 200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(181.22667, 187.06042, 188.27858, 183.16132), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.2513678, 
tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.3650021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.1988902, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.63788392, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.92784212, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, 
powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS1, eventTime = 120, pi2 = 0.4, thetaH0 = 0.8, hazardRatio = seq(0.8, 1.4, 0.2), directionUpper = TRUE, maxNumberOfSubjects = 200, maxNumberOfEvents = 65, allocationRatioPlanned = 1 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$pi1, c(0.33546019, 0.4, 0.45827173, 0.51088413), tolerance = 
1e-07) expect_equal(powerResult$median1, c(203.53732, 162.82985, 135.69154, 116.30704), tolerance = 1e-07) expect_equal(powerResult$median2, 162.82985, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.0034055042, 0.0042568802, 0.0051082562, 0.0059596323), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.0042568802, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(96.86335, 86.356678, 78.102375, 71.398147), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(42.02201, 48.445748, 49.742518, 47.47852), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.067448723, 0.25860493, 0.52208361, 0.74266051), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.011170644, 0.03658032, 0.082375002, 0.14710823), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.030436001, 0.12110923, 0.26177073, 0.39724295), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.025842077, 0.10091538, 0.17793787, 0.19830932), tolerance = 1e-07) expect_equal(powerResult$futilityStop, c(0.71047424, 0.42856456, 0.21982747, 0.10295201), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[1, ], c(0.30853754, 0.1604311, 0.08147133, 0.041317452), tolerance = 1e-07) expect_equal(powerResult$futilityPerStage[2, ], c(0.40193671, 0.26813346, 0.13835614, 0.061634556), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.75208089, 0.58625412, 0.5639732, 0.6473032), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(32.816894, 30.124548, 27.945787, 26.142615), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(73.505015, 66.662265, 61.211479, 56.744296), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(108.86335, 98.356678, 90.102375, 83.398147), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(69.262697, 72.57735, 68.358222, 60.378881), tolerance = 1e-07) 
expect_equal(powerResult$maxStudyDuration, c(108.86335, 98.356678, 90.102375, 83.398147), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 19.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 45.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 65) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 200, 200, 200)) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200, 200)) expect_equal(powerResult$numberOfSubjects[3, ], c(200, 200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$criticalValuesEffectScale[1, ], 2.2513678, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.3650021, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 1.1988902, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[1, ], 0.63788392, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsEffectScale[2, ], 0.92784212, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[1, ], 0.69146246, tolerance = 1e-07) expect_equal(powerResult$futilityBoundsPValueScale[2, ], 0.30853754, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$pi1, powerResult$pi1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityStop, powerResult$futilityStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityPerStage, powerResult$futilityPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsEffectScale, powerResult$futilityBoundsEffectScale, tolerance = 1e-05) expect_equal(powerResultCodeBased$futilityBoundsPValueScale, powerResult$futilityBoundsPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } 
}) test_that("'getPowerSurvival': Power calculation of survival designs for two-sided group sequential design", { .skipTestIfDisabled() designGS2 <- getDesignGroupSequential( informationRates = c(0.3, 0.7, 1), alpha = 0.11, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.32 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, pi2 = 0.4, pi1 = c(0.2, 0.4, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(74.550809, 32.565971, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.009297648, 0.021284401, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(0.43682921, 1, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(14.361102, 11.603566, 9.1966475), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40.275667, 53.258703, 46.484493), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.80955491, 0.11, 0.5536311), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.20812766, 0.025692757, 0.10981107), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.4067526, 0.045583354, 0.25986553), tolerance = 1e-07) 
expect_equal(powerResult$rejectPerStage[3, ], c(0.19467465, 0.038723888, 0.1839545), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.61488026, 0.071276112, 0.3696766), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(11.329861, 10.584003, 9.8033045), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(19.345216, 17.58497, 15.96575), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(20.378955, 22.994709, 18.586202), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$numberOfSubjects[1, ], c(169.94792, 158.76005, 147.04957), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 180)) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(177.90788, 179.45429, 176.38168), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) 
expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Freedman", pi2 = 0.4, pi1 = c(0.2, 0.4, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(74.550809, 32.565971, 18.155299), tolerance = 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.009297648, 0.021284401, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(0.43682921, 1, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, 
c(14.361102, 11.603566, 9.1966475), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(44.992896, 53.258703, 44.408918), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.62751278, 0.11, 0.65422406), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.13113454, 0.025692757, 0.13983652), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.30051056, 0.045583354, 0.31559857), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19586767, 0.038723888, 0.19878897), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.4316451, 0.071276112, 0.45543509), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(11.329861, 10.584003, 9.8033045), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(19.345216, 17.58497, 15.96575), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(22.281639, 22.994709, 17.952578), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$numberOfSubjects[1, ], c(169.94792, 158.76005, 147.04957), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 180)) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(178.68182, 179.45429, 175.39233), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) 
expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance 
= 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "HsiehFreedman", pi2 = 0.4, pi1 = c(0.2, 0.4, 0.6), dropoutRate1 = 0.1, dropoutTime = 12, eventTime = 24, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(74.550809, 32.565971, 18.155299), tolerance 
= 1e-07) expect_equal(powerResult$median2, 32.565971, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.009297648, 0.021284401, 0.03817878), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.021284401, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(0.43682921, 1, 1.7937447), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(14.361102, 11.603566, 9.1966475), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(41.467466, 53.258703, 46.846888), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.77062516, 0.11, 0.53442991), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.18711904, 0.025692757, 0.10481397), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.38354247, 0.045583354, 0.24956205), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.19996364, 0.038723888, 0.18005389), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.57066151, 0.071276112, 0.35437602), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(11.329861, 10.584003, 9.8033045), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(19.345216, 17.58497, 15.96575), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(20.85758, 22.994709, 18.697033), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(26.361102, 23.603566, 21.196648), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$numberOfSubjects[1, ], c(169.94792, 158.76005, 147.04957), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 180)) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) 
expect_equal(powerResult$expectedNumberOfSubjects, c(178.11906, 179.45429, 176.54633), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, 
powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} 
powerResult <- getPowerSurvival(designGS2, lambda2 = 0.04, hazardRatio = c(0.4, 1, 1.8), dropoutRate1 = 0.1, dropoutTime = 12, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(43.321699, 17.32868, 9.6270442), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.016, 0.04, 0.072), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(5.0603074, 3.4618446, 2.2113432), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(37.895698, 53.258703, 46.404972), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.8740886, 0.11, 0.55777827), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.25384788, 0.025692757, 0.11091682), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.44431262, 0.045583354, 0.26210486), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.17592811, 0.038723888, 0.18475659), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.6981605, 0.071276112, 0.37302168), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4193774, 7.8220347, 7.2555625), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.316286, 12.307189, 11.466641), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(13.20331, 15.121757, 12.72043), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) 
expect_equal(powerResult$numberOfSubjects[1, ], c(126.29066, 117.33052, 108.83344), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 171.99961), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(166.366, 178.38985, 170.00949), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # 
@refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "Freedman", lambda2 = 0.04, hazardRatio = c(0.4, 1, 1.8), dropoutRate1 = 0.1, dropoutTime = 12, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(43.321699, 17.32868, 9.6270442), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.016, 0.04, 0.072), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(5.0603074, 3.4618446, 2.2113432), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(43.761896, 53.258703, 44.296935), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.68239647, 0.11, 0.65920633), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.14972738, 0.025692757, 0.14152926), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.33173334, 0.045583354, 0.31843565), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.20093576, 0.038723888, 0.19924141), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.48146072, 0.071276112, 0.45996492), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4193774, 7.8220347, 7.2555625), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.316286, 12.307189, 11.466641), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(14.524507, 15.121757, 12.352885), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) 
expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$numberOfSubjects[1, ], c(126.29066, 117.33052, 108.83344), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 171.99961), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(171.95824, 178.38985, 167.38024), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, 
powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialTwoSided} # @refFS[Formula]{fs:ShiftParameterSurvivalHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedPatientAccrual} # 
@refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalFindFollowUpTime} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} powerResult <- getPowerSurvival(designGS2, typeOfComputation = "HsiehFreedman", lambda2 = 0.04, hazardRatio = c(0.4, 1, 1.8), dropoutRate1 = 0.1, dropoutTime = 12, maxNumberOfSubjects = 180, maxNumberOfEvents = 55, allocationRatioPlanned = 0.3 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(43.321699, 17.32868, 9.6270442), tolerance = 1e-07) expect_equal(powerResult$median2, 17.32868, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.016, 0.04, 0.072), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 15) expect_equal(powerResult$followUpTime, c(5.0603074, 3.4618446, 2.2113432), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(39.493229, 53.258703, 46.77542), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.83266548, 0.11, 0.53825584), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.2225769, 0.025692757, 0.10579404), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.42045819, 0.045583354, 0.25160664), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.18963039, 0.038723888, 0.18085515), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.64303509, 0.071276112, 0.35740069), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(8.4193774, 7.8220347, 7.2555625), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(13.316286, 12.307189, 11.466641), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(17.060307, 15.461845, 14.211343), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(13.562832, 15.121757, 12.784878), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(17.060307, 15.461845, 14.211343), tolerance = 
1e-07) expect_equal(powerResult$eventsPerStage[1, ], 16.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[2, ], 38.5, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[3, ], 55) expect_equal(powerResult$numberOfSubjects[1, ], c(126.29066, 117.33052, 108.83344), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(180, 180, 171.99961), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(180, 180, 180)) expect_equal(powerResult$expectedNumberOfSubjects, c(168.04554, 178.38985, 170.45805), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[1, ], 0.27158358, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[2, ], 0.48064547, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleLower[3, ], 0.5627937, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[1, ], 3.6821078, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[2, ], 2.0805356, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScaleUpper[3, ], 1.77685, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[1, ], 0.025692757, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[2, ], 0.055458318, tolerance = 1e-07) expect_equal(powerResult$criticalValuesPValueScale[3, ], 0.072467622, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleLower, powerResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScaleUpper, powerResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesPValueScale, powerResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_plan_section("Testing the Power Calculation of Survival Designs for Other Parameter Variants") test_that("'getPowerSurvival': Four stage O'Brien and Fleming group 
sequential design with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:PowerGroupSequentialOneSided} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} powerResult <- getPowerSurvival( design = getDesignGroupSequential(kMax = 4), maxNumberOfEvents = 40, maxNumberOfSubjects = 200 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(6.1115255, 3.442577, 1.6316894, 0.30440109), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(39.87408, 38.142534, 33.62741, 28.346513), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.025, 0.30882929, 0.73475105, 0.94374207), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(2.5763449e-05, 0.00047146778, 0.0030806507, 0.012020122), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.0020845834, 0.034441261, 0.15314753, 0.35953485), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[3, ], c(0.0083455469, 0.11544971, 0.32172195, 0.41021864), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[4, ], c(0.014544106, 0.15846685, 0.25680093, 0.16196846), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.010455897, 0.15036244, 0.47795013, 0.78177362), tolerance = 1e-07) 
expect_equal(powerResult$analysisTime[1, ], c(8.2382885, 7.2643376, 6.5021817, 5.8683997), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(11.775158, 10.405299, 9.3411982, 8.4606249), tolerance = 1e-07) expect_equal(powerResult$analysisTime[3, ], c(14.851313, 12.90759, 11.580651, 10.517763), tolerance = 1e-07) expect_equal(powerResult$analysisTime[4, ], c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(18.070854, 14.972567, 12.292784, 10.112156), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(18.111525, 15.442577, 13.631689, 12.304401), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 10) expect_equal(powerResult$eventsPerStage[2, ], 20) expect_equal(powerResult$eventsPerStage[3, ], 30) expect_equal(powerResult$eventsPerStage[4, ], 40) expect_equal(powerResult$numberOfSubjects[1, ], c(137.30481, 121.07229, 108.36969, 97.806661), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(196.25264, 173.42164, 155.68664, 141.01041), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[3, ], c(200, 200, 193.01085, 175.29605), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[4, ], c(200, 200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(199.99057, 199.0474, 190.68267, 167.42879), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 12.942983, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 3.5976357, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[3, ], 2.3478921, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[4, ], 1.8967435, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) 
powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05) expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- 
as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getPowerSurvival': For fixed sample design, determine necessary accrual time if 200 subjects and 30 subjects per time unit can be recruited", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} powerResult <- getPowerSurvival( maxNumberOfEvents = 40, accrualTime = c(0), accrualIntensity = 30, maxNumberOfSubjects = 200 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07) expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(8.7010979, 6.004962, 4.1561659, 2.779256), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40)) expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(15.367765, 12.671629, 10.822833, 9.4459226), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(15.367765, 12.671629, 10.822833, 9.4459226), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) 
# NOTE(review): this chunk was whitespace-mangled (many statements joined onto single
# physical lines, so mid-line '#' comments swallowed trailing code). The token stream
# below is unchanged; it is only reflowed to one statement per line with comments
# restored to their own lines.
# Tail of the previous test's round-trip block (its beginning lies above this chunk):
# the result object is serialized to R code via getObjectRCode(), re-evaluated, and
# the reconstructed object is checked field-by-field against the original.
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        # Generic accessor checks: names(), as.data.frame(), as.matrix() must all work.
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: piecewise accrual given as accrualTime/accrualIntensity vectors
# with a fixed maxNumberOfSubjects; the total accrual time must be derived.
test_that("'getPowerSurvival': Determine necessary accrual time if 200 subjects and if the first 6 time units 20 subjects per time unit can be recruited, then 30 subjects per time unit", {
    .skipTestIfDisabled()
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    powerResult <- getPowerSurvival(
        maxNumberOfEvents = 40,
        accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200
    )

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(powerResult$totalAccrualTime, 8.6666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, c(8.127286, 5.4402735, 3.6040872, 2.2435211), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200))
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07)
    # Extended checks (print/summary output and code-based round trip) run only
    # when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda2,
            powerResult$lambda2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$totalAccrualTime, powerResult$totalAccrualTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: maxNumberOfSubjects omitted, so it must be computed from the
# closed accrual specification (accrual stops at time 10).
test_that("'getPowerSurvival': Determine maximum number of Subjects if the first 6 time units 20 subjects per time unit can be recruited, and after 10 time units 30 subjects per time unit", {
    .skipTestIfDisabled()
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    powerResult <- getPowerSurvival(
        maxNumberOfEvents = 40, accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30)
    )

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    # 6 * 20 + 4 * 30 = 240 subjects accrued over the 10 time units
    expect_equal(powerResult$maxNumberOfSubjects, 240)
    expect_equal(powerResult$totalAccrualTime, 10)
    expect_equal(powerResult$followUpTime, c(5.3825871, 3.1889048, 1.691326, 0.58951828), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, c(240, 240, 240, 240))
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$maxNumberOfSubjects, powerResult$maxNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$totalAccrualTime,
            powerResult$totalAccrualTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: same scenario as the vector form above, but accrual given as a
# named list ("interval" = intensity); results must match the vector-based call.
test_that("'getPowerSurvival': Specify accrual time as a list", {
    .skipTestIfDisabled()
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    at <- list("0 - <6" = 20, "6 - Inf" = 30)
    powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at, maxNumberOfSubjects = 200)

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(powerResult$totalAccrualTime, 8.6666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, c(8.127286, 5.4402735, 3.6040872, 2.2435211), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(16.793953, 14.10694, 12.270754, 10.910188), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, c(200, 200, 200, 200))
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$totalAccrualTime, powerResult$totalAccrualTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance
            = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: list-form accrual with a closed last interval ("6 - <=10"),
# so maxNumberOfSubjects must again be derived (240, as in the vector test).
test_that("'getPowerSurvival': Specify accrual time as a list, if maximum number of subjects need to be calculated", {
    .skipTestIfDisabled()
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    at <- list("0 - <6" = 20, "6 - <=10" = 30)
    powerResult <- getPowerSurvival(maxNumberOfEvents = 40, accrualTime = at)

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(powerResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(powerResult$maxNumberOfSubjects, 240)
    expect_equal(powerResult$totalAccrualTime, 10)
    expect_equal(powerResult$followUpTime, c(5.3825871, 3.1889048, 1.691326, 0.58951828), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(40, 40, 40, 40))
    expect_equal(powerResult$overallReject, c(0.025, 0.31674317, 0.74507635, 0.94783846), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(15.382587,
        13.188905, 11.691326, 10.589518), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(15.382587, 13.188905, 11.691326, 10.589518), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfSubjects, c(240, 240, 240, 240))
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 1.8585471, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$maxNumberOfSubjects, powerResult$maxNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$totalAccrualTime, powerResult$totalAccrualTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale,
            powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: two-stage group sequential design (O'Brien & Fleming defaults);
# effect given via event rates pi1/pi2 at eventTime = 24, tested one-sided with
# directionUpper = FALSE (benefit means hazard ratio < 1).
test_that("'getPowerSurvival': Specify effect size for a two-stage group design with O'Brien & Fleming boundaries Effect size is based on event rates at specified event time, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", {
    .skipTestIfDisabled()
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    powerResult <- getPowerSurvival(
        design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE
    )

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$median1, 74.550809, tolerance = 1e-07)
    expect_equal(powerResult$median2, 46.640597, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, 0.009297648, tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.014861456, tolerance = 1e-07)
    expect_equal(powerResult$hazardRatio, 0.62562161, tolerance = 1e-07)
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, 12.65889, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, 39.194966, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.31394451, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.04025172), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.27369279), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.040251721, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ],
        14.822645, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], 24.65889, tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, 24.262964, tolerance = 1e-07)
    expect_equal(powerResult$maxStudyDuration, 24.65889, tolerance = 1e-07)
    expect_equal(powerResult$eventsPerStage[1, ], 20)
    expect_equal(powerResult$eventsPerStage[2, ], 40)
    expect_equal(powerResult$numberOfSubjects[1, ], 200)
    expect_equal(powerResult$numberOfSubjects[2, ], 200)
    expect_equal(powerResult$expectedNumberOfSubjects, 200)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.28632231, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.53509093, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: effect given via control-group event rate pi2 at eventTime = 24
# plus hazardRatio = 0.5; pi1 is derived. Again one-sided with directionUpper = FALSE.
test_that("'getPowerSurvival': Effect size is based on event rate at specified event time for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", {
    .skipTestIfDisabled()
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:ShiftParameterSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    powerResult <- getPowerSurvival(
        design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE
    )

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$pi1, 0.16333997, tolerance = 1e-07)
    expect_equal(powerResult$median1, 93.281194, tolerance = 1e-07)
    expect_equal(powerResult$median2, 46.640597, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, 0.007430728, tolerance = 1e-07)
    expect_equal(powerResult$lambda2, 0.014861456, tolerance = 1e-07)
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, 14.346945, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, 37.874505, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.5879328, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.48165803), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.10627477, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], 15.582247, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], 26.346945, tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, 25.202929, tolerance = 1e-07)
    expect_equal(powerResult$maxStudyDuration, 26.346945, tolerance = 1e-07)
    expect_equal(powerResult$eventsPerStage[1, ], 20)
    expect_equal(powerResult$eventsPerStage[2, ], 40)
    expect_equal(powerResult$numberOfSubjects[1, ], 200)
    expect_equal(powerResult$numberOfSubjects[2, ], 200)
    expect_equal(powerResult$expectedNumberOfSubjects, 200)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.28632231, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.53509093, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$pi1, powerResult$pi1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$median2, powerResult$median2,
            tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda2, powerResult$lambda2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: effect given via control-group hazard rate lambda2 = 0.02 plus
# hazardRatio = 0.5 (no eventTime needed); one-sided with directionUpper = FALSE.
test_that("'getPowerSurvival': Effect size is based on hazard rate for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", {
    .skipTestIfDisabled()
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    powerResult <- getPowerSurvival(
        design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE
    )

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$median1, 69.314718, tolerance = 1e-07)
    expect_equal(powerResult$median2, 34.657359, tolerance = 1e-07)
    expect_equal(powerResult$lambda1, 0.01, tolerance = 1e-07)
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, 9.1631017, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, 37.874505, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.5879328, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.48165803), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.10627477, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], 13.164641, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], 21.163102, tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, 20.313067, tolerance = 1e-07)
    expect_equal(powerResult$maxStudyDuration, 21.163102, tolerance = 1e-07)
    expect_equal(powerResult$eventsPerStage[1, ], 20)
    expect_equal(powerResult$eventsPerStage[2, ], 40)
    expect_equal(powerResult$numberOfSubjects[1, ], 200)
    expect_equal(powerResult$numberOfSubjects[2, ], 200)
    expect_equal(powerResult$expectedNumberOfSubjects, 200)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.28632231, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 0.53509093, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$lambda1, powerResult$lambda1, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: piecewise exponential control hazards (intervals 0/5/10) combined
# with a vector of hazard ratios; results are vectorized over hazardRatio.
test_that("'getPowerSurvival': Specification of piecewise exponential survival time and hazard ratios", {
    .skipTestIfDisabled()
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual}
    # @refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual}
    # @refFS[Formula]{fs:pieceWiseExponentialSurvival}
    powerResult <- getPowerSurvival(
        design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200
    )

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, c(4.2070411, 3.5734432, 3.2068918), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(39.412236, 38.617073, 37.874505), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, c(0.24668111, 0.45613948, 0.5879328), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c(0.0293882, 0.069146371, 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c(0.21729291, 0.38699311, 0.48165803), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, c(0.029388201, 0.069146372, 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(12.173669, 11.705285, 11.428161), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], c(16.207041, 15.573443, 15.206892), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(16.088508, 15.305974, 14.805308), tolerance = 1e-07)
    expect_equal(powerResult$maxStudyDuration, c(16.207041, 15.573443, 15.206892), tolerance = 1e-07)
    expect_equal(powerResult$eventsPerStage[1, ], 20)
    expect_equal(powerResult$eventsPerStage[2, ], 40)
    expect_equal(powerResult$numberOfSubjects[1, ], c(200, 195.08808,
        190.46935), tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200))
    expect_equal(powerResult$expectedNumberOfSubjects, c(200, 199.66036, 198.98713), tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07)
    # Extended checks (print/summary output and code-based round trip) run only
    # when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale,
            powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: same piecewise scenario, but the control hazards are given as a
# named list ("interval" = lambda); must reproduce the vector-based results above.
test_that("'getPowerSurvival': Specification of piecewise exponential survival time as list and hazard ratios", {
    .skipTestIfDisabled()
    # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual}
    # @refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual}
    # @refFS[Formula]{fs:pieceWiseExponentialSurvival}
    pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04)
    powerResult <- getPowerSurvival(
        design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200
    )

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, c(4.2070411, 3.5734432, 3.2068918), tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, c(39.412236, 38.617073, 37.874505), tolerance = 1e-07)
    expect_equal(powerResult$overallReject, c(0.24668111, 0.45613948, 0.5879328), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c(0.0293882, 0.069146371, 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c(0.21729291, 0.38699311, 0.48165803), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, c(0.029388201, 0.069146372, 0.10627477), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], c(12.173669, 11.705285, 11.428161), tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], c(16.207041, 15.573443, 15.206892), tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, c(16.088508, 15.305974, 14.805308), tolerance = 1e-07)
    expect_equal(powerResult$maxStudyDuration,
        c(16.207041, 15.573443, 15.206892), tolerance = 1e-07)
    expect_equal(powerResult$eventsPerStage[1, ], 20)
    expect_equal(powerResult$eventsPerStage[2, ], 40)
    expect_equal(powerResult$numberOfSubjects[1, ], c(200, 195.08808, 190.46935), tolerance = 1e-07)
    expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200))
    expect_equal(powerResult$expectedNumberOfSubjects, c(200, 199.66036, 198.98713), tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07)
    expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(powerResult), NA)))
        expect_output(print(powerResult)$show())
        invisible(capture.output(expect_error(summary(powerResult), NA)))
        expect_output(summary(powerResult)$show())
        powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL)))
        expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$numberOfSubjects,
            powerResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(powerResult), "character")
        df <- as.data.frame(powerResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(powerResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: piecewise exponential hazards specified for BOTH arms
# (lambda1 and lambda2), yielding a constant hazard ratio of 1.5.
# NOTE(review): this test continues past the end of this chunk; the fragment
# below is preserved exactly as found.
test_that("'getPowerSurvival': Specification of piecewise exponential survival time for both treatment arms", {
    .skipTestIfDisabled()
    # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual}
    # @refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual}
    # @refFS[Formula]{fs:pieceWiseExponentialSurvival}
    powerResult <- getPowerSurvival(
        design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015, 0.03, 0.06), maxNumberOfEvents = 40, maxNumberOfSubjects = 200
    )

    ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results
    expect_equal(powerResult$hazardRatio, 1.5, tolerance = 1e-07)
    expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(powerResult$followUpTime, 4.2070411, tolerance = 1e-07)
    expect_equal(powerResult$expectedNumberOfEvents, 39.412236, tolerance = 1e-07)
    expect_equal(powerResult$overallReject, 0.24668111, tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[1, ], c("stage = 1" = 0.0293882), tolerance = 1e-07)
    expect_equal(powerResult$rejectPerStage[2, ], c("stage = 2" = 0.21729291), tolerance = 1e-07)
    expect_equal(powerResult$earlyStop, 0.029388201, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[1, ], 12.173669, tolerance = 1e-07)
    expect_equal(powerResult$analysisTime[2, ], 16.207041, tolerance = 1e-07)
    expect_equal(powerResult$studyDuration, 16.088508,
tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, 16.207041, tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$numberOfSubjects[1, ], 200) expect_equal(powerResult$numberOfSubjects[2, ], 200) expect_equal(powerResult$expectedNumberOfSubjects, 200) expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getPowerSurvival': Specification of piecewise exponential survival time as a list", { .skipTestIfDisabled() # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual} # @refFS[Formula]{fs:pieceWiseExponentialSurvival} pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) powerResult <- getPowerSurvival( design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2), maxNumberOfEvents = 40, maxNumberOfSubjects = 200 ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, c(4.2070411, 3.5734432, 3.2068918), tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, c(39.412236, 38.617073, 37.874505), tolerance = 1e-07) expect_equal(powerResult$overallReject, c(0.24668111, 0.45613948, 0.5879328), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[1, ], c(0.0293882, 0.069146371, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$rejectPerStage[2, ], c(0.21729291, 0.38699311, 0.48165803), tolerance = 1e-07) expect_equal(powerResult$earlyStop, c(0.029388201, 0.069146372, 0.10627477), tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], c(12.173669, 11.705285, 
11.428161), tolerance = 1e-07) expect_equal(powerResult$analysisTime[2, ], c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$studyDuration, c(16.088508, 15.305974, 14.805308), tolerance = 1e-07) expect_equal(powerResult$maxStudyDuration, c(16.207041, 15.573443, 15.206892), tolerance = 1e-07) expect_equal(powerResult$eventsPerStage[1, ], 20) expect_equal(powerResult$eventsPerStage[2, ], 40) expect_equal(powerResult$numberOfSubjects[1, ], c(200, 195.08808, 190.46935), tolerance = 1e-07) expect_equal(powerResult$numberOfSubjects[2, ], c(200, 200, 200)) expect_equal(powerResult$expectedNumberOfSubjects, c(200, 199.66036, 198.98713), tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 3.4925675, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[2, ], 1.8688412, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$rejectPerStage, powerResult$rejectPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$earlyStop, powerResult$earlyStop, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$maxStudyDuration, powerResult$maxStudyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$eventsPerStage, powerResult$eventsPerStage, tolerance = 1e-05) expect_equal(powerResultCodeBased$numberOfSubjects, powerResult$numberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getPowerSurvival': Specify effect size based on median survival times (median1 = 5, median2 = 3)", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:lambdabymedian} powerResult <- getPowerSurvival( lambda1 = log(2) / 5, lambda2 = log(2) / 3, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, 5) expect_equal(powerResult$median2, 3) expect_equal(powerResult$hazardRatio, 0.6, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, -5.9093279, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, 40) expect_equal(powerResult$overallReject, 0.36520074, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 6.0906721, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 6.0906721, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 101.5112, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.53805471, tolerance = 1e-07) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getPowerSurvival': Specify effect size based on median survival times of Weibull distribtion with kappa = 2 (median1 = 5, median2 = 3)", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:lambdabymedian} powerResult <- 
getPowerSurvival( lambda1 = getLambdaByMedian(median = 5, kappa = 2), lambda2 = getLambdaByMedian(median = 3, kappa = 2), kappa = 2, maxNumberOfEvents = 40, maxNumberOfSubjects = 200, directionUpper = FALSE ) ## Comparison of the results of TrialDesignPlanSurvival object 'powerResult' with expected results expect_equal(powerResult$median1, 5) expect_equal(powerResult$median2, 3) expect_equal(powerResult$hazardRatio, 0.36, tolerance = 1e-07) expect_equal(powerResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(powerResult$followUpTime, -5.7378582, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfEvents, 40) expect_equal(powerResult$overallReject, 0.8980967, tolerance = 1e-07) expect_equal(powerResult$analysisTime[1, ], 6.2621418, tolerance = 1e-07) expect_equal(powerResult$studyDuration, 6.2621418, tolerance = 1e-07) expect_equal(powerResult$expectedNumberOfSubjects, 104.36903, tolerance = 1e-07) expect_equal(powerResult$criticalValuesEffectScale[1, ], 0.53805471, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(powerResult), NA))) expect_output(print(powerResult)$show()) invisible(capture.output(expect_error(summary(powerResult), NA))) expect_output(summary(powerResult)$show()) powerResultCodeBased <- eval(parse(text = getObjectRCode(powerResult, stringWrapParagraphWidth = NULL))) expect_equal(powerResultCodeBased$median1, powerResult$median1, tolerance = 1e-05) expect_equal(powerResultCodeBased$median2, powerResult$median2, tolerance = 1e-05) expect_equal(powerResultCodeBased$hazardRatio, powerResult$hazardRatio, tolerance = 1e-05) expect_equal(powerResultCodeBased$accrualIntensity, powerResult$accrualIntensity, tolerance = 1e-05) expect_equal(powerResultCodeBased$followUpTime, powerResult$followUpTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfEvents, powerResult$expectedNumberOfEvents, tolerance = 1e-05) 
expect_equal(powerResultCodeBased$overallReject, powerResult$overallReject, tolerance = 1e-05) expect_equal(powerResultCodeBased$analysisTime, powerResult$analysisTime, tolerance = 1e-05) expect_equal(powerResultCodeBased$studyDuration, powerResult$studyDuration, tolerance = 1e-05) expect_equal(powerResultCodeBased$expectedNumberOfSubjects, powerResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(powerResultCodeBased$criticalValuesEffectScale, powerResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(powerResult), "character") df <- as.data.frame(powerResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(powerResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getPowerSurvival': Analysis time at last stage equals accrual time + follow-up time", { .skipTestIfDisabled() x1 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, lambda2 = 0.005, lambda1 = 0.01, maxNumberOfSubjects = 766, maxNumberOfEvents = 76 ) expect_equal(x1$overallReject, 1 - x1$.design$beta, tolerance = 0.01) expect_equal(x1$analysisTime[3], x1$accrualTime + x1$followUpTime) x2 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "P"), accrualTime = 12, maxNumberOfEvents = 76, maxNumberOfSubjects = 766, lambda2 = 0.005, lambda1 = 0.01 ) expect_equal(x2$analysisTime[3], x2$accrualTime + x2$followUpTime) x3 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "WT", deltaWT = 0.3), accrualTime = c(0, 12, 15), accrualIntensity = c(20, 30), lambda2 = 0.005, lambda1 = 0.01, maxNumberOfEvents = 76 ) expect_equal(x3$analysisTime[length(x3$analysisTime)], x3$accrualTime[length(x3$accrualTime)] + x3$followUpTime) x4 <- getPowerSurvival(getDesignGroupSequential(typeOfDesign = "WT", deltaWT = 0.3), accrualTime = c(0, 12, 15), accrualIntensity = c(40, 60), maxNumberOfEvents = 76, piecewiseSurvivalTime = c(0, 5), lambda2 = c(0.005, 0.01), 
hazardRatio = 0.8 ) expect_equal(x4$analysisTime[length(x4$analysisTime)], x4$accrualTime[length(x4$accrualTime)] + x4$followUpTime) }) rpact/tests/testthat/test-f_analysis_base_rates.R0000644000176200001440000046501314370207346022024 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_analysis_base_rates.R ## | Creation date: 06 February 2023, 12:05:27 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Analysis Rates Functionality for One Treatment") test_that("'getAnalysisResults' for a group sequential design and one treatment", { .skipTestIfDisabled() design0 <- getDesignGroupSequential( kMax = 2, alpha = 0.025, informationRates = c(0.2, 1), typeOfDesign = "asKD", gammaA = 2.8 ) dataExample0 <- getDataset( n = c(33), events = c(23) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x0 <- getAnalysisResults( design = design0, dataInput = dataExample0, thetaH0 = 0.4, normalApproximation = FALSE, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x0' with expected results expect_equal(x0$pi1, 0.6969697, 
tolerance = 1e-06) expect_equal(x0$testActions, c("continue", NA_character_)) expect_equal(x0$conditionalRejectionProbabilities, c(0.28801679, NA_real_), tolerance = 1e-06) expect_equal(x0$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x0$repeatedConfidenceIntervalLowerBounds, c(0.38475339, NA_real_), tolerance = 1e-06) expect_equal(x0$repeatedConfidenceIntervalUpperBounds, c(0.91556361, NA_real_), tolerance = 1e-06) expect_equal(x0$repeatedPValues, c(0.048557231, NA_real_), tolerance = 1e-06) expect_equal(x0$finalStage, NA_integer_) expect_equal(x0$finalPValues, c(NA_real_, NA_real_)) expect_equal(x0$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x0$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x0$medianUnbiasedEstimates, c(NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x0), NA))) expect_output(print(x0)$show()) invisible(capture.output(expect_error(summary(x0), NA))) expect_output(summary(x0)$show()) x0CodeBased <- eval(parse(text = getObjectRCode(x0, stringWrapParagraphWidth = NULL))) expect_equal(x0CodeBased$pi1, x0$pi1, tolerance = 1e-05) expect_equal(x0CodeBased$testActions, x0$testActions, tolerance = 1e-05) expect_equal(x0CodeBased$conditionalRejectionProbabilities, x0$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x0CodeBased$conditionalPower, x0$conditionalPower, tolerance = 1e-05) expect_equal(x0CodeBased$repeatedConfidenceIntervalLowerBounds, x0$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x0CodeBased$repeatedConfidenceIntervalUpperBounds, x0$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x0CodeBased$repeatedPValues, x0$repeatedPValues, tolerance = 1e-05) expect_equal(x0CodeBased$finalStage, x0$finalStage, tolerance = 1e-05) expect_equal(x0CodeBased$finalPValues, x0$finalPValues, tolerance = 1e-05) expect_equal(x0CodeBased$finalConfidenceIntervalLowerBounds, 
x0$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x0CodeBased$finalConfidenceIntervalUpperBounds, x0$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x0CodeBased$medianUnbiasedEstimates, x0$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x0), "character") df <- as.data.frame(x0) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x0) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults' for a four-stage group sequential design and one treatment", { .skipTestIfDisabled() design1 <- getDesignGroupSequential( kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(-0.5, 0, 0.5), typeOfDesign = "asKD", gammaA = 2.8 ) dataExample1 <- getDataset( n = c(10, 10, 20, 11), events = c(4, 5, 5, 6) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} x1 <- getAnalysisResults( design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.75, normalApproximation = FALSE, directionUpper = FALSE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results expect_equal(x1$pi1, 0.45, tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$conditionalRejectionProbabilities, c(0.13502024, 0.39663603, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.035340833, 0.15564775, 
NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.88809209, 0.77284164, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.49999905, 0.056127482, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$pi1, x1$pi1, tolerance = 1e-05) expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 
1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateApproximationAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneRate} # @refFS[Formula]{fs:medianUnbiasedEstimate} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} x2 <- getAnalysisResults( design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.75, normalApproximation = TRUE, directionUpper = FALSE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results expect_equal(x2$pi1, 0.35, tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "reject and stop", "reject and stop", NA_character_)) expect_equal(x2$conditionalRejectionProbabilities, c(0.21465031, 0.55995383, 1, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.08898791, 0.19243551, 0.206358, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.81981977, 0.73748168, 0.52720845, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.47958473, 0.014066714, 1.9536724e-06, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, 2) expect_equal(x2$finalPValues, c(NA_real_, 0.0011783609, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.18821106, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.62661997, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, 0.40681825, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$pi1, x2$pi1, tolerance = 1e-05) expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05) expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 
0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneRateEffect} x3 <- getAnalysisResults( design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.75, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = FALSE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x3' with expected results expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$conditionalRejectionProbabilities, c(0.13502024, 0.39663603, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.85193241, 0.94869662), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.035340833, 0.15564775, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.88809209, 0.77284164, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.056127482, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) 
invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$testActions, x3$testActions, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalStage, x3$finalStage, tolerance = 1e-05) expect_equal(x3CodeBased$finalPValues, x3$finalPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalLowerBounds, x3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalUpperBounds, x3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$medianUnbiasedEstimates, x3$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData1 <- testGetAnalysisResultsPlotData(x3, piTreatmentRange = seq(0.45, 0.75, 0.05)) ## Comparison of the results of list object 'plotData1' with expected results expect_equal(plotData1$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.98024945, 0.94869662, 0.88988709, 0.79611571, 0.66506207, 0.50313626, 0.32784789), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(1, 0.9039239, 0.66761715, 0.40289032, 0.19865977, 
0.080038099, 0.026347981), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 18") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateApproximationAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:medianUnbiasedEstimate} # @refFS[Formula]{fs:finalCIOneRate} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneRateEffect} x4 <- getAnalysisResults( design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.75, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = FALSE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x4' with expected results expect_equal(x4$testActions, c("continue", "reject and stop", NA_character_, NA_character_)) expect_equal(x4$conditionalRejectionProbabilities, c(0.21465031, 0.55995383, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, 0.9494174, 0.9843063), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(0.08898791, 0.19243551, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.81981977, 0.73748168, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.47958473, 0.014066714, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, 2) expect_equal(x4$finalPValues, c(NA_real_, 0.0011783609, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, 0.18821106, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.62661997, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, 0.40681825, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$testActions, x4$testActions, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalRejectionProbabilities, x4$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPower, x4$conditionalPower, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalLowerBounds, x4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalUpperBounds, x4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedPValues, x4$repeatedPValues, tolerance = 1e-05) expect_equal(x4CodeBased$finalStage, x4$finalStage, tolerance = 1e-05) expect_equal(x4CodeBased$finalPValues, x4$finalPValues, tolerance = 1e-05) expect_equal(x4CodeBased$finalConfidenceIntervalLowerBounds, x4$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$finalConfidenceIntervalUpperBounds, x4$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$medianUnbiasedEstimates, x4$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData2 <- 
testGetAnalysisResultsPlotData(x4, piTreatmentRange = seq(0.45, 0.75, 0.05)) ## Comparison of the results of list object 'plotData2' with expected results expect_equal(plotData2$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.99501417, 0.9843063, 0.96005739, 0.91353722, 0.83535366, 0.71802165, 0.55995335), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(1, 0.9039239, 0.66761715, 0.40289032, 0.19865977, 0.080038099, 0.026347981), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 18") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneRateEffect} x5 <- getAnalysisResults( design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.25, normalApproximation = FALSE, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x5' with expected results expect_equal(x5$pi1, 0.35, tolerance = 1e-07) expect_equal(x5$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x5$conditionalRejectionProbabilities, c(0.033369687, 0.13517192, 0.020135528, NA_real_), tolerance = 1e-07) expect_equal(x5$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds, c(0.035340833, 0.15564775, 0.18966473, NA_real_), tolerance = 1e-07) 
expect_equal(x5$repeatedConfidenceIntervalUpperBounds, c(0.88809209, 0.77284164, 0.53925561, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues, c(0.49999905, 0.49999905, 0.20027888, NA_real_), tolerance = 1e-07) expect_equal(x5$finalStage, NA_integer_) expect_equal(x5$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$pi1, x5$pi1, tolerance = 1e-05) expect_equal(x5CodeBased$testActions, x5$testActions, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalRejectionProbabilities, x5$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPower, x5$conditionalPower, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedConfidenceIntervalLowerBounds, x5$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedConfidenceIntervalUpperBounds, x5$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedPValues, x5$repeatedPValues, tolerance = 1e-05) expect_equal(x5CodeBased$finalStage, x5$finalStage, tolerance = 1e-05) expect_equal(x5CodeBased$finalPValues, x5$finalPValues, tolerance = 1e-05) expect_equal(x5CodeBased$finalConfidenceIntervalLowerBounds, x5$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x5CodeBased$finalConfidenceIntervalUpperBounds, x5$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) 
expect_equal(x5CodeBased$medianUnbiasedEstimates, x5$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateApproximationAlternativeGreater} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneRateEffect} x6 <- getAnalysisResults( design = design1, dataInput = dataExample1, stage = 3, thetaH0 = 0.25, normalApproximation = TRUE, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x6' with expected results expect_equal(x6$pi1, 0.35, tolerance = 1e-07) expect_equal(x6$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x6$conditionalRejectionProbabilities, c(0.049321562, 0.20984263, 0.048813267, NA_real_), tolerance = 1e-07) expect_equal(x6$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$repeatedConfidenceIntervalLowerBounds, c(0.08898791, 0.19243551, 0.206358, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalUpperBounds, c(0.81981977, 0.73748168, 0.52720845, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues, c(0.49999905, 0.27035282, 0.14086509, NA_real_), tolerance = 1e-07) expect_equal(x6$finalStage, NA_integer_) expect_equal(x6$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) 
expect_equal(x6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$pi1, x6$pi1, tolerance = 1e-05) expect_equal(x6CodeBased$testActions, x6$testActions, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalRejectionProbabilities, x6$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPower, x6$conditionalPower, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedConfidenceIntervalLowerBounds, x6$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedConfidenceIntervalUpperBounds, x6$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedPValues, x6$repeatedPValues, tolerance = 1e-05) expect_equal(x6CodeBased$finalStage, x6$finalStage, tolerance = 1e-05) expect_equal(x6CodeBased$finalPValues, x6$finalPValues, tolerance = 1e-05) expect_equal(x6CodeBased$finalConfidenceIntervalLowerBounds, x6$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x6CodeBased$finalConfidenceIntervalUpperBounds, x6$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x6CodeBased$medianUnbiasedEstimates, x6$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # 
@refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneRateEffect} x7 <- getAnalysisResults( design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.25, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x7' with expected results expect_equal(x7$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x7$conditionalRejectionProbabilities, c(0.033369687, 0.13517192, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$conditionalPower, c(NA_real_, NA_real_, 0.58576815, 0.82581584), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalLowerBounds, c(0.035340833, 0.15564775, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalUpperBounds, c(0.88809209, 0.77284164, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedPValues, c(0.49999905, 0.49999905, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$finalStage, NA_integer_) expect_equal(x7$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, 
stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$testActions, x7$testActions, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalRejectionProbabilities, x7$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPower, x7$conditionalPower, tolerance = 1e-05) expect_equal(x7CodeBased$repeatedConfidenceIntervalLowerBounds, x7$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x7CodeBased$repeatedConfidenceIntervalUpperBounds, x7$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x7CodeBased$repeatedPValues, x7$repeatedPValues, tolerance = 1e-05) expect_equal(x7CodeBased$finalStage, x7$finalStage, tolerance = 1e-05) expect_equal(x7CodeBased$finalPValues, x7$finalPValues, tolerance = 1e-05) expect_equal(x7CodeBased$finalConfidenceIntervalLowerBounds, x7$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x7CodeBased$finalConfidenceIntervalUpperBounds, x7$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x7CodeBased$medianUnbiasedEstimates, x7$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData3 <- testGetAnalysisResultsPlotData(x7, piTreatmentRange = seq(0.25, 0.55, 0.05)) ## Comparison of the results of list object 'plotData3' with expected results expect_equal(plotData3$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData3$condPowerValues, c(0.099723848, 0.21903134, 0.37478113, 0.54310492, 0.6994843, 0.82581584, 0.91388884), tolerance = 1e-07) expect_equal(plotData3$likelihoodValues, c(0.19865977, 0.40289032, 0.66761715, 0.9039239, 1, 0.9039239, 0.66761715), tolerance = 1e-07) expect_equal(plotData3$main, "Conditional Power with Likelihood") expect_equal(plotData3$xlab, "pi1") 
expect_equal(plotData3$ylab, "Conditional power / Likelihood") expect_equal(plotData3$sub, "Stage = 2, # of remaining subjects = 18") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateApproximationAlternativeGreater} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneRateEffect} x8 <- getAnalysisResults( design = design1, dataInput = dataExample1, stage = 2, thetaH0 = 0.25, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x8' with expected results expect_equal(x8$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x8$conditionalRejectionProbabilities, c(0.049321562, 0.20984263, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$conditionalPower, c(NA_real_, NA_real_, 0.76152324, 0.91259792), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalLowerBounds, c(0.08898791, 0.19243551, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalUpperBounds, c(0.81981977, 0.73748168, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedPValues, c(0.49999905, 0.27035282, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$finalStage, NA_integer_) expect_equal(x8$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) expect_equal(x8CodeBased$testActions, x8$testActions, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalRejectionProbabilities, x8$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalPower, x8$conditionalPower, tolerance = 1e-05) expect_equal(x8CodeBased$repeatedConfidenceIntervalLowerBounds, x8$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x8CodeBased$repeatedConfidenceIntervalUpperBounds, x8$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x8CodeBased$repeatedPValues, x8$repeatedPValues, tolerance = 1e-05) expect_equal(x8CodeBased$finalStage, x8$finalStage, tolerance = 1e-05) expect_equal(x8CodeBased$finalPValues, x8$finalPValues, tolerance = 1e-05) expect_equal(x8CodeBased$finalConfidenceIntervalLowerBounds, x8$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x8CodeBased$finalConfidenceIntervalUpperBounds, x8$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x8CodeBased$medianUnbiasedEstimates, x8$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData4 <- testGetAnalysisResultsPlotData(x8, piTreatmentRange = seq(0.25, 0.55, 0.05)) ## Comparison of the results of list object 'plotData4' with expected results expect_equal(plotData4$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData4$condPowerValues, c(0.20983879, 0.3743042, 0.5481143, 0.70471917, 0.82789376, 0.91259792, 
0.96272982), tolerance = 1e-07) expect_equal(plotData4$likelihoodValues, c(0.19865977, 0.40289032, 0.66761715, 0.9039239, 1, 0.9039239, 0.66761715), tolerance = 1e-07) expect_equal(plotData4$main, "Conditional Power with Likelihood") expect_equal(plotData4$xlab, "pi1") expect_equal(plotData4$ylab, "Conditional power / Likelihood") expect_equal(plotData4$sub, "Stage = 2, # of remaining subjects = 18") }) test_that("'getAnalysisResults' for a four-stage inverse sequential design and one treatment", { .skipTestIfDisabled() design2 <- getDesignInverseNormal( kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(-0.5, 0, 0.5), typeOfDesign = "asKD", gammaA = 2.8 ) dataExample2 <- getDataset( n = c(8, 10, 9, 11), # cumsum, overall n = (8, 18, 27, 38) events = c(4, 5, 5, 6) # cumsum, overall events = (4, 9, 14, 20) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x1 <- getAnalysisResults( design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.75, normalApproximation = FALSE, directionUpper = FALSE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results expect_equal(x1$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$conditionalRejectionProbabilities, c(0.055828725, 0.15918316, 0.28098687, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.046266926, 0.16132369, 0.26858957, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.95373307, 0.83867631, 0.76870127, NA_real_), tolerance = 
1e-07) expect_equal(x1$repeatedPValues, c(0.49999905, 0.43799317, 0.045574143, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$pi1, x1$pi1, tolerance = 1e-05) expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x1), "character") df <- 
as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateApproximationAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCIOneRate} # @refFS[Formula]{fs:medianUnbiasedEstimate} x2 <- getAnalysisResults( design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.75, normalApproximation = TRUE, directionUpper = FALSE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results expect_equal(x2$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x2$conditionalRejectionProbabilities, c(0.08807963, 0.32350578, 0.78413539, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.11314487, 0.21610036, 0.31861038, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.88685513, 0.78389964, 0.72001945, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.1020964, 0.0075111702, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, 3) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, 0.0050707339, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.3041323, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.68870859, NA_real_), tolerance = 1e-07) 
expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.49547717, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$pi1, x2$pi1, tolerance = 1e-05) expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05) expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # 
@refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x3 <- getAnalysisResults( design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.75, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = FALSE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x3' with expected results expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$conditionalRejectionProbabilities, c(0.055828725, 0.15918316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.69921202, 0.88465983), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.046266926, 0.16132369, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.95373307, 0.83867631, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.43799317, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$testActions, x3$testActions, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) 
expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalStage, x3$finalStage, tolerance = 1e-05) expect_equal(x3CodeBased$finalPValues, x3$finalPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalLowerBounds, x3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalUpperBounds, x3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$medianUnbiasedEstimates, x3$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData1 <- testGetAnalysisResultsPlotData(x3, piTreatmentRange = seq(0.45, 0.75, 0.05)) ## Comparison of the results of list object 'plotData1' with expected results expect_equal(plotData1$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.94793138, 0.88465983, 0.78396384, 0.64581102, 0.48045808, 0.30888817, 0.15917802), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 18") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # 
@refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateApproximationAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x4 <- getAnalysisResults( design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.75, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = FALSE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x4' with expected results expect_equal(x4$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x4$conditionalRejectionProbabilities, c(0.08807963, 0.32350578, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, 0.85385983, 0.95015898), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(0.11314487, 0.21610036, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.88685513, 0.78389964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.49999905, 0.1020964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) expect_equal(x4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$testActions, x4$testActions, tolerance = 1e-05) 
expect_equal(x4CodeBased$conditionalRejectionProbabilities, x4$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPower, x4$conditionalPower, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalLowerBounds, x4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalUpperBounds, x4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedPValues, x4$repeatedPValues, tolerance = 1e-05) expect_equal(x4CodeBased$finalStage, x4$finalStage, tolerance = 1e-05) expect_equal(x4CodeBased$finalPValues, x4$finalPValues, tolerance = 1e-05) expect_equal(x4CodeBased$finalConfidenceIntervalLowerBounds, x4$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$finalConfidenceIntervalUpperBounds, x4$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$medianUnbiasedEstimates, x4$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData2 <- testGetAnalysisResultsPlotData(x4, piTreatmentRange = seq(0.45, 0.75, 0.05)) ## Comparison of the results of list object 'plotData2' with expected results expect_equal(plotData2$xValues, c(0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.98088099, 0.95015898, 0.89232289, 0.79901831, 0.66708346, 0.50248974, 0.32350375), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.91393119, 1, 0.91393119, 0.69767633, 0.44485807, 0.23692776, 0.10539922), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of 
remaining subjects = 18") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x5 <- getAnalysisResults( design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.25, normalApproximation = FALSE, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x5' with expected results expect_equal(x5$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x5$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x5$conditionalRejectionProbabilities, c(0.055828725, 0.15918316, 0.65085211, NA_real_), tolerance = 1e-07) expect_equal(x5$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds, c(0.046266926, 0.16132369, 0.26858957, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds, c(0.95373307, 0.83867631, 0.76870127, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues, c(0.49999905, 0.43799317, 0.013282796, NA_real_), tolerance = 1e-07) expect_equal(x5$finalStage, 3) expect_equal(x5$finalPValues, c(NA_real_, NA_real_, 0.007752129, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.29554194, NA_real_), tolerance = 1e-07) expect_equal(x5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.67875285, NA_real_), tolerance = 1e-07) expect_equal(x5$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.48769629, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- 
eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$pi1, x5$pi1, tolerance = 1e-05) expect_equal(x5CodeBased$testActions, x5$testActions, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalRejectionProbabilities, x5$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPower, x5$conditionalPower, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedConfidenceIntervalLowerBounds, x5$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedConfidenceIntervalUpperBounds, x5$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedPValues, x5$repeatedPValues, tolerance = 1e-05) expect_equal(x5CodeBased$finalStage, x5$finalStage, tolerance = 1e-05) expect_equal(x5CodeBased$finalPValues, x5$finalPValues, tolerance = 1e-05) expect_equal(x5CodeBased$finalConfidenceIntervalLowerBounds, x5$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x5CodeBased$finalConfidenceIntervalUpperBounds, x5$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x5CodeBased$medianUnbiasedEstimates, x5$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateApproximationAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x6 <- getAnalysisResults( design = design2, dataInput = dataExample2, stage = 3, thetaH0 = 0.25, normalApproximation = TRUE, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsInverseNormal 
object 'x6' with expected results expect_equal(x6$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x6$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x6$conditionalRejectionProbabilities, c(0.08807963, 0.32350578, 0.96959663, NA_real_), tolerance = 1e-07) expect_equal(x6$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$repeatedConfidenceIntervalLowerBounds, c(0.11314487, 0.21610036, 0.31861038, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalUpperBounds, c(0.88685513, 0.78389964, 0.72001945, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues, c(0.49999905, 0.1020964, 0.0013103922, NA_real_), tolerance = 1e-07) expect_equal(x6$finalStage, 3) expect_equal(x6$finalPValues, c(NA_real_, NA_real_, 0.002378519, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.3437363, NA_real_), tolerance = 1e-07) expect_equal(x6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.73847376, NA_real_), tolerance = 1e-07) expect_equal(x6$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.54446903, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$pi1, x6$pi1, tolerance = 1e-05) expect_equal(x6CodeBased$testActions, x6$testActions, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalRejectionProbabilities, x6$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPower, x6$conditionalPower, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedConfidenceIntervalLowerBounds, x6$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) 
expect_equal(x6CodeBased$repeatedConfidenceIntervalUpperBounds, x6$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedPValues, x6$repeatedPValues, tolerance = 1e-05) expect_equal(x6CodeBased$finalStage, x6$finalStage, tolerance = 1e-05) expect_equal(x6CodeBased$finalPValues, x6$finalPValues, tolerance = 1e-05) expect_equal(x6CodeBased$finalConfidenceIntervalLowerBounds, x6$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x6CodeBased$finalConfidenceIntervalUpperBounds, x6$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x6CodeBased$medianUnbiasedEstimates, x6$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneRateEffect} x7 <- getAnalysisResults( design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.25, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x7' with expected results expect_equal(x7$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x7$conditionalRejectionProbabilities, c(0.055828725, 0.15918316, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$conditionalPower, c(NA_real_, NA_real_, 0.69921202, 0.88465983), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalLowerBounds, c(0.046266926, 0.16132369, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalUpperBounds, c(0.95373307, 
0.83867631, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedPValues, c(0.49999905, 0.43799317, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$finalStage, NA_integer_) expect_equal(x7$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$testActions, x7$testActions, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalRejectionProbabilities, x7$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPower, x7$conditionalPower, tolerance = 1e-05) expect_equal(x7CodeBased$repeatedConfidenceIntervalLowerBounds, x7$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x7CodeBased$repeatedConfidenceIntervalUpperBounds, x7$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x7CodeBased$repeatedPValues, x7$repeatedPValues, tolerance = 1e-05) expect_equal(x7CodeBased$finalStage, x7$finalStage, tolerance = 1e-05) expect_equal(x7CodeBased$finalPValues, x7$finalPValues, tolerance = 1e-05) expect_equal(x7CodeBased$finalConfidenceIntervalLowerBounds, x7$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x7CodeBased$finalConfidenceIntervalUpperBounds, x7$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x7CodeBased$medianUnbiasedEstimates, x7$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) 
expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData3 <- testGetAnalysisResultsPlotData(x7, piTreatmentRange = seq(0.25, 0.55, 0.05)) ## Comparison of the results of list object 'plotData3' with expected results expect_equal(plotData3$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData3$condPowerValues, c(0.15917802, 0.30888817, 0.48045808, 0.64581102, 0.78396384, 0.88465983, 0.94793138), tolerance = 1e-07) expect_equal(plotData3$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData3$main, "Conditional Power with Likelihood") expect_equal(plotData3$xlab, "pi1") expect_equal(plotData3$ylab, "Conditional power / Likelihood") expect_equal(plotData3$sub, "Stage = 2, # of remaining subjects = 18") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerOneRateEffect} x8 <- getAnalysisResults( design = design2, dataInput = dataExample2, stage = 2, thetaH0 = 0.25, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = TRUE, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x8' with expected results expect_equal(x8$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x8$conditionalRejectionProbabilities, c(0.08807963, 0.32350578, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$conditionalPower, c(NA_real_, NA_real_, 0.85385983, 0.95015898), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalLowerBounds, c(0.11314487, 0.21610036, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x8$repeatedConfidenceIntervalUpperBounds, c(0.88685513, 0.78389964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedPValues, c(0.49999905, 0.1020964, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$finalStage, NA_integer_) expect_equal(x8$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) expect_equal(x8CodeBased$testActions, x8$testActions, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalRejectionProbabilities, x8$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalPower, x8$conditionalPower, tolerance = 1e-05) expect_equal(x8CodeBased$repeatedConfidenceIntervalLowerBounds, x8$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x8CodeBased$repeatedConfidenceIntervalUpperBounds, x8$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x8CodeBased$repeatedPValues, x8$repeatedPValues, tolerance = 1e-05) expect_equal(x8CodeBased$finalStage, x8$finalStage, tolerance = 1e-05) expect_equal(x8CodeBased$finalPValues, x8$finalPValues, tolerance = 1e-05) expect_equal(x8CodeBased$finalConfidenceIntervalLowerBounds, x8$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x8CodeBased$finalConfidenceIntervalUpperBounds, x8$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x8CodeBased$medianUnbiasedEstimates, x8$medianUnbiasedEstimates, tolerance = 
1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData4 <- testGetAnalysisResultsPlotData(x8, piTreatmentRange = seq(0.25, 0.55, 0.05)) ## Comparison of the results of list object 'plotData4' with expected results expect_equal(plotData4$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData4$condPowerValues, c(0.32350375, 0.50248974, 0.66708346, 0.79901831, 0.89232289, 0.95015898, 0.98088099), tolerance = 1e-07) expect_equal(plotData4$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData4$main, "Conditional Power with Likelihood") expect_equal(plotData4$xlab, "pi1") expect_equal(plotData4$ylab, "Conditional power / Likelihood") expect_equal(plotData4$sub, "Stage = 2, # of remaining subjects = 18") }) test_that("'getAnalysisResults' for a four-stage Fisher design and one treatment", { .skipTestIfDisabled() design3 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.4, 0.8, 1)) dataExample3 <- getDataset( n = c(8, 10, 9, 11), # cumsum, overall n = (8, 18, 27, 38) events = c(4, 5, 5, 6) # cumsum, overall events = (4, 9, 14, 20) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} x1 <- getAnalysisResults( design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.75, normalApproximation = FALSE, directionUpper = FALSE, iterations = 1000, seed = 123 ) ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results expect_equal(x1$pi1, 
0.51851852, tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x1$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, 0.018233808, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, 0.23521677, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, 0.79971589, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.23393398, 0.11483365, 0.11050779, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$pi1, x1$pi1, tolerance = 1e-05) expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, 
x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateApproximationAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} x2 <- getAnalysisResults( design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.75, normalApproximation = TRUE, directionUpper = FALSE, iterations = 1000, seed = 123 ) ## Comparison of the results of AnalysisResultsFisher object 'x2' with expected results expect_equal(x2$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x2$conditionalRejectionProbabilities, c(0.051264101, 0.1206033, 1, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.18175814, 0.2424364, 0.28642867, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.81824186, 0.7575636, 0.7496417, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.11554509, 0.032131177, 
0.024656293, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$pi1, x2$pi1, tolerance = 1e-05) expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05) expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) 
> 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionFisherInterim} x3 <- getAnalysisResults( design = design3, dataInput = dataExample3, stage = 2, thetaH0 = 0.75, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = FALSE, iterations = 1000, seed = 123 ) ## Comparison of the results of AnalysisResultsFisher object 'x3' with expected results expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.23393398, 0.11483365, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.522, 0.672), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) 
invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$testActions, x3$testActions, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalStage, x3$finalStage, tolerance = 1e-05) expect_equal(x3CodeBased$finalPValues, x3$finalPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalLowerBounds, x3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalUpperBounds, x3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$medianUnbiasedEstimates, x3$medianUnbiasedEstimates, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPowerSimulated, x3$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData1 <- testGetAnalysisResultsPlotData(x3, piTreatmentRange = seq(0.25, 0.55, 0.05)) ## Comparison of the results of list object 'plotData1' with expected results expect_equal(plotData1$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.997, 0.99, 0.967, 0.9, 0.822, 0.659, 0.534), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), 
tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 18") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} x4 <- getAnalysisResults( design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.25, normalApproximation = FALSE, directionUpper = TRUE, iterations = 1000, seed = 123 ) ## Comparison of the results of AnalysisResultsFisher object 'x4' with expected results expect_equal(x4$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x4$testActions, c("continue", "continue", "continue", NA_character_)) expect_equal(x4$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, 0.10237226, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 0.19023888, 0.23521677, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, 0.79971589, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.23393398, 0.11483365, 0.040061917, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) expect_equal(x4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$pi1, x4$pi1, tolerance = 1e-05) expect_equal(x4CodeBased$testActions, x4$testActions, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalRejectionProbabilities, x4$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPower, x4$conditionalPower, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalLowerBounds, x4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalUpperBounds, x4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedPValues, x4$repeatedPValues, tolerance = 1e-05) expect_equal(x4CodeBased$finalStage, x4$finalStage, tolerance = 1e-05) expect_equal(x4CodeBased$finalPValues, x4$finalPValues, tolerance = 1e-05) expect_equal(x4CodeBased$finalConfidenceIntervalLowerBounds, x4$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$finalConfidenceIntervalUpperBounds, x4$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$medianUnbiasedEstimates, x4$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateApproximationAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} x5 <- 
getAnalysisResults( design = design3, dataInput = dataExample3, stage = 3, thetaH0 = 0.25, normalApproximation = TRUE, directionUpper = TRUE, iterations = 1000, seed = 123 ) ## Comparison of the results of AnalysisResultsFisher object 'x5' with expected results expect_equal(x5$pi1, 0.51851852, tolerance = 1e-07) expect_equal(x5$testActions, c("continue", "continue", "reject and stop", NA_character_)) expect_equal(x5$conditionalRejectionProbabilities, c(0.051264101, 0.1206033, 1, NA_real_), tolerance = 1e-07) expect_equal(x5$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds, c(0.18175814, 0.2424364, 0.28642867, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds, c(0.81824186, 0.7575636, 0.7496417, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues, c(0.11554509, 0.032131177, 0.0055275316, NA_real_), tolerance = 1e-07) expect_equal(x5$finalStage, NA_integer_) expect_equal(x5$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$pi1, x5$pi1, tolerance = 1e-05) expect_equal(x5CodeBased$testActions, x5$testActions, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalRejectionProbabilities, x5$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPower, x5$conditionalPower, tolerance = 1e-05) 
expect_equal(x5CodeBased$repeatedConfidenceIntervalLowerBounds, x5$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedConfidenceIntervalUpperBounds, x5$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedPValues, x5$repeatedPValues, tolerance = 1e-05) expect_equal(x5CodeBased$finalStage, x5$finalStage, tolerance = 1e-05) expect_equal(x5CodeBased$finalPValues, x5$finalPValues, tolerance = 1e-05) expect_equal(x5CodeBased$finalConfidenceIntervalLowerBounds, x5$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x5CodeBased$finalConfidenceIntervalUpperBounds, x5$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x5CodeBased$medianUnbiasedEstimates, x5$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:testStatisticOneRateApproximation} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionFisherInterim} x6 <- getAnalysisResults( design = design3, dataInput = dataExample3, stage = 2, thetaH0 = 0.25, nPlanned = c(12, 6), pi1 = 0.5, normalApproximation = FALSE, directionUpper = TRUE, iterations = 1000, seed = 123 ) ## Comparison of the results of AnalysisResultsFisher object 'x6' with expected results expect_equal(x6$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x6$conditionalRejectionProbabilities, c(0.027980027, 0.040164764, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalLowerBounds, c(0.12025548, 
0.19023888, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalUpperBounds, c(0.87974452, 0.80976112, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues, c(0.23393398, 0.11483365, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$finalStage, NA_integer_) expect_equal(x6$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.522, 0.672), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$testActions, x6$testActions, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalRejectionProbabilities, x6$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedConfidenceIntervalLowerBounds, x6$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedConfidenceIntervalUpperBounds, x6$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedPValues, x6$repeatedPValues, tolerance = 1e-05) expect_equal(x6CodeBased$finalStage, x6$finalStage, tolerance = 1e-05) expect_equal(x6CodeBased$finalPValues, x6$finalPValues, tolerance = 1e-05) expect_equal(x6CodeBased$finalConfidenceIntervalLowerBounds, x6$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x6CodeBased$finalConfidenceIntervalUpperBounds, x6$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) 
expect_equal(x6CodeBased$medianUnbiasedEstimates, x6$medianUnbiasedEstimates, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPowerSimulated, x6$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData2 <- testGetAnalysisResultsPlotData(x6, piTreatmentRange = seq(0.25, 0.55, 0.05)) ## Comparison of the results of list object 'plotData2' with expected results expect_equal(plotData2$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.039, 0.114, 0.208, 0.363, 0.54, 0.659, 0.817), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.10539922, 0.23692776, 0.44485807, 0.69767633, 0.91393119, 1, 0.91393119), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 18") }) test_plan_section("Testing the Analysis Rates Functionality for Two Treatments") test_that("'getAnalysisResults' for a four-stage group sequential design and two treatments", { .skipTestIfDisabled() design7 <- getDesignGroupSequential( kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.25, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(0, 0.5, 0.8), bindingFutility = TRUE ) dataExample5 <- getDataset( n1 = c(17, 18, 22), n2 = c(18, 17, 19), events1 = c(11, 12, 17), events2 = c(5, 10, 7) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # 
@refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} x1 <- getAnalysisResults(design7, dataExample5, thetaH0 = 0, stage = 2, nPlanned = c(60, 30), pi2 = 0.4, pi1 = 0.8, directionUpper = TRUE, allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$conditionalRejectionProbabilities, c(0.19002543, 0.18837824, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, 0.97639752, 0.99770454), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.14000084, -0.07626859, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.72492429, 0.4944942, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.083297609, 0.074571507, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) 
expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData1 <- testGetAnalysisResultsPlotData(x1, piTreatmentRange = seq(0.5, 0.8, 0.05), allocationRatioPlanned = 2) ## Comparison of the results of list object 'plotData1' with expected results expect_equal(plotData1$xValues, c(0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.47726473, 0.64780315, 0.79588169, 0.90153211, 0.96202912, 0.98889368, 0.99770454), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.14689674, 0.40998118, 0.77598415, 0.99604498, 0.86704618, 0.51184997, 0.20491809), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") 
expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.4, allocation ratio = 2") # reversed "directionUpper" # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} x2 <- getAnalysisResults(design7, dataExample5, thetaH0 = 0, stage = 2, nPlanned = c(60, 30), pi2 = 0.8, pi1 = 0.4, directionUpper = FALSE, allocationRatioPlanned = 0.5 ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results expect_equal(x2$testActions, c("accept and stop", "accept and stop", NA_character_, NA_character_)) expect_equal(x2$conditionalRejectionProbabilities, c(0, 0, NA_real_, NA_real_)) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.037603851, 0.34743098), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.14000084, -0.07626859, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.72492429, 0.4944942, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.49999905, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, 1) expect_equal(x2$finalPValues, c(0.98580558, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(0.039328966, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(0.62730979, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, 
c(0.36928105, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05) expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData2 <- testGetAnalysisResultsPlotData(x2, piTreatmentRange = seq(0.2, 0.5, 0.05), allocationRatioPlanned = 0.5) ## Comparison of the results of list object 'plotData2' with expected results expect_equal(plotData2$xValues, c(0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5), tolerance = 1e-07) 
expect_equal(plotData2$condPowerValues, c(0.98964948, 0.93182725, 0.78360503, 0.56553646, 0.34743098, 0.18277547, 0.082851862), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(8.9244677e-08, 2.5604189e-06, 4.9816924e-05, 0.00065732471, 0.0058819346, 0.035694195, 0.14689674), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.8, allocation ratio = 0.5") }) test_that("'getAnalysisResults' for a four-stage inverse normal design and two treatments", { .skipTestIfDisabled() design8 <- getDesignInverseNormal( kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.25, informationRates = c(0.2, 0.4, 0.8, 1), futilityBounds = c(0, 0.5, 0.8), bindingFutility = TRUE ) dataExample6 <- getDataset( n1 = c(17, 18, 22), n2 = c(18, 17, 19), events1 = c(11, 12, 17), events2 = c(5, 10, 7) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} x1 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0.0, stage = 2, nPlanned = c(30, 30), pi2 = 0.2, pi1 = 0.4, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$conditionalRejectionProbabilities, c(0.19002543, 0.18093983, NA_real_, 
NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, 0.51829859, 0.74637814), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.14000084, -0.078581193, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.72492429, 0.48870113, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.083297609, 0.077943692, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, 
x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData1 <- testGetAnalysisResultsPlotData(x1, piTreatmentRange = seq(0.4, 0.7, 0.05), nPlanned = c(30, 30)) ## Comparison of the results of list object 'plotData1' with expected results expect_equal(plotData1$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.74637814, 0.85191228, 0.92421447, 0.96693166, 0.98816058, 0.99670572, 0.99934119), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.0058819346, 0.035694195, 0.14689674, 0.40998118, 0.77598415, 0.99604498, 0.86704618), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 60, pi2 = 0.2, allocation ratio = 1") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} x3 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0, stage = 2, 
nPlanned = c(60, 30), pi2 = 0.4, pi1 = 0.8, directionUpper = TRUE, allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x3' with expected results expect_equal(x3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x3$conditionalRejectionProbabilities, c(0.19002543, 0.18093983, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.97637134, 0.99770045), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.14000084, -0.078581193, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.72492429, 0.48870113, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.083297609, 0.077943692, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$testActions, x3$testActions, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) 
expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalStage, x3$finalStage, tolerance = 1e-05) expect_equal(x3CodeBased$finalPValues, x3$finalPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalLowerBounds, x3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalUpperBounds, x3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$medianUnbiasedEstimates, x3$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData4 <- testGetAnalysisResultsPlotData(x3, piTreatmentRange = seq(0.5, 0.8, 0.05), allocationRatioPlanned = 2) ## Comparison of the results of list object 'plotData4' with expected results expect_equal(plotData4$xValues, c(0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8), tolerance = 1e-07) expect_equal(plotData4$condPowerValues, c(0.4771434, 0.64764919, 0.79574037, 0.90143545, 0.96198044, 0.98887633, 0.99770045), tolerance = 1e-07) expect_equal(plotData4$likelihoodValues, c(0.14689674, 0.40998118, 0.77598415, 0.99604498, 0.86704618, 0.51184997, 0.20491809), tolerance = 1e-07) expect_equal(plotData4$main, "Conditional Power with Likelihood") expect_equal(plotData4$xlab, "pi1") expect_equal(plotData4$ylab, "Conditional power / Likelihood") expect_equal(plotData4$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.4, allocation ratio = 2") # reversed "directionUpper" # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # 
@refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} x4 <- getAnalysisResults(design8, dataExample6, thetaH0 = 0, stage = 2, nPlanned = c(60, 30), pi2 = 0.8, pi1 = 0.4, directionUpper = FALSE, allocationRatioPlanned = 0.5 ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x4' with expected results expect_equal(x4$testActions, c("accept and stop", "accept and stop", NA_character_, NA_character_)) expect_equal(x4$conditionalRejectionProbabilities, c(0, 0, NA_real_, NA_real_)) expect_equal(x4$conditionalPower, c(NA_real_, NA_real_, 0.037603851, 0.34743098), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(-0.14000084, -0.078581193, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.72492429, 0.48870113, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.49999905, 0.49999905, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, 1) expect_equal(x4$finalPValues, c(0.98580558, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(0.039328966, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(0.62730979, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$medianUnbiasedEstimates, c(0.36928105, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- 
eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$testActions, x4$testActions, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalRejectionProbabilities, x4$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPower, x4$conditionalPower, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalLowerBounds, x4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalUpperBounds, x4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedPValues, x4$repeatedPValues, tolerance = 1e-05) expect_equal(x4CodeBased$finalStage, x4$finalStage, tolerance = 1e-05) expect_equal(x4CodeBased$finalPValues, x4$finalPValues, tolerance = 1e-05) expect_equal(x4CodeBased$finalConfidenceIntervalLowerBounds, x4$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$finalConfidenceIntervalUpperBounds, x4$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$medianUnbiasedEstimates, x4$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData5 <- testGetAnalysisResultsPlotData(x4, piTreatmentRange = seq(0.2, 0.5, 0.05), allocationRatioPlanned = 0.5) ## Comparison of the results of list object 'plotData5' with expected results expect_equal(plotData5$xValues, c(0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5), tolerance = 1e-07) expect_equal(plotData5$condPowerValues, c(0.98964948, 0.93182725, 0.78360503, 0.56553646, 0.34743098, 0.18277547, 0.082851862), tolerance = 1e-07) expect_equal(plotData5$likelihoodValues, c(8.9244677e-08, 2.5604189e-06, 4.9816924e-05, 0.00065732471, 0.0058819346, 0.035694195, 0.14689674), tolerance = 1e-07) 
expect_equal(plotData5$main, "Conditional Power with Likelihood") expect_equal(plotData5$xlab, "pi1") expect_equal(plotData5$ylab, "Conditional power / Likelihood") expect_equal(plotData5$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.8, allocation ratio = 0.5") }) test_that("'getAnalysisResults' for a four-stage Fisher design and two treatments", { .skipTestIfDisabled() design9 <- getDesignFisher( kMax = 4, alpha = 0.025, method = "equalAlpha", informationRates = c(0.2, 0.4, 0.8, 1) ) dataExample7 <- getDataset( n1 = c(17, 23, 22), n2 = c(18, 20, 19), events1 = c(11, 12, 17), events2 = c(5, 10, 7) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionFisherInterim} # @refFS[Formula]{fs:conditionalRejectionFisherLastInterim} # @refFS[Formula]{fs:conditionalRejectionFisherweights} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} x1 <- getAnalysisResults(design9, dataExample7, thetaH0 = 0, stage = 2, nPlanned = c(60, 30), pi2 = 0.4, pi1 = 0.8, directionUpper = TRUE, allocationRatioPlanned = 2, seed = 123 ) ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results expect_equal(x1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x1$conditionalRejectionProbabilities, c(0.13898608, 0.050808351, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.023853561, -0.068378457, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.66428984, 0.40418869, NA_real_, NA_real_), 
tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.035427069, 0.088523734, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$finalStage, NA_integer_) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.925, 0.972), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPowerSimulated, 
x1$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData1 <- testGetAnalysisResultsPlotData(x1, piTreatmentRange = seq(0.5, 0.8, 0.05), allocationRatioPlanned = 2) ## Comparison of the results of list object 'plotData1' with expected results expect_equal(plotData1$xValues, c(0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.199, 0.364, 0.506, 0.686, 0.839, 0.927, 0.979), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.63105765, 0.95013529, 0.95013529, 0.63105765, 0.27837883, 0.081561833, 0.015871623), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.4, allocation ratio = 2") # reversed "directionUpper" # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionFisherInterim} # @refFS[Formula]{fs:conditionalRejectionFisherLastInterim} # @refFS[Formula]{fs:conditionalRejectionFisherweights} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} x2 <- getAnalysisResults(design9, dataExample7, thetaH0 = 0, stage = 2, nPlanned = c(60, 30), pi2 = 0.8, pi1 = 0.4, directionUpper = FALSE, allocationRatioPlanned = 0.5, seed = 123 ) ## Comparison 
of the results of AnalysisResultsFisher object 'x2' with expected results expect_equal(x2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(x2$conditionalRejectionProbabilities, c(0.0056634595, 0.0023089469, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.023853561, -0.068378457, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.66428984, 0.40418869, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.49999905, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.591, 0.788), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalStage, x2$finalStage, 
tolerance = 1e-05) expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPowerSimulated, x2$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData2 <- testGetAnalysisResultsPlotData(x2, piTreatmentRange = seq(0.2, 0.5, 0.05), allocationRatioPlanned = 0.5) ## Comparison of the results of list object 'plotData2' with expected results expect_equal(plotData2$xValues, c(0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.998, 0.992, 0.967, 0.892, 0.807, 0.623, 0.493), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(1.003982e-05, 0.00017609247, 0.0020513476, 0.015871623, 0.081561833, 0.27837883, 0.63105765), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power with Likelihood") expect_equal(plotData2$xlab, "pi1") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 90, pi2 = 0.8, allocation ratio = 0.5") }) test_that("'getAnalysisResults' produces the correct exact tests and final CIs", { .skipTestIfDisabled() dataExample8 <- getDataset( n2 = c(31, 72), n1 = c(30, 69), events2 = c(8, 54), events1 = c(6, 45) ) design10 <- getDesignGroupSequential( kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal} # 
@refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoRates} # @refFS[Formula]{fs:medianUnbiasedEstimate} x1 <- getAnalysisResults(design10, dataExample8, thetaH0 = 0, stage = 2, directionUpper = FALSE, normalApproximation = FALSE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results expect_equal(x1$pi1, 0.51515152, tolerance = 1e-07) expect_equal(x1$pi2, 0.60194175, tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "accept")) expect_equal(x1$conditionalRejectionProbabilities, c(0.013966781, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.39509356, -0.22101238), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.29306133, 0.050448655), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.49999905, 0.15271161), tolerance = 1e-07) expect_equal(x1$finalStage, 2) expect_equal(x1$finalPValues, c(NA_real_, 0.13570939), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, -0.21309581), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.059922132), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, -0.076600295), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) 
invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$pi1, x1$pi1, tolerance = 1e-05) expect_equal(x1CodeBased$pi2, x1$pi2, tolerance = 1e-05) expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design11 <- getDesignInverseNormal( kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # 
@refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} x2 <- getAnalysisResults(design11, dataExample8, thetaH0 = 0, stage = 2, directionUpper = FALSE, normalApproximation = FALSE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results expect_equal(x2$pi1, 0.51515152, tolerance = 1e-07) expect_equal(x2$pi2, 0.60194175, tolerance = 1e-07) expect_equal(x2$testActions, c("continue", "accept")) expect_equal(x2$conditionalRejectionProbabilities, c(0.013966781, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.39509356, -0.20744977), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.29306133, 0.038390636), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.49999905, 0.171251), tolerance = 1e-07) expect_equal(x2$finalStage, 2) expect_equal(x2$finalPValues, c(NA_real_, 0.15026298), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, -0.20860056), tolerance = 1e-07) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.064410651), tolerance = 1e-07) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, -0.072106168), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$pi1, x2$pi1, tolerance = 1e-05) expect_equal(x2CodeBased$pi2, x2$pi2, tolerance = 1e-05) expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05) 
expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05) expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design12 <- getDesignFisher( kMax = 2, alpha = 0.025, method = "fullAlpha", informationRates = c(0.3, 1) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionFisherInterim} # @refFS[Formula]{fs:conditionalRejectionFisherLastInterim} # @refFS[Formula]{fs:conditionalRejectionFisherweights} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoRatesEffect} # @refFS[Formula]{fs:conditionalPowerTwoRatesSampleSizes} x3 <- 
getAnalysisResults(design12, dataExample8, thetaH0 = 0, stage = 2, directionUpper = FALSE, normalApproximation = FALSE, seed = 123 ) ## Comparison of the results of AnalysisResultsFisher object 'x3' with expected results expect_equal(x3$pi1, 0.51515152, tolerance = 1e-07) expect_equal(x3$pi2, 0.60194175, tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "accept")) expect_equal(x3$conditionalRejectionProbabilities, c(0.016431334, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.39357809, -0.2198965), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.29140184, 0.047490149), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.18563047), tolerance = 1e-07) expect_equal(x3$finalStage, 2) expect_equal(x3$finalPValues, c(NA_real_, 0.18562957), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$pi1, x3$pi1, tolerance = 1e-05) expect_equal(x3CodeBased$pi2, x3$pi2, tolerance = 1e-05) expect_equal(x3CodeBased$testActions, x3$testActions, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) 
expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalStage, x3$finalStage, tolerance = 1e-05) expect_equal(x3CodeBased$finalPValues, x3$finalPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalLowerBounds, x3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalUpperBounds, x3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$medianUnbiasedEstimates, x3$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults' produces the correct non-inferiority results for a group sequential design", { .skipTestIfDisabled() design13 <- getDesignGroupSequential( kMax = 2, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.3, 1) ) dataExample9 <- getDataset( n1 = c(29, 70), n2 = c(31, 71), events1 = c(8, 54), events2 = c(6, 45) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} x1 <- getAnalysisResults(design13, dataExample9, thetaH0 = -0.1, stage = 2, directionUpper = TRUE, normalApproximation = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results expect_equal(x1$pi1, 0.62626263, tolerance = 1e-07) expect_equal(x1$pi2, 0.5, tolerance = 1e-07) 
expect_equal(x1$testActions, c("continue", "reject")) expect_equal(x1$conditionalRejectionProbabilities, c(0.1027905, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(-0.26992436, -0.011398056), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(0.42527258, 0.25916391), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.17488831, 0.00058560119), tolerance = 1e-07) expect_equal(x1$finalStage, 2) expect_equal(x1$finalPValues, c(NA_real_, 0.0012732763), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, -0.016122347), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.26034096), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, 0.12355576), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$pi1, x1$pi1, tolerance = 1e-05) expect_equal(x1CodeBased$pi2, x1$pi2, tolerance = 1e-05) expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) 
expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} x2 <- getAnalysisResults(design13, dataExample9, thetaH0 = -0.1, stage = 1, nPlanned = 40, pi1 = 0.45, pi2 = 0.4, directionUpper = TRUE, normalApproximation = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results expect_equal(x2$testActions, c("continue", NA_character_)) expect_equal(x2$conditionalRejectionProbabilities, c(0.1027905, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, 0.38169554), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(-0.26992436, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(0.42527258, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.17488831, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) 
expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05) expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData1 <- testGetAnalysisResultsPlotData(x2, piTreatmentRange = seq(0.25, 0.7, 0.05)) ## Comparison of the results of list object 'plotData1' with expected results expect_equal(plotData1$xValues, c(0.25, 0.3, 0.35, 0.4, 0.45, 
0.5, 0.55, 0.6, 0.65, 0.7), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.053165998, 0.1027905, 0.17500031, 0.26934912, 0.38169554, 0.50456648, 0.62825352, 0.74249459, 0.83846571, 0.91065807), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.95261056, 0.95859015, 0.67101367, 0.32674624, 0.11068039, 0.026080239, 0.0042749722, 0.00048745649, 3.866511e-05, 2.1334549e-06), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power with Likelihood") expect_equal(plotData1$xlab, "pi1") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 1, # of remaining subjects = 40, pi2 = 0.4, allocation ratio = 1") # non-inferiority, reversed "directionUpper" # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} x3 <- getAnalysisResults(design13, dataExample9, thetaH0 = 0.1, stage = 2, directionUpper = FALSE, normalApproximation = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x3' with expected results expect_equal(x3$pi1, 0.62626263, tolerance = 1e-07) expect_equal(x3$pi2, 0.5, tolerance = 1e-07) expect_equal(x3$testActions, c("continue", "accept")) expect_equal(x3$conditionalRejectionProbabilities, c(0.012395218, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(-0.26992436, -0.011398056), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(0.42527258, 0.25916391), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.49999905, 0.49999905), tolerance = 1e-07) expect_equal(x3$finalStage, 2) expect_equal(x3$finalPValues, 
c(NA_real_, 0.64703032), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, -0.0098227441), tolerance = 1e-07) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, 0.26218829), tolerance = 1e-07) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, 0.12618258), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$pi1, x3$pi1, tolerance = 1e-05) expect_equal(x3CodeBased$pi2, x3$pi2, tolerance = 1e-05) expect_equal(x3CodeBased$testActions, x3$testActions, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalStage, x3$finalStage, tolerance = 1e-05) expect_equal(x3CodeBased$finalPValues, x3$finalPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalLowerBounds, x3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalUpperBounds, x3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$medianUnbiasedEstimates, x3$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) 
expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticTwoRatesApproximation} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} x4 <- getAnalysisResults(design13, dataExample9, thetaH0 = 0.1, stage = 1, nPlanned = 40, pi1 = 0.4, pi2 = 0.45, directionUpper = FALSE, normalApproximation = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x4' with expected results expect_equal(x4$testActions, c("continue", NA_character_)) expect_equal(x4$conditionalRejectionProbabilities, c(0.012395218, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalPower, c(NA_real_, 0.10084143), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds, c(-0.26992436, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds, c(0.42527258, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues, c(0.49999905, NA_real_), tolerance = 1e-07) expect_equal(x4$finalStage, NA_integer_) expect_equal(x4$finalPValues, c(NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_)) expect_equal(x4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_)) expect_equal(x4$medianUnbiasedEstimates, c(NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$testActions, x4$testActions, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalRejectionProbabilities, 
x4$conditionalRejectionProbabilities, tolerance = 1e-05)
    # Tail of a test started before this chunk: round-trip checks that the
    # object rebuilt from getObjectRCode() output (x4CodeBased) matches the
    # original analysis results object (x4) field by field.
    expect_equal(x4CodeBased$conditionalPower, x4$conditionalPower, tolerance = 1e-05)
    expect_equal(x4CodeBased$repeatedConfidenceIntervalLowerBounds, x4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
    expect_equal(x4CodeBased$repeatedConfidenceIntervalUpperBounds, x4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
    expect_equal(x4CodeBased$repeatedPValues, x4$repeatedPValues, tolerance = 1e-05)
    expect_equal(x4CodeBased$finalStage, x4$finalStage, tolerance = 1e-05)
    expect_equal(x4CodeBased$finalPValues, x4$finalPValues, tolerance = 1e-05)
    expect_equal(x4CodeBased$finalConfidenceIntervalLowerBounds, x4$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
    expect_equal(x4CodeBased$finalConfidenceIntervalUpperBounds, x4$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
    expect_equal(x4CodeBased$medianUnbiasedEstimates, x4$medianUnbiasedEstimates, tolerance = 1e-05)
    # Coercion checks: names(), as.data.frame(), and as.matrix() must yield
    # non-empty results of the expected type.
    expect_type(names(x4), "character")
    df <- as.data.frame(x4)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x4)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Single-stage dataset of rates analysed without an explicit design:
# getAnalysisResults() must fall back to its default design and reproduce
# the hard-coded reference values below (regression test).
test_that("'getAnalysisResults' with a dataset of rates and without defining a design", {
    .skipTestIfDisabled()

    data <- getDataset(
        n1 = c(10),
        n2 = c(15),
        events1 = c(8),
        events2 = c(6)
    )

    # @refFS[Formula]{fs:testStatisticTwoRatesApproximation}
    # @refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater}
    analysisResults1 <- getAnalysisResults(data, alpha = 0.02)

    ## Comparison of the results of AnalysisResultsInverseNormal object 'analysisResults1' with expected results
    expect_equal(analysisResults1$pi1, 0.8, tolerance = 1e-07)
    expect_equal(analysisResults1$pi2, 0.4, tolerance = 1e-07)
    expect_equal(analysisResults1$testActions, "accept")
    expect_equal(analysisResults1$repeatedConfidenceIntervalLowerBounds, -0.016534109, tolerance = 1e-07)
    expect_equal(analysisResults1$repeatedConfidenceIntervalUpperBounds, 0.68698828, tolerance = 1e-07)
    expect_equal(analysisResults1$repeatedPValues, 0.024199112, tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must succeed without error, and the object rebuilt
        # from its generated R code must match the original results.
        invisible(capture.output(expect_error(print(analysisResults1), NA)))
        expect_output(print(analysisResults1)$show())
        invisible(capture.output(expect_error(summary(analysisResults1), NA)))
        expect_output(summary(analysisResults1)$show())
        analysisResults1CodeBased <- eval(parse(text = getObjectRCode(analysisResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(analysisResults1CodeBased$pi1, analysisResults1$pi1, tolerance = 1e-05)
        expect_equal(analysisResults1CodeBased$pi2, analysisResults1$pi2, tolerance = 1e-05)
        expect_equal(analysisResults1CodeBased$testActions, analysisResults1$testActions, tolerance = 1e-05)
        expect_equal(analysisResults1CodeBased$repeatedConfidenceIntervalLowerBounds, analysisResults1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(analysisResults1CodeBased$repeatedConfidenceIntervalUpperBounds, analysisResults1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(analysisResults1CodeBased$repeatedPValues, analysisResults1$repeatedPValues, tolerance = 1e-05)
        # Coercion checks, as above.
        expect_type(names(analysisResults1), "character")
        df <- as.data.frame(analysisResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(analysisResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Boundary recalculation at the final stage with maxInformation = 40:
# an over-running trial (data1, n = 45) and an under-running trial within the
# information epsilon (data3, n = 38, informationEpsilon = 2) must yield the
# same first two critical values as a trial hitting the target exactly
# (data2, n = 40); the over-/under-run cases additionally raise a warning.
test_that("'getAnalysisResults' produces the correct critical values for a boundary recalculation at the last stage", {
    .skipTestIfDisabled()

    data1 <- getDataset(
        overallN = c(22, 33, 45),
        overallEvents = c(11, 18, 28)
    )
    data2 <- getDataset(
        overallN = c(22, 33, 40),
        overallEvents = c(11, 18, 23)
    )
    data3 <- getDataset(
        overallN = c(22, 33, 38),
        overallEvents = c(11, 18, 21)
    )

    design <- getDesignGroupSequential(
        typeOfDesign = "asP"
    )

    # @refFS[Formula]{fs:getAnalysisResults:maxInformation}
    # @refFS[Formula]{fs:getAnalysisResults:maxInformation:methods}
    expect_warning(result1 <- getAnalysisResults(design, data1,
        thetaH0 = 0.5, maxInformation = 40
    ))
    result2 <- getAnalysisResults(design, data2,
        thetaH0 = 0.5, maxInformation = 40
    )
    expect_warning(result3 <- getAnalysisResults(design, data3,
        thetaH0 = 0.5, maxInformation = 40, informationEpsilon = 2
    ))

    expect_equal(result1$.design$criticalValues[1:2], result2$.design$criticalValues[1:2], tolerance = 1e-07)
    expect_equal(result1$.design$criticalValues[1:2], result3$.design$criticalValues[1:2], tolerance = 1e-07)
    expect_equal(result2$.design$criticalValues[1:2], result3$.design$criticalValues[1:2], tolerance = 1e-07)
})
rpact/tests/testthat/test-f_analysis_multiarm_rates.R0000644000176200001440000012664414370207346022745 0ustar liggesusers## |
## | *Unit tests*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | RPACT package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File name: test-f_analysis_multiarm_rates.R
## | Creation date: 06 February 2023, 12:10:38
## | File version: $Revision: 6801 $
## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $
## | Last changed by: $Author: pahlke $
## |

test_plan_section("Testing the Analysis Rates Functionality for Three or More Treatments")


# Multi-arm analysis of rates (two treatment arms vs. control) across the
# three supported design types (inverse normal, Fisher, conditional Dunnett).
# NOTE(review): this test continues beyond the end of this chunk.
test_that("'getAnalysisResultsMultiArm' with dataset of rates", {
    design1 <- getDesignInverseNormal(
        kMax = 4, alpha = 0.02,
        futilityBounds = c(-0.5, 0, 0.5),
        bindingFutility = FALSE, typeOfDesign = "asKD",
        gammaA = 1.2, informationRates = c(0.15, 0.4, 0.7, 1)
    )

    design2 <- getDesignFisher(
        kMax = 4, alpha = 0.02,
alpha0Vec = c(0.7, 0.5, 0.3), method = "equalAlpha", bindingFutility = TRUE, informationRates = c(0.15, 0.4, 0.7, 1) ) design3 <- getDesignConditionalDunnett(alpha = 0.02, informationAtInterim = 0.4, secondStageConditioning = TRUE) # directionUpper = TRUE dataExample1 <- getDataset( n1 = c(23, 25), n2 = c(25, NA), n3 = c(22, 29), events1 = c(15, 12), events2 = c(19, NA), events3 = c(12, 13) ) # directionUpper = FALSE dataExample2 <- getDataset( n1 = c(23, 25), n2 = c(25, NA), n3 = c(22, 29), events1 = c(15, 12), events2 = c(19, NA), events3 = c(21, 25) ) # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} results1 <- getAnalysisResults(design = design1, dataInput = dataExample1, intersectionTest = "Simes", nPlanned = c(20, 20), normalApproximation = FALSE, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results1' with expected results expect_equal(results1$piTreatments[1, ], 0.5625, tolerance = 1e-05) 
expect_equal(results1$piTreatments[2, ], NA_real_) expect_equal(results1$piControl[1, ], 0.49019608, tolerance = 1e-05) expect_equal(results1$conditionalRejectionProbabilities[1, ], c(0.015420568, 0.003193865, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$conditionalRejectionProbabilities[2, ], c(0.024462749, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$conditionalPower[1, ], c(NA_real_, NA_real_, 0.0010766875, 0.011284717), tolerance = 1e-05) expect_equal(results1$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(results1$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.32184855, -0.20584893, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.20645613, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalUpperBounds[1, ], c(0.5011587, 0.32866179, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalUpperBounds[2, ], c(0.57764375, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedPValues[1, ], c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedPValues[2, ], c(0.5, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results1), NA))) expect_output(print(results1)$show()) invisible(capture.output(expect_error(summary(results1), NA))) expect_output(summary(results1)$show()) results1CodeBased <- eval(parse(text = getObjectRCode(results1, stringWrapParagraphWidth = NULL))) expect_equal(results1CodeBased$piTreatments, results1$piTreatments, tolerance = 1e-05) expect_equal(results1CodeBased$piControl, results1$piControl, tolerance = 1e-05) expect_equal(results1CodeBased$conditionalRejectionProbabilities, results1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results1CodeBased$conditionalPower, 
results1$conditionalPower, tolerance = 1e-05) expect_equal(results1CodeBased$repeatedConfidenceIntervalLowerBounds, results1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results1CodeBased$repeatedConfidenceIntervalUpperBounds, results1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results1CodeBased$repeatedPValues, results1$repeatedPValues, tolerance = 1e-05) expect_type(names(results1), "character") df <- as.data.frame(results1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} results2 <- getAnalysisResults(design = design1, dataInput = dataExample1, intersectionTest = "Dunnett", nPlanned = c(20, 20), normalApproximation = TRUE, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 
'results2' with expected results expect_equal(results2$piTreatments[1, ], 0.5625, tolerance = 1e-05) expect_equal(results2$piTreatments[2, ], NA_real_) expect_equal(results2$piControl[1, ], 0.49019608, tolerance = 1e-05) expect_equal(results2$conditionalRejectionProbabilities[1, ], c(0.022712676, 0.0087985227, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$conditionalRejectionProbabilities[2, ], c(0.043097831, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$conditionalPower[1, ], c(NA_real_, NA_real_, 0.004624756, 0.026737358), tolerance = 1e-05) expect_equal(results2$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(results2$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.3206942, -0.20381953, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.2052416, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalUpperBounds[1, ], c(0.50018786, 0.32441792, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalUpperBounds[2, ], c(0.57677219, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedPValues[1, ], c(0.5, 0.5, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedPValues[2, ], c(0.5, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results2), NA))) expect_output(print(results2)$show()) invisible(capture.output(expect_error(summary(results2), NA))) expect_output(summary(results2)$show()) results2CodeBased <- eval(parse(text = getObjectRCode(results2, stringWrapParagraphWidth = NULL))) expect_equal(results2CodeBased$piTreatments, results2$piTreatments, tolerance = 1e-05) expect_equal(results2CodeBased$piControl, results2$piControl, tolerance = 1e-05) expect_equal(results2CodeBased$conditionalRejectionProbabilities, 
results2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results2CodeBased$conditionalPower, results2$conditionalPower, tolerance = 1e-05) expect_equal(results2CodeBased$repeatedConfidenceIntervalLowerBounds, results2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results2CodeBased$repeatedConfidenceIntervalUpperBounds, results2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results2CodeBased$repeatedPValues, results2$repeatedPValues, tolerance = 1e-05) expect_type(names(results2), "character") df <- as.data.frame(results2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} results3 <- getAnalysisResults(design = design2, dataInput = dataExample1, intersectionTest = "Simes", nPlanned = c(20, 20), seed = 123, iterations = 1000, 
normalApproximation = FALSE, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results3' with expected results expect_equal(results3$piTreatments[1, ], 0.5625, tolerance = 1e-05) expect_equal(results3$piTreatments[2, ], NA_real_) expect_equal(results3$piControl[1, ], 0.49019608, tolerance = 1e-05) expect_equal(results3$conditionalRejectionProbabilities[1, ], c(0.011503611, 0, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$conditionalRejectionProbabilities[2, ], c(0.015301846, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.26319109, -0.20678373, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.14541584, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedConfidenceIntervalUpperBounds[1, ], c(0.45121457, 0.32319296, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedConfidenceIntervalUpperBounds[2, ], c(0.53261778, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedPValues[1, ], c(0.4416362, 0.4416362, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedPValues[2, ], c(0.31730879, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$conditionalPowerSimulated[1, ], c(NA_real_, NA_real_, 0, 0)) expect_equal(results3$conditionalPowerSimulated[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results3), NA))) expect_output(print(results3)$show()) invisible(capture.output(expect_error(summary(results3), NA))) expect_output(summary(results3)$show()) results3CodeBased <- eval(parse(text = getObjectRCode(results3, stringWrapParagraphWidth = NULL))) expect_equal(results3CodeBased$piTreatments, results3$piTreatments, tolerance = 1e-05) expect_equal(results3CodeBased$piControl, 
results3$piControl, tolerance = 1e-05) expect_equal(results3CodeBased$conditionalRejectionProbabilities, results3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results3CodeBased$repeatedConfidenceIntervalLowerBounds, results3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results3CodeBased$repeatedConfidenceIntervalUpperBounds, results3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results3CodeBased$repeatedPValues, results3$repeatedPValues, tolerance = 1e-05) expect_equal(results3CodeBased$conditionalPowerSimulated, results3$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(results3), "character") df <- as.data.frame(results3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} results4 <- getAnalysisResults(design = 
design2, dataInput = dataExample1, intersectionTest = "Dunnett", nPlanned = c(20, 20), seed = 123, iterations = 1000, normalApproximation = TRUE, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results4' with expected results expect_equal(results4$piTreatments[1, ], 0.5625, tolerance = 1e-05) expect_equal(results4$piTreatments[2, ], NA_real_) expect_equal(results4$piControl[1, ], 0.49019608, tolerance = 1e-05) expect_equal(results4$conditionalRejectionProbabilities[1, ], c(0.014541388, 0.0059378141, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$conditionalRejectionProbabilities[2, ], c(0.024268969, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.26076213, -0.20472006, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.14291708, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalUpperBounds[1, ], c(0.44911894, 0.31972469, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalUpperBounds[2, ], c(0.53072029, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedPValues[1, ], c(0.3372539, 0.3372539, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedPValues[2, ], c(0.17782371, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$conditionalPowerSimulated[1, ], c(NA_real_, NA_real_, 0.011, 0.018), tolerance = 1e-05) expect_equal(results4$conditionalPowerSimulated[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results4), NA))) expect_output(print(results4)$show()) invisible(capture.output(expect_error(summary(results4), NA))) expect_output(summary(results4)$show()) results4CodeBased <- eval(parse(text = getObjectRCode(results4, 
stringWrapParagraphWidth = NULL))) expect_equal(results4CodeBased$piTreatments, results4$piTreatments, tolerance = 1e-05) expect_equal(results4CodeBased$piControl, results4$piControl, tolerance = 1e-05) expect_equal(results4CodeBased$conditionalRejectionProbabilities, results4$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results4CodeBased$repeatedConfidenceIntervalLowerBounds, results4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results4CodeBased$repeatedConfidenceIntervalUpperBounds, results4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results4CodeBased$repeatedPValues, results4$repeatedPValues, tolerance = 1e-05) expect_equal(results4CodeBased$conditionalPowerSimulated, results4$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(results4), "character") df <- as.data.frame(results4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # 
@refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} results5 <- getAnalysisResults(design = design3, dataInput = dataExample1, intersectionTest = "Dunnett", normalApproximation = TRUE, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsConditionalDunnett object 'results5' with expected results expect_equal(results5$piTreatments[1, ], 0.5625, tolerance = 1e-05) expect_equal(results5$piTreatments[2, ], NA_real_) expect_equal(results5$piControl[1, ], 0.49019608, tolerance = 1e-05) expect_equal(results5$conditionalRejectionProbabilities[1, ], c(NA_real_, 0.019942093), tolerance = 1e-05) expect_equal(results5$conditionalRejectionProbabilities[2, ], c(NA_real_, 0.049973806), tolerance = 1e-05) expect_equal(results5$conditionalPower[1, ], c(NA_real_, NA_real_)) expect_equal(results5$conditionalPower[2, ], c(NA_real_, NA_real_)) expect_equal(results5$repeatedConfidenceIntervalLowerBounds[1, ], c(NA_real_, -0.10423565), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalLowerBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results5$repeatedConfidenceIntervalUpperBounds[1, ], c(NA_real_, 0.28064632), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalUpperBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results5$repeatedPValues[1, ], c(NA_real_, 0.26025152), tolerance = 1e-05) expect_equal(results5$repeatedPValues[2, ], c(NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results5), NA))) expect_output(print(results5)$show()) invisible(capture.output(expect_error(summary(results5), NA))) expect_output(summary(results5)$show()) results5CodeBased <- eval(parse(text = getObjectRCode(results5, stringWrapParagraphWidth = NULL))) expect_equal(results5CodeBased$piTreatments, results5$piTreatments, tolerance = 1e-05) expect_equal(results5CodeBased$piControl, results5$piControl, tolerance = 
1e-05) expect_equal(results5CodeBased$conditionalRejectionProbabilities, results5$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results5CodeBased$conditionalPower, results5$conditionalPower, tolerance = 1e-05) expect_equal(results5CodeBased$repeatedConfidenceIntervalLowerBounds, results5$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results5CodeBased$repeatedConfidenceIntervalUpperBounds, results5$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results5CodeBased$repeatedPValues, results5$repeatedPValues, tolerance = 1e-05) expect_type(names(results5), "character") df <- as.data.frame(results5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} results6 <- getAnalysisResults(design = design1, dataInput = dataExample2, intersectionTest = 
"Simes", nPlanned = c(20, 20), normalApproximation = TRUE, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results6' with expected results expect_equal(results6$piTreatments[1, ], 0.5625, tolerance = 1e-05) expect_equal(results6$piTreatments[2, ], NA_real_) expect_equal(results6$piControl[1, ], 0.90196078, tolerance = 1e-05) expect_equal(results6$conditionalRejectionProbabilities[1, ], c(0.13434137, 0.80112393, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$conditionalRejectionProbabilities[2, ], c(0.086909033, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$conditionalPower[1, ], c(NA_real_, NA_real_, 0.99558173, 0.99935678), tolerance = 1e-05) expect_equal(results6$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(results6$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.62349618, -0.55900271, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.51524937, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedConfidenceIntervalUpperBounds[1, ], c(0.08041061, -0.10884679, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedConfidenceIntervalUpperBounds[2, ], c(0.16732342, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedPValues[1, ], c(0.10960848, 0.00033097065, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedPValues[2, ], c(0.30001108, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results6), NA))) expect_output(print(results6)$show()) invisible(capture.output(expect_error(summary(results6), NA))) expect_output(summary(results6)$show()) results6CodeBased <- eval(parse(text = getObjectRCode(results6, stringWrapParagraphWidth = NULL))) expect_equal(results6CodeBased$piTreatments, results6$piTreatments, 
tolerance = 1e-05) expect_equal(results6CodeBased$piControl, results6$piControl, tolerance = 1e-05) expect_equal(results6CodeBased$conditionalRejectionProbabilities, results6$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results6CodeBased$conditionalPower, results6$conditionalPower, tolerance = 1e-05) expect_equal(results6CodeBased$repeatedConfidenceIntervalLowerBounds, results6$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results6CodeBased$repeatedConfidenceIntervalUpperBounds, results6$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results6CodeBased$repeatedPValues, results6$repeatedPValues, tolerance = 1e-05) expect_type(names(results6), "character") df <- as.data.frame(results6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} 
results7 <- getAnalysisResults(design = design1, dataInput = dataExample2, intersectionTest = "Dunnett", nPlanned = c(20, 20), normalApproximation = TRUE, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results7' with expected results expect_equal(results7$piTreatments[1, ], 0.5625, tolerance = 1e-05) expect_equal(results7$piTreatments[2, ], NA_real_) expect_equal(results7$piControl[1, ], 0.90196078, tolerance = 1e-05) expect_equal(results7$conditionalRejectionProbabilities[1, ], c(0.13739667, 0.80531488, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$conditionalRejectionProbabilities[2, ], c(0.086909033, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$conditionalPower[1, ], c(NA_real_, NA_real_, 0.99579217, 0.99938978), tolerance = 1e-05) expect_equal(results7$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(results7$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.62267686, -0.55784951, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.5143226, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedConfidenceIntervalUpperBounds[1, ], c(0.079007072, -0.11253618, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedConfidenceIntervalUpperBounds[2, ], c(0.16597626, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedPValues[1, ], c(0.10337051, 0.00031285088, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedPValues[2, ], c(0.30001108, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results7), NA))) expect_output(print(results7)$show()) invisible(capture.output(expect_error(summary(results7), NA))) expect_output(summary(results7)$show()) results7CodeBased <- eval(parse(text = getObjectRCode(results7, 
stringWrapParagraphWidth = NULL))) expect_equal(results7CodeBased$piTreatments, results7$piTreatments, tolerance = 1e-05) expect_equal(results7CodeBased$piControl, results7$piControl, tolerance = 1e-05) expect_equal(results7CodeBased$conditionalRejectionProbabilities, results7$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results7CodeBased$conditionalPower, results7$conditionalPower, tolerance = 1e-05) expect_equal(results7CodeBased$repeatedConfidenceIntervalLowerBounds, results7$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results7CodeBased$repeatedConfidenceIntervalUpperBounds, results7$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results7CodeBased$repeatedPValues, results7$repeatedPValues, tolerance = 1e-05) expect_type(names(results7), "character") df <- as.data.frame(results7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # 
@refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} results8 <- getAnalysisResults(design = design2, dataInput = dataExample2, intersectionTest = "Simes", nPlanned = c(20, 20), seed = 123, iterations = 1000, normalApproximation = TRUE, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results8' with expected results expect_equal(results8$piTreatments[1, ], 0.5625, tolerance = 1e-05) expect_equal(results8$piTreatments[2, ], NA_real_) expect_equal(results8$piControl[1, ], 0.90196078, tolerance = 1e-05) expect_equal(results8$conditionalRejectionProbabilities[1, ], c(0.10173644, 1, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$conditionalRejectionProbabilities[2, ], c(0.053203298, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.58125932, -0.55861966, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.46821261, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedConfidenceIntervalUpperBounds[1, ], c(0.011590857, -0.11157179, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedConfidenceIntervalUpperBounds[2, ], c(0.10089066, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedPValues[1, ], c(0.024755475, 0.00046257745, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedPValues[2, ], c(0.061679763, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$conditionalPowerSimulated[1, ], c(NA_real_, NA_real_, 1, 1)) expect_equal(results8$conditionalPowerSimulated[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results8), NA))) expect_output(print(results8)$show()) 
invisible(capture.output(expect_error(summary(results8), NA))) expect_output(summary(results8)$show()) results8CodeBased <- eval(parse(text = getObjectRCode(results8, stringWrapParagraphWidth = NULL))) expect_equal(results8CodeBased$piTreatments, results8$piTreatments, tolerance = 1e-05) expect_equal(results8CodeBased$piControl, results8$piControl, tolerance = 1e-05) expect_equal(results8CodeBased$conditionalRejectionProbabilities, results8$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results8CodeBased$repeatedConfidenceIntervalLowerBounds, results8$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results8CodeBased$repeatedConfidenceIntervalUpperBounds, results8$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results8CodeBased$repeatedPValues, results8$repeatedPValues, tolerance = 1e-05) expect_equal(results8CodeBased$conditionalPowerSimulated, results8$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(results8), "character") df <- as.data.frame(results8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # 
@refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} results9 <- getAnalysisResults(design = design2, dataInput = dataExample2, intersectionTest = "Dunnett", nPlanned = c(20, 20), seed = 123, iterations = 1000, normalApproximation = TRUE, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results9' with expected results expect_equal(results9$piTreatments[1, ], 0.5625, tolerance = 1e-05) expect_equal(results9$piTreatments[2, ], NA_real_) expect_equal(results9$piControl[1, ], 0.90196078, tolerance = 1e-05) expect_equal(results9$conditionalRejectionProbabilities[1, ], c(0.10565624, 1, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results9$conditionalRejectionProbabilities[2, ], c(0.053203298, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results9$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.57948552, -0.55733034, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results9$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.4662704, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results9$repeatedConfidenceIntervalUpperBounds[1, ], c(0.0088609184, -0.11474637, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results9$repeatedConfidenceIntervalUpperBounds[2, ], c(0.098238963, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results9$repeatedPValues[1, ], c(0.023456573, 0.000443504, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results9$repeatedPValues[2, ], c(0.061679763, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results9$conditionalPowerSimulated[1, ], c(NA_real_, NA_real_, 1, 1)) expect_equal(results9$conditionalPowerSimulated[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results9), NA))) expect_output(print(results9)$show()) invisible(capture.output(expect_error(summary(results9), NA))) expect_output(summary(results9)$show()) results9CodeBased <- eval(parse(text = getObjectRCode(results9, stringWrapParagraphWidth = NULL))) expect_equal(results9CodeBased$piTreatments, results9$piTreatments, tolerance = 1e-05) expect_equal(results9CodeBased$piControl, results9$piControl, tolerance = 1e-05) expect_equal(results9CodeBased$conditionalRejectionProbabilities, results9$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results9CodeBased$repeatedConfidenceIntervalLowerBounds, results9$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results9CodeBased$repeatedConfidenceIntervalUpperBounds, results9$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results9CodeBased$repeatedPValues, results9$repeatedPValues, tolerance = 1e-05) expect_equal(results9CodeBased$conditionalPowerSimulated, results9$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(results9), "character") df <- as.data.frame(results9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # 
@refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmRates} results10 <- getAnalysisResults(design = design3, dataInput = dataExample2, intersectionTest = "Dunnett", normalApproximation = TRUE, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsConditionalDunnett object 'results10' with expected results expect_equal(results10$piTreatments[1, ], 0.5625, tolerance = 1e-05) expect_equal(results10$piTreatments[2, ], NA_real_) expect_equal(results10$piControl[1, ], 0.90196078, tolerance = 1e-05) expect_equal(results10$conditionalRejectionProbabilities[1, ], c(NA_real_, 0.21935683), tolerance = 1e-05) expect_equal(results10$conditionalRejectionProbabilities[2, ], c(NA_real_, 0.13026808), tolerance = 1e-05) expect_equal(results10$conditionalPower[1, ], c(NA_real_, NA_real_)) expect_equal(results10$conditionalPower[2, ], c(NA_real_, NA_real_)) expect_equal(results10$repeatedConfidenceIntervalLowerBounds[1, ], c(NA_real_, -0.46994305), tolerance = 1e-05) expect_equal(results10$repeatedConfidenceIntervalLowerBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results10$repeatedConfidenceIntervalUpperBounds[1, ], c(NA_real_, -0.15490055), tolerance = 1e-05) expect_equal(results10$repeatedConfidenceIntervalUpperBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results10$repeatedPValues[1, ], c(NA_real_, 7.2525431e-05), tolerance = 1e-05) expect_equal(results10$repeatedPValues[2, ], c(NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results10), NA))) expect_output(print(results10)$show()) 
invisible(capture.output(expect_error(summary(results10), NA))) expect_output(summary(results10)$show()) results10CodeBased <- eval(parse(text = getObjectRCode(results10, stringWrapParagraphWidth = NULL))) expect_equal(results10CodeBased$piTreatments, results10$piTreatments, tolerance = 1e-05) expect_equal(results10CodeBased$piControl, results10$piControl, tolerance = 1e-05) expect_equal(results10CodeBased$conditionalRejectionProbabilities, results10$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results10CodeBased$conditionalPower, results10$conditionalPower, tolerance = 1e-05) expect_equal(results10CodeBased$repeatedConfidenceIntervalLowerBounds, results10$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results10CodeBased$repeatedConfidenceIntervalUpperBounds, results10$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results10CodeBased$repeatedPValues, results10$repeatedPValues, tolerance = 1e-05) expect_type(names(results10), "character") df <- as.data.frame(results10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) rpact/tests/testthat/test-f_quality_assurance.R0000644000176200001440000000373314446750002021536 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_design_utilities.R ## | Creation date: 06 February 2023, 12:13:45 ## | File version: $Revision: 
7139 $ ## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Quality Assurance Functions") test_that("Quality assurance functions throw errors when arguments are missing or wrong", { rVersion <- .isMinimumRVersion4() expect_true(rVersion) dummyContent <- "[ OK: 6 ] [FAILED: 3]" res_1 <- .getTestthatResultLine(dummyContent) expect_type(res_1, "character") expect_equal(2 * 2, 4) res_2 <- .getTestthatResultNumberOfFailures(dummyContent) expect_type(res_2, "character") expect_equal(2 * 2, 4) res_3 <- .getTestthatResultNumberOfSkippedTests(dummyContent) expect_type(res_3, "character") expect_equal(2 * 2, 4) expect_error(.downloadUnitTests( testFileTargetDirectory = NULL, token = "token", secret = "secret", connectionType = "pkg")) expect_error(.prepareUnitTestFiles()) expect_error(.downloadUnitTestsViaHttp()) expect_error(.downloadUnitTestsViaFtp()) expect_error(.getConnectionArgument()) expect_error(testPackage(NULL)) expect_error(.testInstalledPackage(NULL)) expect_type(.isCompleteUnitTestSetEnabled(), "logical") })rpact/tests/testthat/test-class_core_plot_settings.R0000644000176200001440000000210014446750002022553 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-class_analysis_dataset.R ## | Creation date: 06 February 2023, 12:04:06 ## | File version: $Revision: 7139 $ ## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $ ## | Last changed by: $Author: 
pahlke $ ## | test_plan_section("Testing the Class 'PlotSettings'") test_that("Test plot settings", { expect_error(PlotSubTitleItem()) expect_type(PlotSubTitleItems(), "S4") expect_type(getPlotSettings(), "S4") expect_type(PlotSettings(), "S4") })rpact/tests/testthat/helper-f_analysis_base_survival.R0000644000176200001440000000417614277150417023062 0ustar liggesusers## | ## | *Unit tests helper functions* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 6117 $ ## | Last changed: $Date: 2022-05-04 15:55:23 +0200 (Mi, 04 Mai 2022) $ ## | Last changed by: $Author: pahlke $ ## | testGetAnalysisResultsPlotData <- function(x, ..., nPlanned = NA_real_, stage = NA_integer_, allocationRatioPlanned = NA_real_) { plotArgs <- .getAnalysisResultsPlotArguments( x = x, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned ) if (x$getDataInput()$isDatasetMeans()) { assumedStDev <- .getOptionalArgument("assumedStDev", ...) if (is.null(assumedStDev)) { assumedStDev <- x$assumedStDev return(.getConditionalPowerPlot( stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, allocationRatioPlanned = plotArgs$allocationRatioPlanned, assumedStDev = assumedStDev, ... )) } } else if (x$getDataInput()$isDatasetRates()) { pi2 <- .getOptionalArgument("pi2", ...) if (is.null(pi2)) { pi2 <- x$pi2 return(.getConditionalPowerPlot( stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, allocationRatioPlanned = plotArgs$allocationRatioPlanned, pi2 = pi2, ... 
)) } } return(.getConditionalPowerPlot( stageResults = plotArgs$stageResults, nPlanned = plotArgs$nPlanned, allocationRatioPlanned = plotArgs$allocationRatioPlanned, ... )) } rpact/tests/testthat/test-f_design_utilities.R0000644000176200001440000004012514370207346021346 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_design_utilities.R ## | Creation date: 06 February 2023, 12:13:45 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Design Utility Functions") test_that("'getPiByLambda' and 'getLambdaByPi' produce corresponding results", { expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 1, kappa = 1), eventTime = 1, kappa = 1), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, 
eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 1, kappa = 3), eventTime = 1, kappa = 3), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 1, kappa = 5), eventTime = 1, kappa = 5), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 6, kappa = 1), eventTime = 6, kappa = 1), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.03, tolerance = 1e-04) 
expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 6, kappa = 3), eventTime = 6, kappa = 3), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 6, kappa = 5), eventTime = 6, kappa = 5), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 6, kappa = 5), eventTime = 6, kappa = 5), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 6, kappa = 5), eventTime = 6, kappa = 5), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 6, kappa = 5), eventTime = 6, kappa = 5), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 6, kappa = 5), eventTime = 6, kappa = 5), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 11, kappa = 1), eventTime = 11, kappa = 1), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 11, kappa = 3), eventTime = 11, kappa = 3), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 11, kappa = 3), eventTime = 11, kappa = 3), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 11, kappa = 3), eventTime = 11, 
kappa = 3), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 11, kappa = 3), eventTime = 11, kappa = 3), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 11, kappa = 3), eventTime = 11, kappa = 3), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 11, kappa = 5), eventTime = 11, kappa = 5), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 11, kappa = 5), eventTime = 11, kappa = 5), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 11, kappa = 5), eventTime = 11, kappa = 5), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 11, kappa = 5), eventTime = 11, kappa = 5), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 11, kappa = 5), eventTime = 11, kappa = 5), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 16, kappa = 1), eventTime = 16, kappa = 1), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 16, kappa = 3), eventTime = 16, kappa = 3), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 16, kappa = 3), eventTime = 16, kappa = 3), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 16, kappa = 3), eventTime = 16, kappa = 3), 0.05, tolerance = 1e-04) 
expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 16, kappa = 3), eventTime = 16, kappa = 3), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 16, kappa = 3), eventTime = 16, kappa = 3), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 16, kappa = 5), eventTime = 16, kappa = 5), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 16, kappa = 5), eventTime = 16, kappa = 5), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 16, kappa = 5), eventTime = 16, kappa = 5), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 16, kappa = 5), eventTime = 16, kappa = 5), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 16, kappa = 5), eventTime = 16, kappa = 5), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 21, kappa = 1), eventTime = 21, kappa = 1), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 21, kappa = 1), eventTime = 21, kappa = 1), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 21, kappa = 1), eventTime = 21, kappa = 1), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 21, kappa = 1), eventTime = 21, kappa = 1), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 21, kappa = 1), eventTime = 21, kappa = 1), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 21, kappa = 3), eventTime = 21, kappa = 3), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 21, kappa = 3), eventTime = 21, kappa = 3), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 21, kappa = 3), eventTime = 21, kappa = 3), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 21, kappa = 3), 
eventTime = 21, kappa = 3), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 21, kappa = 3), eventTime = 21, kappa = 3), 0.09, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.01, eventTime = 21, kappa = 5), eventTime = 21, kappa = 5), 0.01, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.03, eventTime = 21, kappa = 5), eventTime = 21, kappa = 5), 0.03, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.05, eventTime = 21, kappa = 5), eventTime = 21, kappa = 5), 0.05, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.07, eventTime = 21, kappa = 5), eventTime = 21, kappa = 5), 0.07, tolerance = 1e-04) expect_equal(getLambdaByPi(getPiByLambda(0.09, eventTime = 21, kappa = 5), eventTime = 21, kappa = 5), 0.09, tolerance = 1e-04) }) test_that("'getPiecewiseExponentialDistribution' and 'getPiecewiseExponentialQuantile' produce corresponding results", { # @refFS[Formula]{fs:pieceWiseExponentialSurvival} piecewiseLambda <- c(0.03, 0.05, 0.08) piecewiseSurvivalTime <- c(0, 16, 22) time <- seq(2, 50, 4) quantile <- getPiecewiseExponentialDistribution(time, piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda ) y <- getPiecewiseExponentialQuantile(quantile, piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda ) expect_equal(y, time, tolerance = 1e-06) }) test_that("'ppwexp' and 'qpwexp' produce corresponding results", { # @refFS[Formula]{fs:pieceWiseExponentialSurvival} piecewiseLambda <- c(0.03, 0.05, 0.08) piecewiseSurvivalTime <- c(0, 16, 22) time <- seq(2, 50, 4) quantile <- ppwexp(time, s = piecewiseSurvivalTime, lambda = piecewiseLambda ) y <- qpwexp(quantile, s = piecewiseSurvivalTime, lambda = piecewiseLambda ) expect_equal(y, time, tolerance = 1e-06) }) test_that("'getPiecewiseExponentialDistribution' and 'getPiecewiseExponentialQuantile' produce corresponding results ('piecewiseSurvivalTime' defined as list)", { # 
@refFS[Formula]{fs:pieceWiseExponentialSurvival} piecewiseSurvivalTime <- list( "<16" = 0.03, "16 - <22" = 0.05, ">=22" = 0.08 ) time <- seq(2, 50, 4) quantile <- getPiecewiseExponentialDistribution(time, piecewiseSurvivalTime = piecewiseSurvivalTime ) y <- getPiecewiseExponentialQuantile(quantile, piecewiseSurvivalTime = piecewiseSurvivalTime ) expect_equal(y, time, tolerance = 1e-06) }) test_that("'ppwexp' and 'qpwexp' produce corresponding results ('piecewiseSurvivalTime' defined as list)", { # @refFS[Formula]{fs:pieceWiseExponentialSurvival} piecewiseSurvivalTime <- list( "<16" = 0.03, "16 - <22" = 0.05, ">=22" = 0.08 ) time <- seq(2, 50, 4) quantile <- ppwexp(time, s = piecewiseSurvivalTime) y <- qpwexp(quantile, s = piecewiseSurvivalTime) expect_equal(y, time, tolerance = 1e-06) }) test_that("'getPiecewiseExponentialRandomNumbers': test that mean random numbers are as expected", { # @refFS[Formula]{fs:pieceWiseExponentialSurvival} set.seed(123456) piecewiseSurvivalTime <- c(0, 16, 22) piecewiseLambda <- c(0.003, 0.003, 0.003) y <- 1 / mean(getPiecewiseExponentialRandomNumbers(100000, piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda, kappa = 1 )) expect_equal(y, piecewiseLambda[1], tolerance = 5e-04) }) test_that("'rpwexp': test that mean random numbers are as expected", { # @refFS[Formula]{fs:pieceWiseExponentialSurvival} set.seed(123456) piecewiseSurvivalTime <- c(0, 16, 22) piecewiseLambda <- c(0.003, 0.003, 0.003) y <- 1 / mean(rpwexp(100000, s = piecewiseSurvivalTime, lambda = piecewiseLambda, kappa = 1)) expect_equal(y, piecewiseLambda[1], tolerance = 5e-04) }) test_that("'getPiecewiseExponentialRandomNumbers': test that mean random numbers are as expected ('piecewiseSurvivalTime' defined as list)", { # @refFS[Formula]{fs:pieceWiseExponentialRandomVariable} set.seed(123456) piecewiseSurvivalTime <- list( "<16" = 0.003, "16 - <22" = 0.003, ">=22" = 0.003 ) y <- 1 / mean(getPiecewiseExponentialRandomNumbers(100000, 
piecewiseSurvivalTime = piecewiseSurvivalTime, kappa = 1 )) expect_equal(y, 0.003, tolerance = 5e-04) }) test_that("'rpwexp': test that mean random numbers are as expected ('piecewiseSurvivalTime' defined as list)", { # @refFS[Formula]{fs:pieceWiseExponentialRandomVariable} set.seed(123456) piecewiseSurvivalTime <- list( "<16" = 0.003, "16 - <22" = 0.003, ">=22" = 0.003 ) y <- 1 / mean(rpwexp(100000, s = piecewiseSurvivalTime, kappa = 1)) expect_equal(y, 0.003, tolerance = 5e-04) }) test_that("'getPiecewiseExponentialDistribution': test that function call with singel lambda is working", { expect_equal(getPiecewiseExponentialDistribution(4, piecewiseLambda = 0.003), 0.01192829, tolerance = 5e-05) }) test_that("'.convertStageWiseToOverallValues': test that function is working as expected", { x1 <- .convertStageWiseToOverallValues(c(1:5)) ## Comparison of the results of matrixarray object 'x1' with expected results expect_equal(x1[1, ], 1) expect_equal(x1[2, ], 3) expect_equal(x1[3, ], 6) expect_equal(x1[4, ], 10) expect_equal(x1[5, ], 15) x2 <- .convertStageWiseToOverallValues(matrix(c(1:5), ncol = 1)) ## Comparison of the results of matrixarray object 'x2' with expected results expect_equal(x2[1, ], 1) expect_equal(x2[2, ], 3) expect_equal(x2[3, ], 6) expect_equal(x2[4, ], 10) expect_equal(x2[5, ], 15) x3 <- .convertStageWiseToOverallValues(matrix(c(1:5), nrow = 1)) ## Comparison of the results of matrixarray object 'x3' with expected results expect_equal(x3[1, ], c(1, 2, 3, 4, 5)) x4 <- .convertStageWiseToOverallValues(matrix(c(1:5, 1:5), ncol = 2)) ## Comparison of the results of matrixarray object 'x4' with expected results expect_equal(x4[1, ], c(1, 1)) expect_equal(x4[2, ], c(3, 3)) expect_equal(x4[3, ], c(6, 6)) expect_equal(x4[4, ], c(10, 10)) expect_equal(x4[5, ], c(15, 15)) x5 <- .convertStageWiseToOverallValues(matrix(sort(rep(1:5, 2)), nrow = 2)) ## Comparison of the results of matrixarray object 'x5' with expected results expect_equal(x5[1, ], c(1, 2, 3, 
4, 5)) expect_equal(x5[2, ], c(2, 4, 6, 8, 10)) }) rpact/tests/testthat/test-pkgname.R0000644000176200001440000000200614446750002017107 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_simulation_multiarm_survival.R ## | Creation date: 06 February 2023, 12:14:51 ## | File version: $Revision: 7139 $ ## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Package Functions") test_that("pkgname.R", { expect_true(is.function(.onAttach)) expect_true(is.function(.onUnload)) expect_true(is.function(.onDetach)) })rpact/tests/testthat/helper-f_core_output_formats.R0000644000176200001440000000627114446750002022405 0ustar liggesusers## | ## | *Unit tests helper functions* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7139 $ ## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | .assertIsValidOutputFormatOptionValue <- function(optionKey, 
optionValue) { if (is.null(optionValue) || length(optionValue) == 0 || nchar(trimws(optionValue)) == 0) { return(invisible()) } C_OUTPUT_FORMAT_ARGUMENTS <- rpact:::C_OUTPUT_FORMAT_ARGUMENTS C_ROUND_FUNCTIONS <- rpact:::C_ROUND_FUNCTIONS parts <- base::strsplit(optionValue, " *, *", fixed = FALSE)[[1]] if (length(parts) == 0) { stop( "the value (", optionValue, ") of output format option '", optionKey, "' is invalid" ) } for (part in parts) { if (!grepl(" *= *", part)) { stop( "'", optionKey, "' (", part, ") must contain a valid argument-value-pair: \"argument = value\"" ) } keyValuePair <- base::strsplit(part, " *= *", fixed = FALSE)[[1]] if (length(keyValuePair) != 2) { stop( "'", optionKey, "' contains an invalid argument-value-pair: ", part ) } key <- trimws(keyValuePair[1]) if (nchar(key) == 0) { stop( "'", optionKey, "' contains an invalid argument" ) } if (!(key %in% C_OUTPUT_FORMAT_ARGUMENTS)) { stop( "'", optionKey, "' contains an invalid argument: '", key, "'" ) } value <- trimws(keyValuePair[2]) if (nchar(value) == 0) { stop( "'", optionKey, "' contains an invalid value" ) } if (key %in% c("digits", "nsmall")) { if (grepl("\\D", value)) { stop( "the value (", value, ") of '", optionKey, "' must be an integer value" ) } } else if (key %in% c("roundFunction")) { if (!(value %in% C_ROUND_FUNCTIONS)) { stop( "the value (", value, ") of '", optionKey, "' must be one of these character values: ", .arrayToString(C_ROUND_FUNCTIONS, encapsulate = TRUE) ) } } else if (key %in% c("trimSingleZeros", "futilityProbabilityEnabled")) { if (!grepl("TRUE|FALSE", toupper(value))) { stop( "the value (", value, ") of '", optionKey, "' must be a logical value" ) } } } } rpact/tests/testthat/test-f_analysis_enrichment_survival.R0000644000176200001440000007413214370207346024001 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and 
Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_analysis_enrichment_survival.R ## | Creation date: 06 February 2023, 12:09:31 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Analysis Enrichment Survival Function") test_that("'getAnalysisResults': enrichment survival, one sub-population, non-stratified analysis, select S1 at second, gMax = 2", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentSurvival} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedTestEnrichmentSurvival} # @refFS[Formula]{fs:testStatisticEnrichmentSurvival} S1 <- getDataset( events = c(37, 35, 22), logRanks = c(1.66, 1.38, 1.22), allocationRatios = c(1, 1, 1) ) F <- getDataset( events = c(66, 55, NA), logRanks = c(1.98, 1.57, NA), allocationRatios = c(1, 1, NA) ) dataInput1 <- getDataset(S1 = S1, F = F) ## Comparison of the results of DatasetSurvival object 'dataInput1' with expected results expect_equal(dataInput1$events, c(37, 66, 35, 55, 22, NA_real_)) expect_equal(dataInput1$allocationRatios, c(1, 1, 1, 1, 1, NA_real_), tolerance = 
1e-07) expect_equal(dataInput1$logRanks, c(1.66, 1.98, 1.38, 1.57, 1.22, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput1), NA))) expect_output(print(dataInput1)$show()) invisible(capture.output(expect_error(summary(dataInput1), NA))) expect_output(summary(dataInput1)$show()) dataInput1CodeBased <- eval(parse(text = getObjectRCode(dataInput1, stringWrapParagraphWidth = NULL))) expect_equal(dataInput1CodeBased$events, dataInput1$events, tolerance = 1e-05) expect_equal(dataInput1CodeBased$allocationRatios, dataInput1$allocationRatios, tolerance = 1e-05) expect_equal(dataInput1CodeBased$logRanks, dataInput1$logRanks, tolerance = 1e-05) expect_type(names(dataInput1), "character") df <- as.data.frame(dataInput1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design1 <- getDesignInverseNormal( kMax = 3, typeOfDesign = "asP", typeBetaSpending = "bsKD", gammaB = 1.3, alpha = 0.025, informationRates = c(0.4, 0.7, 1), bindingFutility = FALSE, beta = 0.1 ) x1 <- getAnalysisResults( design = design1, dataInput = dataInput1, directionUpper = TRUE, stage = 3, allocationRatioPlanned = 1, intersectionTest = "SpiessensDebois" ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x1' with expected results expect_equal(x1$thetaH1[1, ], 1.6657832, tolerance = 1e-07) expect_equal(x1$thetaH1[2, ], NA_real_) expect_equal(x1$conditionalRejectionProbabilities[1, ], c(0.082268614, 0.17873234, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities[2, ], c(0.10062364, 0.20651274, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[1, ], c(0.77807449, 
0.90042909, 0.98057908), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[2, ], c(0.89663713, 0.9859619, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[1, ], c(3.8287647, 3.0779079, 2.8418481), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[2, ], c(2.9564481, 2.5412465, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[1, ], c(0.09262834, 0.035310721, 0.016798032), tolerance = 1e-07) expect_equal(x1$repeatedPValues[2, ], c(0.074049848, 0.03027247, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$thetaH1, x1$thetaH1, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x2 <- getAnalysisResults( design = design1, dataInput = dataInput1, directionUpper = TRUE, stage = 3, allocationRatioPlanned = 1, intersectionTest = "Sidak" ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x2' with expected 
results expect_equal(x2$thetaH1[1, ], 1.6657832, tolerance = 1e-07) expect_equal(x2$thetaH1[2, ], NA_real_) expect_equal(x2$conditionalRejectionProbabilities[1, ], c(0.082268614, 0.14135111, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[2, ], c(0.08442718, 0.14135111, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[1, ], c(0.76355966, 0.87078132, 0.95099133), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[2, ], c(0.88408373, 0.96064864, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[1, ], c(3.9015478, 3.1815164, 2.9283489), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[2, ], c(2.9984281, 2.606883, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[1, ], c(0.09262834, 0.044241863, 0.02067471), tolerance = 1e-07) expect_equal(x2$repeatedPValues[2, ], c(0.090100155, 0.044241863, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$thetaH1, x2$thetaH1, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) 
expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design2 <- getDesignFisher(kMax = 3, method = "equalAlpha", alpha = 0.025, informationRates = c(0.4, 0.7, 1)) x3 <- getAnalysisResults( design = design2, dataInput = dataInput1, stratifiedAnalysis = TRUE, directionUpper = TRUE, stage = 2, nPlanned = 30, allocationRatioPlanned = 1, intersectionTest = "SpiessensDebois" ) ## Comparison of the results of AnalysisResultsEnrichmentFisher object 'x3' with expected results expect_equal(x3$thetaH1[1, ], 1.6607445, tolerance = 1e-07) expect_equal(x3$thetaH1[2, ], 1.5814324, tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities[1, ], c(0.058300881, 0.080849353, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities[2, ], c(0.073230522, 0.100897, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower[1, ], c(NA_real_, NA_real_, 0.49594042), tolerance = 1e-07) expect_equal(x3$conditionalPower[2, ], c(NA_real_, NA_real_, 0.49151681), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[1, ], c(0.77887144, 0.87495484, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[2, ], c(0.89732462, 0.9655584, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[1, ], c(3.8248463, 3.1694643, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[2, ], c(2.9541829, 2.6004038, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[1, ], c(0.086600177, 0.047636937, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[2, ], c(0.070085432, 0.040358509, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$thetaH1, x3$thetaH1, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': enrichment survival, one sub-population, stratified data input, select S1 at first, gMax = 2", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentSurvival} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedTestEnrichmentSurvival} # @refFS[Formula]{fs:testStatisticEnrichmentSurvival} S1 <- getDataset( overallExpectedEvents = c(13.4, 35.4, 43.7), 
overallEvents = c(16, 38, 47), overallVarianceEvents = c(2.8, 4.7, 3.4), overallAllocationRatios = c(1, 1, 1) ) R <- getDataset( overallExpectedEvents = c(23.3, NA, NA), overallEvents = c(27, NA, NA), overallVarianceEvents = c(3.9, NA, NA), overallAllocationRatios = c(1, NA, NA) ) dataInput2 <- getDataset(S1 = S1, R = R) ## Comparison of the results of DatasetEnrichmentSurvival object 'dataInput2' with expected results expect_equal(dataInput2$events, c(16, 27, 22, NA_real_, 9, NA_real_)) expect_equal(dataInput2$allocationRatios, c(1, 1, 1, NA_real_, 1, NA_real_), tolerance = 1e-07) expect_equal(dataInput2$expectedEvents, c(13.4, 23.3, 22, NA_real_, 8.3, NA_real_), tolerance = 1e-07) expect_equal(dataInput2$varianceEvents, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput2), NA))) expect_output(print(dataInput2)$show()) invisible(capture.output(expect_error(summary(dataInput2), NA))) expect_output(summary(dataInput2)$show()) dataInput2CodeBased <- eval(parse(text = getObjectRCode(dataInput2, stringWrapParagraphWidth = NULL))) expect_equal(dataInput2CodeBased$events, dataInput2$events, tolerance = 1e-05) expect_equal(dataInput2CodeBased$allocationRatios, dataInput2$allocationRatios, tolerance = 1e-05) expect_equal(dataInput2CodeBased$expectedEvents, dataInput2$expectedEvents, tolerance = 1e-05) expect_equal(dataInput2CodeBased$varianceEvents, dataInput2$varianceEvents, tolerance = 1e-05) expect_type(names(dataInput2), "character") df <- as.data.frame(dataInput2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design1 <- getDesignInverseNormal( kMax = 3, typeOfDesign = "asP", typeBetaSpending = "bsKD", gammaB = 1.3, alpha = 0.025, informationRates = c(0.4, 0.7, 1), bindingFutility = FALSE, beta = 0.1 ) x4 <- getAnalysisResults( 
design = design1, dataInput = dataInput2, stratifiedAnalysis = TRUE, directionUpper = TRUE, stage = 2, nPlanned = 30, thetaH1 = 2.5, allocationRatioPlanned = 1, intersectionTest = "SpiessensDebois" ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x4' with expected results expect_equal(x4$conditionalRejectionProbabilities[1, ], c(0.066531397, 0.014937437, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities[2, ], c(0.21112037, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalPower[1, ], c(NA_real_, NA_real_, 0.63217527), tolerance = 1e-07) expect_equal(x4$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x4$repeatedConfidenceIntervalLowerBounds[1, ], c(0.63929986, 0.68758318, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds[2, ], c(0.99553926, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds[1, ], c(7.397772, 3.5674257, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds[2, ], c(4.4332688, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues[1, ], c(0.11491566, 0.11491566, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues[2, ], c(0.026005739, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$conditionalRejectionProbabilities, x4$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPower, x4$conditionalPower, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalLowerBounds, x4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) 
expect_equal(x4CodeBased$repeatedConfidenceIntervalUpperBounds, x4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedPValues, x4$repeatedPValues, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': enrichment survival, two sub-populations, non-stratified analysis, select S1 and S2 at first IA, select S1 at second, gMax = 3", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentRates} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:testStatisticEnrichmentRates} design1 <- getDesignInverseNormal( kMax = 3, typeOfDesign = "asP", typeBetaSpending = "bsKD", gammaB = 1.3, alpha = 0.02, informationRates = c(0.4, 0.7, 1), bindingFutility = FALSE, beta = 0.1 ) F <- getDataset( events = c(66, NA, NA), logRanks = -c(2.18, NA, NA) ) S1 <- getDataset( events = c(37, 13, 26), logRanks = -c(1.66, 1.239, 0.785) ) S2 <- getDataset( events = c(31, 18, NA), logRanks = -c(1.98, 1.064, NA) ) dataInput3 <- getDataset(S1 = S1, S2 = S2, F = F) ## Comparison of the results of DatasetSurvival object 'dataInput3' with expected results expect_equal(dataInput3$events, c(37, 31, 66, 13, 18, NA_real_, 26, NA_real_, NA_real_)) expect_equal(dataInput3$allocationRatios, c(1, 1, 
1, 1, 1, NA_real_, 1, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(dataInput3$logRanks, c(-1.66, -1.98, -2.18, -1.239, -1.064, NA_real_, -0.785, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput3), NA))) expect_output(print(dataInput3)$show()) invisible(capture.output(expect_error(summary(dataInput3), NA))) expect_output(summary(dataInput3)$show()) dataInput3CodeBased <- eval(parse(text = getObjectRCode(dataInput3, stringWrapParagraphWidth = NULL))) expect_equal(dataInput3CodeBased$events, dataInput3$events, tolerance = 1e-05) expect_equal(dataInput3CodeBased$allocationRatios, dataInput3$allocationRatios, tolerance = 1e-05) expect_equal(dataInput3CodeBased$logRanks, dataInput3$logRanks, tolerance = 1e-05) expect_type(names(dataInput3), "character") df <- as.data.frame(dataInput3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x1 <- getAnalysisResults( design = design1, dataInput = dataInput3, directionUpper = FALSE, stage = 2, nPlanned = 30, allocationRatioPlanned = 1, intersectionTest = "Sidak" ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x1' with expected results expect_equal(x1$thetaH1[1, ], 0.55845203, tolerance = 1e-07) expect_equal(x1$thetaH1[2, ], 0.53035001, tolerance = 1e-07) expect_equal(x1$thetaH1[3, ], NA_real_) expect_equal(x1$conditionalRejectionProbabilities[1, ], c(0.063444981, 0.051842822, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities[2, ], c(0.065210901, 0.051842822, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities[3, ], c(0.070888966, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower[1, ], c(NA_real_, NA_real_, 0.48733039), tolerance = 1e-07) expect_equal(x1$conditionalPower[2, ], c(NA_real_, 
NA_real_, 0.54365075), tolerance = 1e-07) expect_equal(x1$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[1, ], c(0.23870487, 0.2370187, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[2, ], c(0.1863782, 0.22932092, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[3, ], c(0.30101352, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[1, ], c(1.406238, 1.2861572, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[2, ], c(1.2936975, 1.2386982, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[3, ], c(1.1356925, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[1, ], c(0.09262834, 0.074349301, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[2, ], c(0.090100155, 0.074349301, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[3, ], c(0.082670093, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$thetaH1, x1$thetaH1, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) 
expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': enrichment survival, two sub-populations, stratified analysis, select S1 and S2 at first IA, select S1 at second, gMax = 3", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentRates} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:testStatisticEnrichmentRates} S1 <- getDataset( overallExpectedEvents = c(13.4, 35.4, 43.7), overallEvents = c(16, 37, 47), overallVarianceEvents = c(2.8, 4.7, 3.4), overallAllocationRatios = c(1, 1, 1) ) S2 <- getDataset( overallExpectedEvents = c(11.5, 31.1, NA), overallEvents = c(15, 33, NA), overallVarianceEvents = c(2.2, 4.4, NA), overallAllocationRatios = c(1, 1, NA) ) S12 <- getDataset( overallExpectedEvents = c(10.1, 29.6, 39.1), overallEvents = c(11, 31, 42), overallVarianceEvents = c(2.8, 4.7, 3.4), overallAllocationRatios = c(1, 1, 1) ) R <- getDataset( overallExpectedEvents = c(23.3, NA, NA), overallEvents = c(25, NA, NA), overallVarianceEvents = c(3.9, NA, NA), overallAllocationRatios = c(1, NA, NA) ) dataInput4 <- getDataset(S1 = S1, S2 = S2, S12 = S12, R = R) ## Comparison of the results of DatasetEnrichmentSurvival object 'dataInput4' with expected results expect_equal(dataInput4$events, c(16, 15, 11, 25, 
21, 18, 20, NA_real_, 10, NA_real_, 11, NA_real_)) expect_equal(dataInput4$allocationRatios, c(1, 1, 1, 1, 1, 1, 1, NA_real_, 1, NA_real_, 1, NA_real_), tolerance = 1e-07) expect_equal(dataInput4$expectedEvents, c(13.4, 11.5, 10.1, 23.3, 22, 19.6, 19.5, NA_real_, 8.3, NA_real_, 9.5, NA_real_), tolerance = 1e-07) expect_equal(dataInput4$varianceEvents, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput4), NA))) expect_output(print(dataInput4)$show()) invisible(capture.output(expect_error(summary(dataInput4), NA))) expect_output(summary(dataInput4)$show()) dataInput4CodeBased <- eval(parse(text = getObjectRCode(dataInput4, stringWrapParagraphWidth = NULL))) expect_equal(dataInput4CodeBased$events, dataInput4$events, tolerance = 1e-05) expect_equal(dataInput4CodeBased$allocationRatios, dataInput4$allocationRatios, tolerance = 1e-05) expect_equal(dataInput4CodeBased$expectedEvents, dataInput4$expectedEvents, tolerance = 1e-05) expect_equal(dataInput4CodeBased$varianceEvents, dataInput4$varianceEvents, tolerance = 1e-05) expect_type(names(dataInput4), "character") df <- as.data.frame(dataInput4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design1 <- getDesignInverseNormal( kMax = 3, typeOfDesign = "asP", typeBetaSpending = "bsKD", gammaB = 1.3, alpha = 0.02, informationRates = c(0.4, 0.7, 1), bindingFutility = FALSE, beta = 0.1 ) x2 <- getAnalysisResults( design = design1, dataInput = dataInput4, stratifiedAnalysis = TRUE, directionUpper = TRUE, stage = 2, nPlanned = 30, thetaH1 = 2, allocationRatioPlanned = 1, intersectionTest = "Sidak" ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x2' with expected results 
expect_equal(x2$conditionalRejectionProbabilities[1, ], c(0.043010929, 0.0010677592, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[2, ], c(0.063395248, 0.0010677592, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[3, ], c(0.15397803, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower[1, ], c(NA_real_, NA_real_, 0.12050895), tolerance = 1e-07) expect_equal(x2$conditionalPower[2, ], c(NA_real_, NA_real_, 0.12050895), tolerance = 1e-07) expect_equal(x2$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[1, ], c(0.62578554, 0.64439022, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[2, ], c(0.75127376, 0.66639106, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[3, ], c(0.96321381, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[1, ], c(4.9893102, 2.8192192, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[2, ], c(6.2314391, 3.0969281, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[3, ], c(3.5981376, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[1, ], c(0.13298203, 0.13298203, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[2, ], c(0.092701773, 0.092701773, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[3, ], c(0.031299575, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) 
expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) rpact/tests/testthat/test-f_simulation_base_rates.R0000644000176200001440000015075614440565454022377 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_simulation_base_rates.R ## | Creation date: 06 February 2023, 12:13:50 ## | File version: $Revision: 7065 $ ## | Last changed: $Date: 2023-06-09 11:04:44 +0200 (Fr, 09 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Simulation Rates Function") test_that("'getSimulationRates': check several configurations", { # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationRates} # @refFS[Formula]{fs:simulationOneArmRatesGenerate} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:pValuesOneRateAlternativeSmaller} # 
@refFS[Formula]{fs:simulationTwoArmRatesGenerate} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:pValuesTwoRatesApproximationAlternativeGreater} # @refFS[Formula]{fs:pValuesTwoRatesApproximationAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} seed <- 99123 maxNumberOfIterations <- 100 options(width = 180) maxNumberOfSubjects <- 90 informationRates <- (1:3) / 3 plannedSubjects <- round(informationRates * maxNumberOfSubjects) x1 <- getSimulationRates( design = getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = informationRates ), groups = 2, riskRatio = TRUE, thetaH0 = 0.8, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x1' with expected results expect_equal(x1$effect, c(0.2, 0.7, 1.2, 1.7), tolerance = 1e-07) expect_equal(x1$iterations[1, ], c(100, 100, 100, 100)) expect_equal(x1$iterations[2, ], c(78, 93, 99, 96)) expect_equal(x1$iterations[3, ], c(41, 68, 56, 40)) expect_equal(x1$overallReject, c(0.05, 0.23, 0.74, 0.88), tolerance = 1e-07) expect_equal(x1$rejectPerStage[1, ], c(0, 0, 0, 0.04), tolerance = 1e-07) expect_equal(x1$rejectPerStage[2, ], c(0.02, 0.04, 0.34, 0.54), tolerance = 1e-07) expect_equal(x1$rejectPerStage[3, ], c(0.03, 0.19, 0.4, 0.3), tolerance = 1e-07) expect_equal(x1$futilityStop, c(0.57, 0.28, 0.1, 0.02), tolerance = 1e-07) expect_equal(x1$futilityPerStage[1, ], c(0.22, 0.07, 0.01, 0), tolerance = 1e-07) expect_equal(x1$futilityPerStage[2, ], c(0.35, 0.21, 0.09, 0.02), tolerance = 1e-07) expect_equal(x1$earlyStop, c(0.59, 0.32, 0.44, 0.6), tolerance = 1e-07) expect_equal(x1$expectedNumberOfSubjects, c(65.7, 78.3, 76.5, 70.8), tolerance = 1e-07) 
expect_equal(x1$sampleSizes[1, ], c(30, 30, 30, 30)) expect_equal(x1$sampleSizes[2, ], c(30, 30, 30, 30)) expect_equal(x1$sampleSizes[3, ], c(30, 30, 30, 30)) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(0.090943215, 0.15808459, 0.48521663, 0.52642331), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[3, ], c(0.22475932, 0.38294099, 0.60961381, 0.67377136), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$effect, x1$effect, tolerance = 1e-05) expect_equal(x1CodeBased$iterations, x1$iterations, tolerance = 1e-05) expect_equal(x1CodeBased$overallReject, x1$overallReject, tolerance = 1e-05) expect_equal(x1CodeBased$rejectPerStage, x1$rejectPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$futilityStop, x1$futilityStop, tolerance = 1e-05) expect_equal(x1CodeBased$futilityPerStage, x1$futilityPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$earlyStop, x1$earlyStop, tolerance = 1e-05) expect_equal(x1CodeBased$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x1CodeBased$sampleSizes, x1$sampleSizes, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPowerAchieved, x1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x2 <- getSimulationRates( design = getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = informationRates ), groups = 2, riskRatio = FALSE, thetaH0 = -0.1, 
plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x2' with expected results expect_equal(x2$effect, c(0.1, 0.2, 0.3, 0.4), tolerance = 1e-07) expect_equal(x2$iterations[1, ], c(100, 100, 100, 100)) expect_equal(x2$iterations[2, ], c(84, 95, 100, 97)) expect_equal(x2$iterations[3, ], c(55, 73, 64, 42)) expect_equal(x2$overallReject, c(0.08, 0.39, 0.81, 0.88), tolerance = 1e-07) expect_equal(x2$rejectPerStage[1, ], c(0, 0, 0, 0.03), tolerance = 1e-07) expect_equal(x2$rejectPerStage[2, ], c(0.02, 0.09, 0.33, 0.53), tolerance = 1e-07) expect_equal(x2$rejectPerStage[3, ], c(0.06, 0.3, 0.48, 0.32), tolerance = 1e-07) expect_equal(x2$futilityStop, c(0.43, 0.18, 0.03, 0.02), tolerance = 1e-07) expect_equal(x2$futilityPerStage[1, ], c(0.16, 0.05, 0, 0), tolerance = 1e-07) expect_equal(x2$futilityPerStage[2, ], c(0.27, 0.13, 0.03, 0.02), tolerance = 1e-07) expect_equal(x2$earlyStop, c(0.45, 0.27, 0.36, 0.58), tolerance = 1e-07) expect_equal(x2$expectedNumberOfSubjects, c(71.7, 80.4, 79.2, 71.7), tolerance = 1e-07) expect_equal(x2$sampleSizes[1, ], c(30, 30, 30, 30)) expect_equal(x2$sampleSizes[2, ], c(30, 30, 30, 30)) expect_equal(x2$sampleSizes[3, ], c(30, 30, 30, 30)) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0.10237911, 0.25306891, 0.43740091, 0.54067879), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[3, ], c(0.30171473, 0.4623858, 0.59071853, 0.68245332), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$effect, x2$effect, 
tolerance = 1e-05) expect_equal(x2CodeBased$iterations, x2$iterations, tolerance = 1e-05) expect_equal(x2CodeBased$overallReject, x2$overallReject, tolerance = 1e-05) expect_equal(x2CodeBased$rejectPerStage, x2$rejectPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$futilityStop, x2$futilityStop, tolerance = 1e-05) expect_equal(x2CodeBased$futilityPerStage, x2$futilityPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$earlyStop, x2$earlyStop, tolerance = 1e-05) expect_equal(x2CodeBased$expectedNumberOfSubjects, x2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x2CodeBased$sampleSizes, x2$sampleSizes, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPowerAchieved, x2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x3 <- getSimulationRates( design = getDesignInverseNormal( futilityBounds = c(-0.5, 0.5), informationRates = informationRates ), groups = 1, thetaH0 = 0.2, pi1 = seq(0.2, 0.4, 0.05), plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x3' with expected results expect_equal(x3$effect, c(0, 0.05, 0.1, 0.15, 0.2), tolerance = 1e-07) expect_equal(x3$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x3$iterations[2, ], c(78, 91, 96, 90, 72)) expect_equal(x3$iterations[3, ], c(32, 65, 62, 37, 6)) expect_equal(x3$overallReject, c(0.03, 0.3, 0.6, 0.93, 0.99), tolerance = 1e-07) expect_equal(x3$rejectPerStage[1, ], c(0, 0.02, 0.04, 0.1, 0.28), tolerance = 1e-07) expect_equal(x3$rejectPerStage[2, ], c(0.01, 0.06, 0.28, 0.53, 0.66), tolerance = 1e-07) expect_equal(x3$rejectPerStage[3, ], c(0.02, 0.22, 0.28, 0.3, 0.05), tolerance = 1e-07) expect_equal(x3$futilityStop, c(0.67, 0.27, 
0.06, 0, 0), tolerance = 1e-07) expect_equal(x3$futilityPerStage[1, ], c(0.22, 0.07, 0, 0, 0), tolerance = 1e-07) expect_equal(x3$futilityPerStage[2, ], c(0.45, 0.2, 0.06, 0, 0), tolerance = 1e-07) expect_equal(x3$earlyStop, c(0.68, 0.35, 0.38, 0.63, 0.94), tolerance = 1e-07) expect_equal(x3$expectedNumberOfSubjects, c(63, 76.8, 77.4, 68.1, 53.4), tolerance = 1e-07) expect_equal(x3$sampleSizes[1, ], c(30, 30, 30, 30, 30)) expect_equal(x3$sampleSizes[2, ], c(30, 30, 30, 30, 30)) expect_equal(x3$sampleSizes[3, ], c(30, 30, 30, 30, 30)) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.12773913, 0.18983473, 0.36146118, 0.53982038, 0.7268178), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[3, ], c(0.32676971, 0.35596086, 0.46114911, 0.56126649, 0.75350644), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$effect, x3$effect, tolerance = 1e-05) expect_equal(x3CodeBased$iterations, x3$iterations, tolerance = 1e-05) expect_equal(x3CodeBased$overallReject, x3$overallReject, tolerance = 1e-05) expect_equal(x3CodeBased$rejectPerStage, x3$rejectPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$futilityStop, x3$futilityStop, tolerance = 1e-05) expect_equal(x3CodeBased$futilityPerStage, x3$futilityPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$earlyStop, x3$earlyStop, tolerance = 1e-05) expect_equal(x3CodeBased$expectedNumberOfSubjects, x3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x3CodeBased$sampleSizes, x3$sampleSizes, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPowerAchieved, x3$conditionalPowerAchieved, tolerance = 1e-05) 
expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x4 <- getSimulationRates( design = getDesignInverseNormal( futilityBounds = c(0.5, 0.5), informationRates = informationRates ), groups = 2, riskRatio = TRUE, thetaH0 = 1.5, pi1 = seq(0.05, 0.25, 0.05), plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, directionUpper = FALSE, allocationRatioPlanned = 3, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x4' with expected results expect_equal(x4$effect, c(-1.25, -1, -0.75, -0.5, -0.25), tolerance = 1e-07) expect_equal(x4$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x4$iterations[2, ], c(74, 64, 47, 36, 39)) expect_equal(x4$iterations[3, ], c(28, 28, 30, 20, 25)) expect_equal(x4$overallReject, c(0.66, 0.51, 0.19, 0.08, 0.1), tolerance = 1e-07) expect_equal(x4$rejectPerStage[1, ], c(0.06, 0.05, 0.02, 0, 0), tolerance = 1e-07) expect_equal(x4$rejectPerStage[2, ], c(0.43, 0.29, 0.09, 0.04, 0.04), tolerance = 1e-07) expect_equal(x4$rejectPerStage[3, ], c(0.17, 0.17, 0.08, 0.04, 0.06), tolerance = 1e-07) expect_equal(x4$futilityStop, c(0.23, 0.38, 0.59, 0.76, 0.71), tolerance = 1e-07) expect_equal(x4$futilityPerStage[1, ], c(0.2, 0.31, 0.51, 0.64, 0.61), tolerance = 1e-07) expect_equal(x4$futilityPerStage[2, ], c(0.03, 0.07, 0.08, 0.12, 0.1), tolerance = 1e-07) expect_equal(x4$earlyStop, c(0.72, 0.72, 0.7, 0.8, 0.75), tolerance = 1e-07) expect_equal(x4$expectedNumberOfSubjects, c(60.6, 57.6, 53.1, 46.8, 49.2), tolerance = 1e-07) expect_equal(x4$sampleSizes[1, ], c(30, 30, 30, 30, 30)) expect_equal(x4$sampleSizes[2, ], c(30, 30, 30, 30, 30)) expect_equal(x4$sampleSizes[3, ], c(30, 30, 30, 30, 30)) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) 
expect_equal(x4$conditionalPowerAchieved[2, ], c(0.65569733, 0.50411153, 0.40992455, 0.37112776, 0.28877148), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[3, ], c(0.52876953, 0.55375049, 0.46252843, 0.37280654, 0.34687207), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$effect, x4$effect, tolerance = 1e-05) expect_equal(x4CodeBased$iterations, x4$iterations, tolerance = 1e-05) expect_equal(x4CodeBased$overallReject, x4$overallReject, tolerance = 1e-05) expect_equal(x4CodeBased$rejectPerStage, x4$rejectPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$futilityStop, x4$futilityStop, tolerance = 1e-05) expect_equal(x4CodeBased$futilityPerStage, x4$futilityPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$earlyStop, x4$earlyStop, tolerance = 1e-05) expect_equal(x4CodeBased$expectedNumberOfSubjects, x4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x4CodeBased$sampleSizes, x4$sampleSizes, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPowerAchieved, x4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x5 <- getSimulationRates( design = getDesignInverseNormal( futilityBounds = c(0.5, 0.5), informationRates = informationRates ), groups = 2, riskRatio = FALSE, thetaH0 = 0.1, plannedSubjects = plannedSubjects, maxNumberOfIterations = maxNumberOfIterations, allocationRatioPlanned = 3, directionUpper = FALSE, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x5' with expected results 
expect_equal(x5$effect, c(-0.1, 2.7755576e-17, 0.1, 0.2), tolerance = 1e-07) expect_equal(x5$iterations[1, ], c(100, 100, 100, 100)) expect_equal(x5$iterations[2, ], c(50, 41, 12, 2)) expect_equal(x5$iterations[3, ], c(34, 29, 3, 0)) expect_equal(x5$overallReject, c(0.22, 0.03, 0, 0), tolerance = 1e-07) expect_equal(x5$rejectPerStage[1, ], c(0.01, 0, 0, 0), tolerance = 1e-07) expect_equal(x5$rejectPerStage[2, ], c(0.09, 0.02, 0, 0), tolerance = 1e-07) expect_equal(x5$rejectPerStage[3, ], c(0.12, 0.01, 0, 0), tolerance = 1e-07) expect_equal(x5$futilityStop, c(0.56, 0.69, 0.97, 1), tolerance = 1e-07) expect_equal(x5$futilityPerStage[1, ], c(0.49, 0.59, 0.88, 0.98), tolerance = 1e-07) expect_equal(x5$futilityPerStage[2, ], c(0.07, 0.1, 0.09, 0.02), tolerance = 1e-07) expect_equal(x5$earlyStop, c(0.66, 0.71, 0.97, 1), tolerance = 1e-07) expect_equal(x5$expectedNumberOfSubjects, c(55.2, 51, 34.5, 30.6), tolerance = 1e-07) expect_equal(x5$sampleSizes[1, ], c(30, 30, 30, 30)) expect_equal(x5$sampleSizes[2, ], c(30, 30, 30, 30)) expect_equal(x5$sampleSizes[3, ], c(30, 30, 30, 0)) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.36523014, 0.20927326, 0.16995311, 0.25129054), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[3, ], c(0.43064609, 0.32068397, 0.041565592, NaN), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$effect, x5$effect, tolerance = 1e-05) expect_equal(x5CodeBased$iterations, x5$iterations, tolerance = 1e-05) expect_equal(x5CodeBased$overallReject, x5$overallReject, tolerance = 1e-05) expect_equal(x5CodeBased$rejectPerStage, x5$rejectPerStage, tolerance 
= 1e-05) expect_equal(x5CodeBased$futilityStop, x5$futilityStop, tolerance = 1e-05) expect_equal(x5CodeBased$futilityPerStage, x5$futilityPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$earlyStop, x5$earlyStop, tolerance = 1e-05) expect_equal(x5CodeBased$expectedNumberOfSubjects, x5$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x5CodeBased$sampleSizes, x5$sampleSizes, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPowerAchieved, x5$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x6 <- getSimulationRates( design = getDesignInverseNormal( futilityBounds = c(0.5, 0.5), informationRates = informationRates ), groups = 1, thetaH0 = 0.4, pi1 = seq(0.2, 0.4, 0.05), plannedSubjects = plannedSubjects, directionUpper = FALSE, maxNumberOfIterations = maxNumberOfIterations, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x6' with expected results expect_equal(x6$effect, c(-0.2, -0.15, -0.1, -0.05, 0), tolerance = 1e-07) expect_equal(x6$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x6$iterations[2, ], c(91, 89, 66, 56, 39)) expect_equal(x6$iterations[3, ], c(19, 49, 51, 48, 24)) expect_equal(x6$overallReject, c(0.92, 0.78, 0.4, 0.15, 0.03), tolerance = 1e-07) expect_equal(x6$rejectPerStage[1, ], c(0.03, 0.01, 0, 0, 0), tolerance = 1e-07) expect_equal(x6$rejectPerStage[2, ], c(0.72, 0.4, 0.14, 0.01, 0.01), tolerance = 1e-07) expect_equal(x6$rejectPerStage[3, ], c(0.17, 0.37, 0.26, 0.14, 0.02), tolerance = 1e-07) expect_equal(x6$futilityStop, c(0.06, 0.1, 0.35, 0.51, 0.75), tolerance = 1e-07) expect_equal(x6$futilityPerStage[1, ], c(0.06, 0.1, 0.34, 0.44, 0.61), tolerance = 1e-07) expect_equal(x6$futilityPerStage[2, ], c(0, 0, 0.01, 0.07, 0.14), tolerance = 1e-07) expect_equal(x6$earlyStop, 
c(0.81, 0.51, 0.49, 0.52, 0.76), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(63, 71.4, 65.1, 61.2, 48.9), tolerance = 1e-07) expect_equal(x6$sampleSizes[1, ], c(30, 30, 30, 30, 30)) expect_equal(x6$sampleSizes[2, ], c(30, 30, 30, 30, 30)) expect_equal(x6$sampleSizes[3, ], c(30, 30, 30, 30, 30)) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.72335875, 0.55247274, 0.3843863, 0.29482523, 0.18598438), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[3, ], c(0.71459365, 0.68392316, 0.54740245, 0.39208559, 0.15519282), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$effect, x6$effect, tolerance = 1e-05) expect_equal(x6CodeBased$iterations, x6$iterations, tolerance = 1e-05) expect_equal(x6CodeBased$overallReject, x6$overallReject, tolerance = 1e-05) expect_equal(x6CodeBased$rejectPerStage, x6$rejectPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$futilityStop, x6$futilityStop, tolerance = 1e-05) expect_equal(x6CodeBased$futilityPerStage, x6$futilityPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$earlyStop, x6$earlyStop, tolerance = 1e-05) expect_equal(x6CodeBased$expectedNumberOfSubjects, x6$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x6CodeBased$sampleSizes, x6$sampleSizes, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPowerAchieved, x6$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 
0) } x7 <- getSimulationRates( design = getDesignInverseNormal(futilityBounds = c(0.5), typeOfDesign = "P"), thetaH0 = 0.3, groups = 1, plannedSubjects = c(30, 60), pi1 = seq(0.3, 0.5, 0.05), maxNumberOfIterations = maxNumberOfIterations, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(30, 30), maxNumberOfSubjectsPerStage = 5 * c(NA, 30), directionUpper = TRUE, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x7' with expected results expect_equal(x7$effect, c(0, 0.05, 0.1, 0.15, 0.2), tolerance = 1e-07) expect_equal(x7$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x7$iterations[2, ], c(25, 41, 53, 50, 35)) expect_equal(x7$overallReject, c(0.05, 0.18, 0.47, 0.77, 0.91), tolerance = 1e-07) expect_equal(x7$rejectPerStage[1, ], c(0.02, 0.06, 0.15, 0.36, 0.59), tolerance = 1e-07) expect_equal(x7$rejectPerStage[2, ], c(0.03, 0.12, 0.32, 0.41, 0.32), tolerance = 1e-07) expect_equal(x7$futilityPerStage[1, ], c(0.73, 0.53, 0.32, 0.14, 0.06), tolerance = 1e-07) expect_equal(x7$earlyStop, c(0.75, 0.59, 0.47, 0.5, 0.65), tolerance = 1e-07) expect_equal(x7$expectedNumberOfSubjects, c(58.56, 77.43, 83.21, 80.96, 58.83), tolerance = 1e-07) expect_equal(x7$sampleSizes[1, ], c(30, 30, 30, 30, 30)) expect_equal(x7$sampleSizes[2, ], c(114.24, 115.68293, 100.39623, 101.92, 82.371429), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(0.60107965, 0.60407724, 0.68409402, 0.68536207, 0.68807468), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$effect, x7$effect, tolerance = 1e-05) expect_equal(x7CodeBased$iterations, 
x7$iterations, tolerance = 1e-05) expect_equal(x7CodeBased$overallReject, x7$overallReject, tolerance = 1e-05) expect_equal(x7CodeBased$rejectPerStage, x7$rejectPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$futilityPerStage, x7$futilityPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$earlyStop, x7$earlyStop, tolerance = 1e-05) expect_equal(x7CodeBased$expectedNumberOfSubjects, x7$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x7CodeBased$sampleSizes, x7$sampleSizes, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPowerAchieved, x7$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x8 <- getSimulationRates( design = getDesignGroupSequential( futilityBounds = c(0.5, 0.5), typeOfDesign = "P" ), thetaH0 = 0.3, groups = 2, allocationRatioPlanned = 3, plannedSubjects = (1:3) * 100, pi1 = seq(0.2, 0.4, 0.05), pi2 = 0.2, maxNumberOfIterations = maxNumberOfIterations, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100, 100, 100), maxNumberOfSubjectsPerStage = 5 * c(NA, 100, 100), directionUpper = FALSE, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x8' with expected results expect_equal(x8$effect, c(-0.3, -0.25, -0.2, -0.15, -0.1), tolerance = 1e-07) expect_equal(x8$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x8$iterations[2, ], c(7, 23, 41, 52, 59)) expect_equal(x8$iterations[3, ], c(0, 1, 1, 11, 20)) expect_equal(x8$overallReject, c(1, 0.98, 0.95, 0.81, 0.61), tolerance = 1e-07) expect_equal(x8$rejectPerStage[1, ], c(0.93, 0.75, 0.54, 0.29, 0.1), tolerance = 1e-07) expect_equal(x8$rejectPerStage[2, ], c(0.07, 0.22, 0.4, 0.41, 0.37), tolerance = 1e-07) expect_equal(x8$rejectPerStage[3, ], c(0, 0.01, 0.01, 0.11, 0.14), tolerance = 1e-07) expect_equal(x8$futilityStop, 
c(0, 0.02, 0.05, 0.19, 0.33), tolerance = 1e-07) expect_equal(x8$futilityPerStage[1, ], c(0, 0.02, 0.05, 0.19, 0.31), tolerance = 1e-07) expect_equal(x8$futilityPerStage[2, ], c(0, 0, 0, 0, 0.02), tolerance = 1e-07) expect_equal(x8$earlyStop, c(1, 0.99, 0.99, 0.89, 0.8), tolerance = 1e-07) expect_equal(x8$expectedNumberOfSubjects, c(115.79, 135.33, 201.46, 331.88, 420.15), tolerance = 1e-07) expect_equal(x8$sampleSizes[1, ], c(100, 100, 100, 100, 100)) expect_equal(x8$sampleSizes[2, ], c(225.57143, 148.73913, 239.7561, 361.73077, 405.05085), tolerance = 1e-07) expect_equal(x8$sampleSizes[3, ], c(0, 112, 316, 398, 405.85), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, ], c(0.79294349, 0.80728899, 0.77763316, 0.64160567, 0.53147513), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[3, ], c(NaN, 0.80069037, 0.80071364, 0.56677072, 0.57523679), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) expect_equal(x8CodeBased$effect, x8$effect, tolerance = 1e-05) expect_equal(x8CodeBased$iterations, x8$iterations, tolerance = 1e-05) expect_equal(x8CodeBased$overallReject, x8$overallReject, tolerance = 1e-05) expect_equal(x8CodeBased$rejectPerStage, x8$rejectPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$futilityStop, x8$futilityStop, tolerance = 1e-05) expect_equal(x8CodeBased$futilityPerStage, x8$futilityPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$earlyStop, x8$earlyStop, tolerance = 1e-05) expect_equal(x8CodeBased$expectedNumberOfSubjects, x8$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x8CodeBased$sampleSizes, x8$sampleSizes, tolerance = 
1e-05) expect_equal(x8CodeBased$conditionalPowerAchieved, x8$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x9 <- getSimulationRates( design = getDesignGroupSequential( futilityBounds = c(0), typeOfDesign = "P" ), thetaH0 = 0.8, groups = 2, riskRatio = TRUE, allocationRatioPlanned = c(1, 3), maxNumberOfIterations = maxNumberOfIterations, plannedSubjects = c(100, 200), pi1 = seq(0.15, 0.4, 0.05), pi2 = 0.2, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100, 100), maxNumberOfSubjectsPerStage = 5 * c(NA, 100), directionUpper = TRUE, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x9' with expected results expect_equal(x9$effect, c(-0.05, 0.2, 0.45, 0.7, 0.95, 1.2), tolerance = 1e-07) expect_equal(x9$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x9$iterations[2, ], c(49, 64, 67, 73, 30, 22)) expect_equal(x9$overallReject, c(0.02, 0.12, 0.56, 0.87, 0.98, 1), tolerance = 1e-07) expect_equal(x9$rejectPerStage[1, ], c(0.01, 0.03, 0.18, 0.23, 0.7, 0.78), tolerance = 1e-07) expect_equal(x9$rejectPerStage[2, ], c(0.01, 0.09, 0.38, 0.64, 0.28, 0.22), tolerance = 1e-07) expect_equal(x9$futilityPerStage[1, ], c(0.5, 0.33, 0.15, 0.04, 0, 0), tolerance = 1e-07) expect_equal(x9$earlyStop, c(0.51, 0.36, 0.33, 0.27, 0.7, 0.78), tolerance = 1e-07) expect_equal(x9$expectedNumberOfSubjects, c(328.6, 380.04, 347.29, 334.93, 196.64, 161.85), tolerance = 1e-07) expect_equal(x9$sampleSizes[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x9$sampleSizes[2, ], c(466.53061, 437.5625, 369.08955, 321.82192, 322.13333, 281.13636), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x9$conditionalPowerAchieved[2, ], 
c(0.29198192, 0.45521064, 0.55401782, 0.65262445, 0.65733765, 0.7515248), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x9), NA))) expect_output(print(x9)$show()) invisible(capture.output(expect_error(summary(x9), NA))) expect_output(summary(x9)$show()) x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL))) expect_equal(x9CodeBased$effect, x9$effect, tolerance = 1e-05) expect_equal(x9CodeBased$iterations, x9$iterations, tolerance = 1e-05) expect_equal(x9CodeBased$overallReject, x9$overallReject, tolerance = 1e-05) expect_equal(x9CodeBased$rejectPerStage, x9$rejectPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$futilityPerStage, x9$futilityPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$earlyStop, x9$earlyStop, tolerance = 1e-05) expect_equal(x9CodeBased$expectedNumberOfSubjects, x9$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x9CodeBased$sampleSizes, x9$sampleSizes, tolerance = 1e-05) expect_equal(x9CodeBased$conditionalPowerAchieved, x9$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x9), "character") df <- as.data.frame(x9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } calcSubjectsFunctionSimulationBaseRates <- function(..., stage, plannedSubjects, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, conditionalCriticalValue, overallRate) { if (overallRate[1] - overallRate[2] < 0.1) { return(plannedSubjects[stage] - plannedSubjects[stage - 1]) } else { rateUnderH0 <- (overallRate[1] + overallRate[2]) / 2 stageSubjects <- 2 * (max(0, conditionalCriticalValue * sqrt(2 * rateUnderH0 * (1 - rateUnderH0)) + stats::qnorm(conditionalPower) * sqrt(overallRate[1] * (1 - overallRate[1]) + overallRate[2] * (1 - overallRate[2]))))^2 / (max(1e-12, (overallRate[1] - overallRate[2])))^2 
stageSubjects <- ceiling(min(max( minNumberOfSubjectsPerStage[stage], stageSubjects ), maxNumberOfSubjectsPerStage[stage])) return(stageSubjects) } } x10 <- getSimulationRates( design = getDesignInverseNormal(kMax = 2), pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, plannedSubjects = c(40, 80), minNumberOfSubjectsPerStage = c(40, 20), maxNumberOfSubjectsPerStage = c(40, 160), conditionalPower = 0.8, calcSubjectsFunction = calcSubjectsFunctionSimulationBaseRates, maxNumberOfIterations = maxNumberOfIterations, seed = seed ) ## Comparison of the results of SimulationResultsRates object 'x10' with expected results expect_equal(x10$effect, c(0, 0.1, 0.2, 0.3), tolerance = 1e-07) expect_equal(x10$iterations[1, ], c(100, 100, 100, 100)) expect_equal(x10$iterations[2, ], c(100, 99, 95, 75)) expect_equal(x10$overallReject, c(0.02, 0.2, 0.52, 0.89), tolerance = 1e-07) expect_equal(x10$rejectPerStage[1, ], c(0, 0.01, 0.05, 0.25), tolerance = 1e-07) expect_equal(x10$rejectPerStage[2, ], c(0.02, 0.19, 0.47, 0.64), tolerance = 1e-07) expect_equal(x10$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x10$earlyStop, c(0, 0.01, 0.05, 0.25), tolerance = 1e-07) expect_equal(x10$expectedNumberOfSubjects, c(104.34, 113.7, 101.87, 83.69), tolerance = 1e-07) expect_equal(x10$sampleSizes[1, ], c(40, 40, 40, 40)) expect_equal(x10$sampleSizes[2, ], c(64.34, 74.444444, 65.126316, 58.253333), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.20349537, 0.39194633, 0.57556995, 0.71162895), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x10), NA))) expect_output(print(x10)$show()) invisible(capture.output(expect_error(summary(x10), NA))) expect_output(summary(x10)$show()) x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL))) expect_equal(x10CodeBased$effect, x10$effect, tolerance = 1e-05) 
expect_equal(x10CodeBased$iterations, x10$iterations, tolerance = 1e-05) expect_equal(x10CodeBased$overallReject, x10$overallReject, tolerance = 1e-05) expect_equal(x10CodeBased$rejectPerStage, x10$rejectPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$futilityPerStage, x10$futilityPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$earlyStop, x10$earlyStop, tolerance = 1e-05) expect_equal(x10CodeBased$expectedNumberOfSubjects, x10$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x10CodeBased$sampleSizes, x10$sampleSizes, tolerance = 1e-05) expect_equal(x10CodeBased$conditionalPowerAchieved, x10$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x10), "character") df <- as.data.frame(x10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationRates': comparison with getPowerRates() results for a inverse normal design", { .skipTestIfNotX64() .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationRates} # @refFS[Formula]{fs:simulationOneArmRatesGenerate} # @refFS[Formula]{fs:pValuesOneRateAlternativeGreater} # @refFS[Formula]{fs:pValuesOneRateAlternativeSmaller} # @refFS[Formula]{fs:simulationTwoArmRatesGenerate} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:pValuesTwoRatesApproximationAlternativeGreater} # @refFS[Formula]{fs:pValuesTwoRatesApproximationAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} design <- getDesignInverseNormal(futilityBounds = c(-1), informationRates = c(0.5, 1), typeOfDesign = "P") x <- getSimulationRates(design, thetaH0 = 0.4, 
groups = 1, plannedSubjects = c(150, 300), pi1 = seq(0.3, 0.4, 0.02), maxNumberOfIterations = 1000, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA_real_, 100), maxNumberOfSubjectsPerStage = c(NA_real_, 500), directionUpper = FALSE, seed = 123 ) y <- getPowerRates(design, thetaH0 = 0.4, groups = 1, pi1 = seq(0.3, 0.4, 0.02), directionUpper = FALSE, maxNumberOfSubjects = 300 ) expectedNumberOfSubjectsDiff <- round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects) / 300, 4) ## Comparison of the results of numeric object 'expectedNumberOfSubjectsDiff' with expected results expect_equal(expectedNumberOfSubjectsDiff, c(0.2203, 0.4265, 0.625, 0.8158, 0.9639, 0.9543), tolerance = 1e-07) overallRejectDiff <- round(x$overallReject - y$overallReject, 4) ## Comparison of the results of numeric object 'overallRejectDiff' with expected results expect_equal(overallRejectDiff, c(0.052, 0.1567, 0.2226, 0.1407, 0.0249, -0.008), tolerance = 1e-07) rejectPerStageDiff <- round(x$rejectPerStage - y$rejectPerStage, 4) ## Comparison of the results of matrixarray object 'rejectPerStageDiff' with expected results expect_equal(rejectPerStageDiff[1, ], c(-0.0439, -0.0644, -0.027, -0.0138, 0.0042, -0.0067), tolerance = 1e-07) expect_equal(rejectPerStageDiff[2, ], c(0.0959, 0.2211, 0.2497, 0.1545, 0.0207, -0.0013), tolerance = 1e-07) futilityPerStageDiff <- round(x$futilityPerStage - y$futilityPerStage, 4) ## Comparison of the results of matrixarray object 'futilityPerStageDiff' with expected results expect_equal(futilityPerStageDiff[1, ], c(-2e-04, 0.0018, -0.0011, -0.0092, -0.0279, -0.0147), tolerance = 1e-07) }) test_that("'getSimulationRates': comparison with getPowerRates() results for a group sequential design", { .skipTestIfNotX64() .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationRates} # @refFS[Formula]{fs:simulationTwoArmRatesGenerate} # 
@refFS[Formula]{fs:pValuesTwoRatesAlternativeGreater} # @refFS[Formula]{fs:pValuesTwoRatesAlternativeSmaller} # @refFS[Formula]{fs:pValuesTwoRatesApproximationAlternativeGreater} # @refFS[Formula]{fs:pValuesTwoRatesApproximationAlternativeSmaller} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} design <- getDesignGroupSequential(futilityBounds = c(-1, 1), typeOfDesign = "P") x <- getSimulationRates(design, thetaH0 = 0.3, groups = 2, allocationRatioPlanned = 2, plannedSubjects = (1:3) * 100, pi1 = seq(0.2, 0.4, 0.05), pi2 = 0.1, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(100, 100, 100), maxNumberOfSubjectsPerStage = 1 * c(100, 100, 100), directionUpper = FALSE, seed = 123 ) y <- getPowerRates(design, thetaH0 = 0.3, groups = 2, allocationRatioPlanned = 2, pi1 = seq(0.2, 0.4, 0.05), pi2 = 0.1, directionUpper = FALSE, maxNumberOfSubjects = 300 ) expectedNumberOfSubjectsDiff <- round((x$expectedNumberOfSubjects - y$expectedNumberOfSubjects) / 300, 4) ## Comparison of the results of numeric object 'expectedNumberOfSubjectsDiff' with expected results expect_equal(expectedNumberOfSubjectsDiff, c(-0.0076, -0.0264, -0.0251, -0.0066, -0.0023), tolerance = 1e-07) overallRejectDiff <- round(x$overallReject - y$overallReject, 4) ## Comparison of the results of numeric object 'overallRejectDiff' with expected results expect_equal(overallRejectDiff, c(9e-04, 0.0072, 0.0177, -9e-04, -6e-04), tolerance = 1e-07) rejectPerStageDiff <- round(x$rejectPerStage - y$rejectPerStage, 4) ## Comparison of the results of matrixarray object 'rejectPerStageDiff' with expected results expect_equal(rejectPerStageDiff[1, ], c(0.0121, 0.0444, 0.0355, 0.0081, 0.001), tolerance = 1e-07) expect_equal(rejectPerStageDiff[2, ], c(-0.0032, -0.0171, 0.009, -0.0062, -0.0019), tolerance = 1e-07) expect_equal(rejectPerStageDiff[3, ], c(-0.008, -0.02, -0.0268, -0.0028, 3e-04), 
tolerance = 1e-07) futilityPerStageDiff <- round(x$futilityPerStage - y$futilityPerStage, 4) ## Comparison of the results of matrixarray object 'futilityPerStageDiff' with expected results expect_equal(futilityPerStageDiff[1, ], c(-1e-04, 0, 0.0049, 0.0058, 0.0053), tolerance = 1e-07) expect_equal(futilityPerStageDiff[2, ], c(0.0018, 0.0077, -0.0146, -0.0016, -0.0038), tolerance = 1e-07) ## -- x2 <- getSimulationRates( design = getDesignGroupSequential(futilityBounds = c(-1, 1), typeOfDesign = "P"), thetaH0 = 0.8, groups = 2, riskRatio = TRUE, allocationRatioPlanned = 2, plannedSubjects = c(100, 200, 300), pi1 = seq(0.15, 0.4, 0.05), pi2 = 0.2, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA_real_, 150, 300), maxNumberOfSubjectsPerStage = c(NA_real_, 200, 300), directionUpper = TRUE, maxNumberOfIterations = 1000, seed = 123 ) y2 <- getPowerRates( design = getDesignGroupSequential(futilityBounds = c(-1, 1), typeOfDesign = "P"), thetaH0 = 0.8, groups = 2, riskRatio = TRUE, allocationRatioPlanned = 2, pi1 = seq(0.15, 0.4, 0.05), pi2 = 0.2, maxNumberOfSubjects = 300, directionUpper = TRUE ) expectedNumberOfSubjectsDiff2 <- round((x2$expectedNumberOfSubjects - y2$expectedNumberOfSubjects) / 300, 4) ## Comparison of the results of numeric object 'expectedNumberOfSubjectsDiff2' with expected results expect_equal(expectedNumberOfSubjectsDiff2, c(0.336, 0.5853, 0.5882, 0.3089, 0.1411, 0.079), tolerance = 1e-07) overallRejectDiff2 <- round(x2$overallReject - y2$overallReject, 4) ## Comparison of the results of numeric object 'overallRejectDiff2' with expected results expect_equal(overallRejectDiff2, c(0.0032, 0.0559, 0.2444, 0.1617, 0.0401, 0.0038), tolerance = 1e-07) rejectPerStageDiff2 <- round(x2$rejectPerStage - y2$rejectPerStage, 4) ## Comparison of the results of matrixarray object 'rejectPerStageDiff2' with expected results expect_equal(rejectPerStageDiff2[1, ], c(6e-04, -0.0126, -0.0203, -0.0149, -0.0029, -0.0228), tolerance = 1e-07) 
expect_equal(rejectPerStageDiff2[2, ], c(0.0025, 0.0084, 0.104, 0.1808, 0.1029, 0.0508), tolerance = 1e-07) expect_equal(rejectPerStageDiff2[3, ], c(1e-04, 0.0601, 0.1607, -0.0041, -0.06, -0.0242), tolerance = 1e-07) futilityPerStageDiff2 <- round(x2$futilityPerStage - y2$futilityPerStage, 4) ## Comparison of the results of matrixarray object 'futilityPerStageDiff2' with expected results expect_equal(futilityPerStageDiff2[1, ], c(-0.0028, -0.016, -0.0034, -3e-04, -5e-04, -1e-04), tolerance = 1e-07) expect_equal(futilityPerStageDiff2[2, ], c(-0.0068, -0.0474, -0.0917, -0.0386, -0.0101, -0.0011), tolerance = 1e-07) }) test_that("'getSimulationRates': check results for a Fisher design", { .skipTestIfDisabled() design <- getDesignFisher(informationRates = c(0.3, 0.7, 1), method = "fullAlpha", alpha0Vec = c(0.5, 0.4), kMax = 3) simulationRates1 <- getSimulationRates(design, plannedSubjects = c(60, 120, 180), pi1 = seq(0.1, 0.4, 0.05), maxNumberOfIterations = 1000, allocationRatioPlanned = 0.5, pi2 = 0.4, directionUpper = FALSE, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10, 10), maxNumberOfSubjectsPerStage = c(NA, 180, 180), seed = 123 ) ## Comparison of the results of SimulationResultsRates object 'simulationRates1' with expected results expect_equal(simulationRates1$effect, c(-0.3, -0.25, -0.2, -0.15, -0.1, -0.05, 0), tolerance = 1e-07) expect_equal(simulationRates1$iterations[1, ], c(1000, 1000, 1000, 1000, 1000, 1000, 1000)) expect_equal(simulationRates1$iterations[2, ], c(411, 632, 728, 760, 697, 619, 472)) expect_equal(simulationRates1$iterations[3, ], c(29, 90, 237, 420, 459, 366, 199)) expect_equal(simulationRates1$overallReject, c(0.998, 0.964, 0.89, 0.711, 0.344, 0.137, 0.017), tolerance = 1e-07) expect_equal(simulationRates1$rejectPerStage[1, ], c(0.588, 0.339, 0.205, 0.131, 0.059, 0.023, 0.006), tolerance = 1e-07) expect_equal(simulationRates1$rejectPerStage[2, ], c(0.381, 0.54, 0.479, 0.305, 0.112, 0.048, 0.003), tolerance = 1e-07) 
expect_equal(simulationRates1$rejectPerStage[3, ], c(0.029, 0.085, 0.206, 0.275, 0.173, 0.066, 0.008), tolerance = 1e-07) expect_equal(simulationRates1$futilityStop, c(0.002, 0.031, 0.079, 0.144, 0.37, 0.563, 0.792), tolerance = 1e-07) expect_equal(simulationRates1$futilityPerStage[1, ], c(0.001, 0.029, 0.067, 0.109, 0.244, 0.358, 0.522), tolerance = 1e-07) expect_equal(simulationRates1$futilityPerStage[2, ], c(0.001, 0.002, 0.012, 0.035, 0.126, 0.205, 0.27), tolerance = 1e-07) expect_equal(simulationRates1$earlyStop, c(0.971, 0.91, 0.763, 0.58, 0.541, 0.634, 0.801), tolerance = 1e-07) expect_equal(simulationRates1$expectedNumberOfSubjects, c(109.16, 151.807, 194.562, 235.465, 243.315, 225.808, 175.721), tolerance = 1e-07) expect_equal(simulationRates1$sampleSizes[1, ], c(60, 60, 60, 60, 60, 60, 60)) expect_equal(simulationRates1$sampleSizes[2, ], c(116.19951, 134.30854, 148.23489, 155.03158, 161.43185, 169.31018, 172.5339), tolerance = 1e-07) expect_equal(simulationRates1$sampleSizes[3, ], c(48.344828, 76.933333, 112.4346, 137.24048, 154.24183, 166.68033, 172.28643), tolerance = 1e-07) expect_equal(simulationRates1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simulationRates1$conditionalPowerAchieved[2, ], c(0.70093565, 0.61462148, 0.51405671, 0.45076255, 0.40053168, 0.29444079, 0.24961225), tolerance = 1e-07) expect_equal(simulationRates1$conditionalPowerAchieved[3, ], c(0.80719762, 0.7654714, 0.69355873, 0.56900387, 0.46210075, 0.33019595, 0.24268208), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationRates1), NA))) expect_output(print(simulationRates1)$show()) invisible(capture.output(expect_error(summary(simulationRates1), NA))) expect_output(summary(simulationRates1)$show()) simulationRates1CodeBased <- eval(parse(text = getObjectRCode(simulationRates1, stringWrapParagraphWidth = NULL))) 
expect_equal(simulationRates1CodeBased$effect, simulationRates1$effect, tolerance = 1e-05) expect_equal(simulationRates1CodeBased$iterations, simulationRates1$iterations, tolerance = 1e-05) expect_equal(simulationRates1CodeBased$overallReject, simulationRates1$overallReject, tolerance = 1e-05) expect_equal(simulationRates1CodeBased$rejectPerStage, simulationRates1$rejectPerStage, tolerance = 1e-05) expect_equal(simulationRates1CodeBased$futilityStop, simulationRates1$futilityStop, tolerance = 1e-05) expect_equal(simulationRates1CodeBased$futilityPerStage, simulationRates1$futilityPerStage, tolerance = 1e-05) expect_equal(simulationRates1CodeBased$earlyStop, simulationRates1$earlyStop, tolerance = 1e-05) expect_equal(simulationRates1CodeBased$expectedNumberOfSubjects, simulationRates1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationRates1CodeBased$sampleSizes, simulationRates1$sampleSizes, tolerance = 1e-05) expect_equal(simulationRates1CodeBased$conditionalPowerAchieved, simulationRates1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simulationRates1), "character") df <- as.data.frame(simulationRates1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationRates1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } simulationRates2 <- getSimulationRates(design, plannedSubjects = c(60, 120, 180), pi1 = seq(0.1, 0.4, 0.05), maxNumberOfIterations = 1000, directionUpper = FALSE, conditionalPower = 0.8, groups = 1L, thetaH0 = 0.7, minNumberOfSubjectsPerStage = c(NA, 10, 10), maxNumberOfSubjectsPerStage = c(NA, 180, 180), seed = 123 ) ## Comparison of the results of SimulationResultsRates object 'simulationRates2' with expected results expect_equal(simulationRates2$effect, c(-0.6, -0.55, -0.5, -0.45, -0.4, -0.35, -0.3), tolerance = 1e-07) expect_equal(simulationRates2$iterations[1, ], c(1000, 1000, 1000, 1000, 1000, 1000, 1000)) 
expect_equal(simulationRates2$iterations[2, ], c(0, 0, 0, 0, 0, 1, 6)) expect_equal(simulationRates2$iterations[3, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(simulationRates2$overallReject, c(1, 1, 1, 1, 1, 1, 1)) expect_equal(simulationRates2$rejectPerStage[1, ], c(1, 1, 1, 1, 1, 0.999, 0.994), tolerance = 1e-07) expect_equal(simulationRates2$rejectPerStage[2, ], c(0, 0, 0, 0, 0, 0.001, 0.006), tolerance = 1e-07) expect_equal(simulationRates2$rejectPerStage[3, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(simulationRates2$futilityStop, c(0, 0, 0, 0, 0, 0, 0)) expect_equal(simulationRates2$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(simulationRates2$futilityPerStage[2, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(simulationRates2$earlyStop, c(1, 1, 1, 1, 1, 1, 1)) expect_equal(simulationRates2$expectedNumberOfSubjects, c(60, 60, 60, 60, 60, 60.058, 60.468), tolerance = 1e-07) expect_equal(simulationRates2$sampleSizes[1, ], c(60, 60, 60, 60, 60, 60, 60)) expect_equal(simulationRates2$sampleSizes[2, ], c(0, 0, 0, 0, 0, 58, 78)) expect_equal(simulationRates2$sampleSizes[3, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(simulationRates2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simulationRates2$conditionalPowerAchieved[2, ], c(NaN, NaN, NaN, NaN, NaN, 0.80316434, 0.80203432), tolerance = 1e-07) expect_equal(simulationRates2$conditionalPowerAchieved[3, ], c(NaN, NaN, NaN, NaN, NaN, NaN, NaN)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationRates2), NA))) expect_output(print(simulationRates2)$show()) invisible(capture.output(expect_error(summary(simulationRates2), NA))) expect_output(summary(simulationRates2)$show()) simulationRates2CodeBased <- eval(parse(text = getObjectRCode(simulationRates2, stringWrapParagraphWidth = NULL))) expect_equal(simulationRates2CodeBased$effect, simulationRates2$effect, tolerance = 1e-05) 
expect_equal(simulationRates2CodeBased$iterations, simulationRates2$iterations, tolerance = 1e-05) expect_equal(simulationRates2CodeBased$overallReject, simulationRates2$overallReject, tolerance = 1e-05) expect_equal(simulationRates2CodeBased$rejectPerStage, simulationRates2$rejectPerStage, tolerance = 1e-05) expect_equal(simulationRates2CodeBased$futilityStop, simulationRates2$futilityStop, tolerance = 1e-05) expect_equal(simulationRates2CodeBased$futilityPerStage, simulationRates2$futilityPerStage, tolerance = 1e-05) expect_equal(simulationRates2CodeBased$earlyStop, simulationRates2$earlyStop, tolerance = 1e-05) expect_equal(simulationRates2CodeBased$expectedNumberOfSubjects, simulationRates2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationRates2CodeBased$sampleSizes, simulationRates2$sampleSizes, tolerance = 1e-05) expect_equal(simulationRates2CodeBased$conditionalPowerAchieved, simulationRates2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simulationRates2), "character") df <- as.data.frame(simulationRates2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationRates2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("Simulation base rates functions throw errors when arguments are missing or wrong", { expect_error(getSimulationRates()) }) rpact/tests/testthat/test-f_analysis_multiarm_means.R0000644000176200001440000007651414370207346022735 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information 
about our services: info@rpact.com ## | ## | File name: test-f_analysis_multiarm_means.R ## | Creation date: 06 February 2023, 12:10:00 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Analysis Means Functionality for Three or More Treatments") test_that("'getAnalysisResultsMultiArm' with dataset of means", { design1 <- getDesignInverseNormal( kMax = 4, alpha = 0.02, futilityBounds = c(-0.5, 0, 0.5), bindingFutility = FALSE, typeOfDesign = "asKD", gammaA = 1.2, informationRates = c(0.15, 0.4, 0.7, 1) ) design2 <- getDesignFisher( kMax = 4, alpha = 0.02, alpha0Vec = c(0.7, 0.5, 0.3), method = "equalAlpha", bindingFutility = TRUE, informationRates = c(0.15, 0.4, 0.7, 1) ) design3 <- getDesignConditionalDunnett(alpha = 0.02, informationAtInterim = 0.4, secondStageConditioning = TRUE) # directionUpper = TRUE dataExample1 <- getDataset( n1 = c(13, 25), n2 = c(15, NA), n3 = c(14, 27), n4 = c(12, 29), means1 = c(24.2, 22.2), means2 = c(18.8, NA), means3 = c(26.7, 27.7), means4 = c(9.2, 12.2), stDevs1 = c(24.4, 22.1), stDevs2 = c(21.2, NA), stDevs3 = c(25.6, 23.2), stDevs4 = c(21.5, 22.7) ) # directionUpper = FALSE dataExample2 <- getDataset( n1 = c(13, 25), n2 = c(15, NA), n3 = c(14, 27), n4 = c(12, 29), means1 = -c(24.2, 22.2), means2 = -c(18.8, NA), means3 = -c(26.7, 27.7), means4 = -c(9.2, 12.2), stDevs1 = c(24.4, 22.1), stDevs2 = c(21.2, NA), stDevs3 = c(25.6, 23.2), stDevs4 = c(21.5, 22.7) ) # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # 
@refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} results1 <- getAnalysisResults(design = design1, dataInput = dataExample1, intersectionTest = "Simes", varianceOption = "overallPooled", nPlanned = c(20, 20), normalApproximation = FALSE, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results1' with expected results expect_equal(results1$thetaH1[1, ], 11.562259, tolerance = 1e-05) expect_equal(results1$thetaH1[2, ], NA_real_) expect_equal(results1$thetaH1[3, ], 16.036585, tolerance = 1e-05) expect_equal(results1$assumedStDevs[1, ], 22.357668, tolerance = 1e-05) expect_equal(results1$assumedStDevs[2, ], NA_real_) expect_equal(results1$assumedStDevs[3, ], 22.943518, tolerance = 1e-05) expect_equal(results1$conditionalRejectionProbabilities[1, ], c(0.040740209, 0.14372404, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$conditionalRejectionProbabilities[2, ], c(0.033856262, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$conditionalRejectionProbabilities[3, ], c(0.049414261, 0.33374326, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$conditionalPower[1, ], c(NA_real_, NA_real_, 0.42712247, 0.6790579), tolerance = 1e-05) expect_equal(results1$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(results1$conditionalPower[3, ], c(NA_real_, NA_real_, 0.82244694, 0.94484021), tolerance = 1e-05) 
expect_equal(results1$repeatedConfidenceIntervalLowerBounds[1, ], c(-16.567569, -4.662798, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalLowerBounds[2, ], c(-20.940706, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalLowerBounds[3, ], c(-13.521691, 0.049006969, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalUpperBounds[1, ], c(46.567569, 28.528695, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalUpperBounds[2, ], c(40.140706, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalUpperBounds[3, ], c(48.521691, 32.491814, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedPValues[1, ], c(0.5, 0.08542716, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedPValues[2, ], c(0.5, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedPValues[3, ], c(0.5, 0.017966281, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results1), NA))) expect_output(print(results1)$show()) invisible(capture.output(expect_error(summary(results1), NA))) expect_output(summary(results1)$show()) results1CodeBased <- eval(parse(text = getObjectRCode(results1, stringWrapParagraphWidth = NULL))) expect_equal(results1CodeBased$thetaH1, results1$thetaH1, tolerance = 1e-05) expect_equal(results1CodeBased$assumedStDevs, results1$assumedStDevs, tolerance = 1e-05) expect_equal(results1CodeBased$conditionalRejectionProbabilities, results1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results1CodeBased$conditionalPower, results1$conditionalPower, tolerance = 1e-05) expect_equal(results1CodeBased$repeatedConfidenceIntervalLowerBounds, results1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) 
expect_equal(results1CodeBased$repeatedConfidenceIntervalUpperBounds, results1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results1CodeBased$repeatedPValues, results1$repeatedPValues, tolerance = 1e-05) expect_type(names(results1), "character") df <- as.data.frame(results1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} results2 <- getAnalysisResults(design = design2, dataInput = dataExample1, intersectionTest = "Sidak", varianceOption = "pairwisePooled", nPlanned = c(20, 20), seed = 1234, iterations = 1000, normalApproximation = TRUE, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results2' with expected results expect_equal(results2$thetaH1[1, ], 11.562259, tolerance = 1e-05) expect_equal(results2$thetaH1[2, ], NA_real_) expect_equal(results2$thetaH1[3, 
], 16.036585, tolerance = 1e-05) expect_equal(results2$assumedStDevs[1, ], 22.357668, tolerance = 1e-05) expect_equal(results2$assumedStDevs[2, ], NA_real_) expect_equal(results2$assumedStDevs[3, ], 22.943518, tolerance = 1e-05) expect_equal(results2$conditionalRejectionProbabilities[1, ], c(0.024748593, 0.053966892, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$conditionalRejectionProbabilities[2, ], c(0.021915713, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$conditionalRejectionProbabilities[3, ], c(0.0267758, 1, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalLowerBounds[1, ], c(-10.38015, -4.0770639, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalLowerBounds[2, ], c(-13.116502, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalLowerBounds[3, ], c(-8.2525514, 0.41959343, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalUpperBounds[1, ], c(40.38015, 26.720108, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalUpperBounds[2, ], c(32.316502, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalUpperBounds[3, ], c(43.252551, 31.62149, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedPValues[1, ], c(0.17335289, 0.062127989, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedPValues[2, ], c(0.20285189, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedPValues[3, ], c(0.15638134, 0.015781417, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$conditionalPowerSimulated[1, ], c(NA_real_, NA_real_, 0.277, 0.453), tolerance = 1e-05) expect_equal(results2$conditionalPowerSimulated[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(results2$conditionalPowerSimulated[3, ], c(NA_real_, NA_real_, 1, 
1)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results2), NA))) expect_output(print(results2)$show()) invisible(capture.output(expect_error(summary(results2), NA))) expect_output(summary(results2)$show()) results2CodeBased <- eval(parse(text = getObjectRCode(results2, stringWrapParagraphWidth = NULL))) expect_equal(results2CodeBased$thetaH1, results2$thetaH1, tolerance = 1e-05) expect_equal(results2CodeBased$assumedStDevs, results2$assumedStDevs, tolerance = 1e-05) expect_equal(results2CodeBased$conditionalRejectionProbabilities, results2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results2CodeBased$repeatedConfidenceIntervalLowerBounds, results2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results2CodeBased$repeatedConfidenceIntervalUpperBounds, results2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results2CodeBased$repeatedPValues, results2$repeatedPValues, tolerance = 1e-05) expect_equal(results2CodeBased$conditionalPowerSimulated, results2$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(results2), "character") df <- as.data.frame(results2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # 
@refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} results3 <- getAnalysisResults(design = design3, dataInput = dataExample1, intersectionTest = "Dunnett", varianceOption = "overallPooled", normalApproximation = TRUE, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsConditionalDunnett object 'results3' with expected results expect_equal(results3$thetaH1[1, ], 11.562259, tolerance = 3e-04) expect_equal(results3$thetaH1[2, ], NA_real_) expect_equal(results3$thetaH1[3, ], 16.036585, tolerance = 3e-04) expect_equal(results3$assumedStDevs[1, ], 22.357668, tolerance = 3e-04) expect_equal(results3$assumedStDevs[2, ], NA_real_) expect_equal(results3$assumedStDevs[3, ], 22.943518, tolerance = 3e-04) expect_equal(results3$conditionalRejectionProbabilities[1, ], c(NA_real_, 0.061352393), tolerance = 3e-04) expect_equal(results3$conditionalRejectionProbabilities[2, ], c(NA_real_, 0.037447419), tolerance = 3e-04) expect_equal(results3$conditionalRejectionProbabilities[3, ], c(NA_real_, 0.08651207), tolerance = 3e-04) expect_equal(results3$conditionalPower[1, ], c(NA_real_, NA_real_)) expect_equal(results3$conditionalPower[2, ], c(NA_real_, NA_real_)) expect_equal(results3$conditionalPower[3, ], c(NA_real_, NA_real_)) expect_equal(results3$repeatedConfidenceIntervalLowerBounds[1, ], c(NA_real_, -0.72440621), tolerance = 3e-04) expect_equal(results3$repeatedConfidenceIntervalLowerBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results3$repeatedConfidenceIntervalLowerBounds[3, ], c(NA_real_, 3.9389233), tolerance = 3e-04) expect_equal(results3$repeatedConfidenceIntervalUpperBounds[1, ], c(NA_real_, 22.538721), tolerance = 3e-04) 
expect_equal(results3$repeatedConfidenceIntervalUpperBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results3$repeatedConfidenceIntervalUpperBounds[3, ], c(NA_real_, 26.753524), tolerance = 3e-04) expect_equal(results3$repeatedPValues[1, ], c(NA_real_, 0.017445576), tolerance = 3e-04) expect_equal(results3$repeatedPValues[2, ], c(NA_real_, NA_real_)) expect_equal(results3$repeatedPValues[3, ], c(NA_real_, 0.0019493527), tolerance = 3e-04) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results3), NA))) expect_output(print(results3)$show()) invisible(capture.output(expect_error(summary(results3), NA))) expect_output(summary(results3)$show()) results3CodeBased <- eval(parse(text = getObjectRCode(results3, stringWrapParagraphWidth = NULL))) expect_equal(results3CodeBased$thetaH1, results3$thetaH1, tolerance = 1e-05) expect_equal(results3CodeBased$assumedStDevs, results3$assumedStDevs, tolerance = 1e-05) expect_equal(results3CodeBased$conditionalRejectionProbabilities, results3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results3CodeBased$conditionalPower, results3$conditionalPower, tolerance = 1e-05) expect_equal(results3CodeBased$repeatedConfidenceIntervalLowerBounds, results3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results3CodeBased$repeatedConfidenceIntervalUpperBounds, results3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results3CodeBased$repeatedPValues, results3$repeatedPValues, tolerance = 1e-05) expect_type(names(results3), "character") df <- as.data.frame(results3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # 
@refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} results4 <- getAnalysisResults(design = design1, dataInput = dataExample2, intersectionTest = "Bonferroni", varianceOption = "overallPooled", nPlanned = c(20, 20), normalApproximation = TRUE, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results4' with expected results expect_equal(results4$thetaH1[1, ], -11.562259, tolerance = 1e-05) expect_equal(results4$thetaH1[2, ], NA_real_) expect_equal(results4$thetaH1[3, ], -16.036585, tolerance = 1e-05) expect_equal(results4$assumedStDevs[1, ], 22.357668, tolerance = 1e-05) expect_equal(results4$assumedStDevs[2, ], NA_real_) expect_equal(results4$assumedStDevs[3, ], 22.943518, tolerance = 1e-05) expect_equal(results4$conditionalRejectionProbabilities[1, ], c(0.042394596, 0.15198143, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$conditionalRejectionProbabilities[2, ], c(0.034321105, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$conditionalRejectionProbabilities[3, ], c(0.049947129, 0.35588618, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$conditionalPower[1, ], c(NA_real_, NA_real_, 0.44302928, 0.69082025), 
tolerance = 1e-05) expect_equal(results4$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(results4$conditionalPower[3, ], c(NA_real_, NA_real_, 0.83889182, 0.95069292), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalLowerBounds[1, ], c(-44.802158, -28.113845, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalLowerBounds[2, ], c(-38.432721, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalLowerBounds[3, ], c(-46.786808, -32.10754, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalUpperBounds[1, ], c(14.802158, 4.2854677, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalUpperBounds[2, ], c(19.232721, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalUpperBounds[3, ], c(11.786808, -0.41764226, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedPValues[1, ], c(0.5, 0.078823932, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedPValues[2, ], c(0.5, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedPValues[3, ], c(0.5, 0.015272156, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results4), NA))) expect_output(print(results4)$show()) invisible(capture.output(expect_error(summary(results4), NA))) expect_output(summary(results4)$show()) results4CodeBased <- eval(parse(text = getObjectRCode(results4, stringWrapParagraphWidth = NULL))) expect_equal(results4CodeBased$thetaH1, results4$thetaH1, tolerance = 1e-05) expect_equal(results4CodeBased$assumedStDevs, results4$assumedStDevs, tolerance = 1e-05) expect_equal(results4CodeBased$conditionalRejectionProbabilities, results4$conditionalRejectionProbabilities, tolerance = 1e-05) 
expect_equal(results4CodeBased$conditionalPower, results4$conditionalPower, tolerance = 1e-05) expect_equal(results4CodeBased$repeatedConfidenceIntervalLowerBounds, results4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results4CodeBased$repeatedConfidenceIntervalUpperBounds, results4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results4CodeBased$repeatedPValues, results4$repeatedPValues, tolerance = 1e-05) expect_type(names(results4), "character") df <- as.data.frame(results4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} results5 <- getAnalysisResults(design = design2, dataInput = dataExample2, intersectionTest = "Simes", varianceOption = "pairwisePooled", nPlanned = c(20, 20), seed = 1234, iterations = 1000, normalApproximation = FALSE, directionUpper = FALSE) ## Comparison of the results of 
AnalysisResultsMultiArmFisher object 'results5' with expected results expect_equal(results5$thetaH1[1, ], -11.562259, tolerance = 1e-05) expect_equal(results5$thetaH1[2, ], NA_real_) expect_equal(results5$thetaH1[3, ], -16.036585, tolerance = 1e-05) expect_equal(results5$assumedStDevs[1, ], 22.357668, tolerance = 1e-05) expect_equal(results5$assumedStDevs[2, ], NA_real_) expect_equal(results5$assumedStDevs[3, ], 22.943518, tolerance = 1e-05) expect_equal(results5$conditionalRejectionProbabilities[1, ], c(0.02248882, 0.047009108, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$conditionalRejectionProbabilities[2, ], c(0.021309255, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$conditionalRejectionProbabilities[3, ], c(0.027044989, 1, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalLowerBounds[1, ], c(-42.972232, -27.481288, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalLowerBounds[2, ], c(-34.436237, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalLowerBounds[3, ], c(-45.763994, -32.295837, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalUpperBounds[1, ], c(12.972232, 4.7692163, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalUpperBounds[2, ], c(15.236237, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalUpperBounds[3, ], c(10.763995, 0.22335705, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedPValues[1, ], c(0.19623626, 0.071653269, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedPValues[2, ], c(0.21026955, NA_real_, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedPValues[3, ], c(0.15433667, 0.019180306, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$conditionalPowerSimulated[1, ], 
c(NA_real_, NA_real_, 0.256, 0.431), tolerance = 1e-05) expect_equal(results5$conditionalPowerSimulated[2, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(results5$conditionalPowerSimulated[3, ], c(NA_real_, NA_real_, 1, 1)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results5), NA))) expect_output(print(results5)$show()) invisible(capture.output(expect_error(summary(results5), NA))) expect_output(summary(results5)$show()) results5CodeBased <- eval(parse(text = getObjectRCode(results5, stringWrapParagraphWidth = NULL))) expect_equal(results5CodeBased$thetaH1, results5$thetaH1, tolerance = 1e-05) expect_equal(results5CodeBased$assumedStDevs, results5$assumedStDevs, tolerance = 1e-05) expect_equal(results5CodeBased$conditionalRejectionProbabilities, results5$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results5CodeBased$repeatedConfidenceIntervalLowerBounds, results5$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results5CodeBased$repeatedConfidenceIntervalUpperBounds, results5$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results5CodeBased$repeatedPValues, results5$repeatedPValues, tolerance = 1e-05) expect_equal(results5CodeBased$conditionalPowerSimulated, results5$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(results5), "character") df <- as.data.frame(results5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # 
@refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} results6 <- getAnalysisResults(design = design3, dataInput = dataExample2, intersectionTest = "Dunnett", varianceOption = "overallPooled", normalApproximation = TRUE, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsConditionalDunnett object 'results6' with expected results expect_equal(results6$thetaH1[1, ], -11.562259, tolerance = 3e-04) expect_equal(results6$thetaH1[2, ], NA_real_) expect_equal(results6$thetaH1[3, ], -16.036585, tolerance = 3e-04) expect_equal(results6$assumedStDevs[1, ], 22.357668, tolerance = 3e-04) expect_equal(results6$assumedStDevs[2, ], NA_real_) expect_equal(results6$assumedStDevs[3, ], 22.943518, tolerance = 3e-04) expect_equal(results6$conditionalRejectionProbabilities[1, ], c(NA_real_, 0.061352393), tolerance = 3e-04) expect_equal(results6$conditionalRejectionProbabilities[2, ], c(NA_real_, 0.037447419), tolerance = 3e-04) expect_equal(results6$conditionalRejectionProbabilities[3, ], c(NA_real_, 0.08651207), tolerance = 3e-04) expect_equal(results6$conditionalPower[1, ], c(NA_real_, NA_real_)) expect_equal(results6$conditionalPower[2, ], c(NA_real_, NA_real_)) expect_equal(results6$conditionalPower[3, ], c(NA_real_, NA_real_)) expect_equal(results6$repeatedConfidenceIntervalLowerBounds[1, ], c(NA_real_, -22.538721), tolerance = 3e-04) 
expect_equal(results6$repeatedConfidenceIntervalLowerBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results6$repeatedConfidenceIntervalLowerBounds[3, ], c(NA_real_, -26.753524), tolerance = 3e-04) expect_equal(results6$repeatedConfidenceIntervalUpperBounds[1, ], c(NA_real_, 0.72440621), tolerance = 3e-04) expect_equal(results6$repeatedConfidenceIntervalUpperBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results6$repeatedConfidenceIntervalUpperBounds[3, ], c(NA_real_, -3.9389233), tolerance = 3e-04) expect_equal(results6$repeatedPValues[1, ], c(NA_real_, 0.017445576), tolerance = 3e-04) expect_equal(results6$repeatedPValues[2, ], c(NA_real_, NA_real_)) expect_equal(results6$repeatedPValues[3, ], c(NA_real_, 0.0019493527), tolerance = 3e-04) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results6), NA))) expect_output(print(results6)$show()) invisible(capture.output(expect_error(summary(results6), NA))) expect_output(summary(results6)$show()) results6CodeBased <- eval(parse(text = getObjectRCode(results6, stringWrapParagraphWidth = NULL))) expect_equal(results6CodeBased$thetaH1, results6$thetaH1, tolerance = 1e-05) expect_equal(results6CodeBased$assumedStDevs, results6$assumedStDevs, tolerance = 1e-05) expect_equal(results6CodeBased$conditionalRejectionProbabilities, results6$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results6CodeBased$conditionalPower, results6$conditionalPower, tolerance = 1e-05) expect_equal(results6CodeBased$repeatedConfidenceIntervalLowerBounds, results6$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results6CodeBased$repeatedConfidenceIntervalUpperBounds, results6$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results6CodeBased$repeatedPValues, results6$repeatedPValues, tolerance = 1e-05) expect_type(names(results6), "character") df <- as.data.frame(results6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && 
ncol(df) > 0) mtx <- as.matrix(results6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) rpact/tests/testthat/test-f_analysis_input_validation.R0000644000176200001440000000743014370207346023260 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_analysis_input_validation.R ## | Creation date: 06 February 2023, 12:10:00 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Correct Input Validation of All Analysis Functions") test_that("Errors and warnings for calculation of analysis results with dataset of means", { .skipTestIfDisabled() design1 <- getDesignInverseNormal( kMax = 4, alpha = 0.02, futilityBounds = c(-0.5, 0, 0.5), bindingFutility = FALSE, typeOfDesign = "asKD", gammaA = 1.2, informationRates = c(0.15, 0.4, 0.7, 1) ) design3 <- getDesignConditionalDunnett(alpha = 0.02, informationAtInterim = 0.4, secondStageConditioning = TRUE) dataExample1 <- getDataset( n = c(13, 25), means = c(24.2, 22.2), stDevs = c(24.4, 22.1) ) dataExample2 <- getDataset( n1 = c(13, 25), n2 = c(15, 27), means1 = c(24.2, 22.2), means2 = c(18.8, 27.7), stDevs1 = c(24.4, 22.1), stDevs2 = c(21.2, 23.7) ) dataExample4 <- getDataset( n1 = c(13, 25), n2 = c(15, NA), n3 = c(14, 27), n4 = c(12, 29), means1 = c(24.2, 22.2), means2 = c(18.8, NA), means3 = c(26.7, 27.7), means4 = c(9.2, 12.2), stDevs1 = c(24.4, 22.1), stDevs2 
= c(21.2, NA), stDevs3 = c(25.6, 23.2), stDevs4 = c(21.5, 22.7) ) expect_error(getAnalysisResults( design = design1, dataInput = dataExample4, intersectionTest = "", varianceOption = "notPooled", nPlanned = c(20, 20) )) expect_error(getAnalysisResults( design = design1, dataInput = dataExample4, intersectionTest = "Simes", varianceOption = "X", nPlanned = c(20, 20) )) expect_error(getAnalysisResults( design = design1, dataInput = dataExample4, intersectionTest = "Simes", varianceOption = "notPooled", nPlanned = c(20, 20, 30) )) expect_error(getAnalysisResults( design = design1, dataInput = dataExample4, intersectionTest = "Simes", varianceOption = "notPooled", nPlanned = 20 )) expect_error(getAnalysisResults( design = design1, dataInput = dataExample4, intersectionTest = "Simes", varianceOption = "notPooled", nPlanned = c() )) expect_error(getAnalysisResults( design = design1, dataInput = dataExample4, intersectionTest = "Simes", varianceOption = "notPooled", nPlanned = numeric(0) )) expect_error(getAnalysisResults( design = design3, dataInput = dataExample4, intersectionTest = "Dunnett", varianceOption = "pairwisePooled" ), paste0( "Illegal argument: variance option ('pairwisePooled') must be 'overallPooled' ", "because conditional Dunnett test was specified as design" ), fixed = TRUE ) expect_error(getAnalysisResults( design = design1, dataInput = dataExample4, intersectionTest = "Dunnett", varianceOption = "pairwisePooled", nPlanned = c(20, 20) ), "Dunnett t test can only be performed with overall variance estimation", fixed = TRUE ) expect_error(getConditionalPower(getStageResults(design1, dataInput = dataExample2), nPlanned = c(20, 20), allocationRatioPlanned = -1 )) }) rpact/tests/testthat/test-f_analysis_enrichment_rates.R0000644000176200001440000010714414370207346023244 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, 
PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_analysis_enrichment_rates.R ## | Creation date: 06 February 2023, 12:08:56 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Analysis Enrichment Rates Function") test_that("'getAnalysisResults': enrichment rates, one sub-population, non-stratified input, select S1 at second IA, directionUpper = FALSE, gMax = 2", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentRates} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:testStatisticEnrichmentRates} design1 <- getDesignInverseNormal(kMax = 3, alpha = 0.02, typeOfDesign = "P", informationRates = c(0.4, 0.7, 1)) S1 <- getDataset( sampleSize1 = c(22, 31, 37), sampleSize2 = c(28, 33, 39), events1 = c(7, 16, 17), events2 = c(18, 21, 19) ) F <- getDataset( sampleSize1 = c(46, 54, NA), sampleSize2 = c(49, 62, NA), events1 = c(16, 31, NA), events2 = c(29, 35, NA) ) dataInput1 <- getDataset(S1 = S1, F = F) ## Comparison of the results of DatasetRates object 'dataInput1' 
with expected results expect_equal(dataInput1$overallSampleSizes, c(22, 46, 28, 49, 53, 100, 61, 111, 90, NA_real_, 100, NA_real_)) expect_equal(dataInput1$overallEvents, c(7, 16, 18, 29, 23, 47, 39, 64, 40, NA_real_, 58, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput1), NA))) expect_output(print(dataInput1)$show()) invisible(capture.output(expect_error(summary(dataInput1), NA))) expect_output(summary(dataInput1)$show()) dataInput1CodeBased <- eval(parse(text = getObjectRCode(dataInput1, stringWrapParagraphWidth = NULL))) expect_equal(dataInput1CodeBased$overallSampleSizes, dataInput1$overallSampleSizes, tolerance = 1e-05) expect_equal(dataInput1CodeBased$overallEvents, dataInput1$overallEvents, tolerance = 1e-05) expect_type(names(dataInput1), "character") df <- as.data.frame(dataInput1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x1 <- getAnalysisResults(design1, dataInput1, stratifiedAnalysis = FALSE, intersectionTest = "SpiessensDebois", allocationRatioPlanned = 0.5, directionUpper = FALSE, normalApproximation = TRUE, stage = 2, nPlanned = c(80) ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x1' with expected results expect_equal(x1$piTreatments[1, ], 0.43396226, tolerance = 1e-07) expect_equal(x1$piTreatments[2, ], 0.47, tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities[1, ], c(0.17935206, 0.13861438, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities[2, ], c(0.17935206, 0.047432959, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower[1, ], c(NA_real_, NA_real_, 0.74825599), tolerance = 1e-07) expect_equal(x1$conditionalPower[2, ], c(NA_real_, NA_real_, 0.22069678), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.611497, -0.44933646, 
NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.47492278, -0.29773456, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[1, ], c(0.040178241, 0.029773314, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[2, ], c(0.018733891, 0.065139268, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[1, ], c(0.031827909, 0.031827909, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[2, ], c(0.031827909, 0.031827909, NA_real_), tolerance = 1e-07) expect_equal(x1$piControls[1, ], 0.63934426, tolerance = 1e-07) expect_equal(x1$piControls[2, ], 0.57657658, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$piTreatments, x1$piTreatments, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$piControls, x1$piControls, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x2 <- getAnalysisResults(design1, dataInput1, 
stratifiedAnalysis = FALSE, intersectionTest = "Bonferroni", allocationRatioPlanned = 0.5, directionUpper = FALSE, normalApproximation = TRUE, stage = 2, nPlanned = c(80) ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x2' with expected results expect_equal(x2$piTreatments[1, ], 0.43396226, tolerance = 1e-07) expect_equal(x2$piTreatments[2, ], 0.47, tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[1, ], c(0.16289564, 0.075460476, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[2, ], c(0.16289564, 0.047432959, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower[1, ], c(NA_real_, NA_real_, 0.62405214), tolerance = 1e-07) expect_equal(x2$conditionalPower[2, ], c(NA_real_, NA_real_, 0.22069678), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.61554799, -0.46343398, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.47860086, -0.31516617, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[1, ], c(0.046721667, 0.044120395, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[2, ], c(0.02350445, 0.081574104, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[1, ], c(0.036684009, 0.036684009, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[2, ], c(0.036684009, 0.036684009, NA_real_), tolerance = 1e-07) expect_equal(x2$piControls[1, ], 0.63934426, tolerance = 1e-07) expect_equal(x2$piControls[2, ], 0.57657658, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$piTreatments, x2$piTreatments, tolerance = 1e-05) 
expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$piControls, x2$piControls, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': enrichment rates, one sub-population, stratified input, select S1 at second IA, directionUpper = FALSE, gMax = 2", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentRates} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:testStatisticEnrichmentRates} design1 <- getDesignInverseNormal(kMax = 3, alpha = 0.05, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.4, 0.7, 1)) S1 <- getDataset( sampleSize1 = c(22, 31, 37), sampleSize2 = c(28, 33, 39), events1 = c(7, 16, 10), events2 = c(18, 21, 19) ) R <- getDataset( sampleSize1 = c(24, 23, NA), sampleSize2 = 
c(21, 29, NA), events1 = c(9, 15, NA), events2 = c(11, 14, NA) ) dataInput2 <- getDataset(S1 = S1, R = R) ## Comparison of the results of DatasetRates object 'dataInput2' with expected results expect_equal(dataInput2$overallSampleSizes, c(22, 24, 28, 21, 53, 47, 61, 50, 90, NA_real_, 100, NA_real_)) expect_equal(dataInput2$overallEvents, c(7, 9, 18, 11, 23, 24, 39, 25, 33, NA_real_, 58, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput2), NA))) expect_output(print(dataInput2)$show()) invisible(capture.output(expect_error(summary(dataInput2), NA))) expect_output(summary(dataInput2)$show()) dataInput2CodeBased <- eval(parse(text = getObjectRCode(dataInput2, stringWrapParagraphWidth = NULL))) expect_equal(dataInput2CodeBased$overallSampleSizes, dataInput2$overallSampleSizes, tolerance = 1e-05) expect_equal(dataInput2CodeBased$overallEvents, dataInput2$overallEvents, tolerance = 1e-05) expect_type(names(dataInput2), "character") df <- as.data.frame(dataInput2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x3 <- getAnalysisResults(design1, dataInput2, stratifiedAnalysis = FALSE, intersectionTest = "Simes", directionUpper = FALSE, normalApproximation = FALSE ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x3' with expected results expect_equal(x3$piTreatments[1, ], 0.36666667, tolerance = 1e-07) expect_equal(x3$piTreatments[2, ], NA_real_) expect_equal(x3$conditionalRejectionProbabilities[1, ], c(0.34476337, 0.21123906, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities[2, ], c(0.34476337, 0.16889178, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) 
expect_equal(x3$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.62776669, -0.44175544, -0.38366304), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.4897991, -0.29886557, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[1, ], c(0.066751342, 0.016446892, -0.050014598), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[2, ], c(0.038157503, 0.063536395, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[1, ], c(0.10653002, 0.10653002, 0.014413851), tolerance = 1e-07) expect_equal(x3$repeatedPValues[2, ], c(0.10653002, 0.10653002, NA_real_), tolerance = 1e-07) expect_equal(x3$piControls[1, ], 0.58, tolerance = 1e-07) expect_equal(x3$piControls[2, ], NA_real_) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$piTreatments, x3$piTreatments, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_equal(x3CodeBased$piControls, x3$piControls, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x4 <- 
getAnalysisResults(design1, dataInput2, stratifiedAnalysis = TRUE, intersectionTest = "Simes", directionUpper = FALSE, normalApproximation = TRUE ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x4' with expected results expect_equal(x4$piTreatments[1, ], 0.36666667, tolerance = 1e-07) expect_equal(x4$piTreatments[2, ], NA_real_) expect_equal(x4$conditionalRejectionProbabilities[1, ], c(0.4519333, 0.45336181, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities[2, ], c(0.4519333, 0.2823056, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalPower[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x4$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.62776669, -0.44175544, -0.38366304), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.48811625, -0.29740945, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds[1, ], c(0.066751342, 0.016446892, -0.050014598), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds[2, ], c(0.041874626, 0.06452777, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues[1, ], c(0.07212343, 0.050354903, 0.0033350387), tolerance = 1e-07) expect_equal(x4$repeatedPValues[2, ], c(0.07212343, 0.065501128, NA_real_), tolerance = 1e-07) expect_equal(x4$piControls[1, ], 0.58, tolerance = 1e-07) expect_equal(x4$piControls[2, ], NA_real_) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$piTreatments, x4$piTreatments, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalRejectionProbabilities, x4$conditionalRejectionProbabilities, tolerance = 
1e-05) expect_equal(x4CodeBased$conditionalPower, x4$conditionalPower, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalLowerBounds, x4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalUpperBounds, x4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedPValues, x4$repeatedPValues, tolerance = 1e-05) expect_equal(x4CodeBased$piControls, x4$piControls, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': enrichment rates, more sub-populations, select S1 and S2 at first IA, select S1 at second, directionUpper = TRUE, gMax = 3", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentRates} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:testStatisticEnrichmentRates} S1 <- getDataset( sampleSize1 = c(47, 33, 37), sampleSize2 = c(48, 47, 39), events1 = c(18, 13, 17), events2 = c(12, 11, 9) ) S2 <- getDataset( sampleSize1 = c(49, NA, NA), sampleSize2 = c(45, NA, NA), events1 = c(12, NA, NA), events2 = c(13, NA, NA) ) S12 <- getDataset( sampleSize1 = c(35, 42, NA), sampleSize2 = c(36, 47, NA), events1 = c(19, 10, NA), events2 = c(13, 17, NA) ) R <- getDataset( sampleSize1 = 
c(43, NA, NA), sampleSize2 = c(39, NA, NA), events1 = c(17, NA, NA), events2 = c(14, NA, NA) ) dataInput3 <- getDataset(S1 = S1, S2 = S2, S12 = S12, R = R) ## Comparison of the results of DatasetRates object 'dataInput3' with expected results expect_equal(dataInput3$overallSampleSizes, c(47, 49, 35, 43, 48, 45, 36, 39, 80, NA_real_, 77, NA_real_, 95, NA_real_, 83, NA_real_, 117, NA_real_, NA_real_, NA_real_, 134, NA_real_, NA_real_, NA_real_)) expect_equal(dataInput3$overallEvents, c(18, 12, 19, 17, 12, 13, 13, 14, 31, NA_real_, 29, NA_real_, 23, NA_real_, 30, NA_real_, 48, NA_real_, NA_real_, NA_real_, 32, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput3), NA))) expect_output(print(dataInput3)$show()) invisible(capture.output(expect_error(summary(dataInput3), NA))) expect_output(summary(dataInput3)$show()) dataInput3CodeBased <- eval(parse(text = getObjectRCode(dataInput3, stringWrapParagraphWidth = NULL))) expect_equal(dataInput3CodeBased$overallSampleSizes, dataInput3$overallSampleSizes, tolerance = 1e-05) expect_equal(dataInput3CodeBased$overallEvents, dataInput3$overallEvents, tolerance = 1e-05) expect_type(names(dataInput3), "character") df <- as.data.frame(dataInput3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design1 <- getDesignInverseNormal(kMax = 3, alpha = 0.05, typeOfDesign = "WT", deltaWT = 0.1, informationRates = c(0.4, 0.7, 1)) x1 <- getAnalysisResults(design1, dataInput3, directionUpper = TRUE, stratifiedAnalysis = FALSE, intersectionTest = "Sidak", allocationRatioPlanned = 3, normalApproximation = FALSE, nPlanned = c(80), piControls = c(0.2, NA, NA), piTreatments = c(0.55, NA, NA), stage = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x1' with expected results 
expect_equal(x1$conditionalRejectionProbabilities[1, ], c(0.15297113, 0.049132584, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities[2, ], c(0.034063149, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities[3, ], c(0.064895921, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower[1, ], c(NA_real_, NA_real_, 0.89354539), tolerance = 1e-07) expect_equal(x1$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.062823383, -0.036086154, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.16425035, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[3, ], c(-0.078510197, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[1, ], c(0.35743976, 0.21982839, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[2, ], c(0.25557989, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[3, ], c(0.21491638, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[1, ], c(0.23298603, 0.23298603, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[2, ], c(0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[3, ], c(0.389024, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) 
expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design2 <- getDesignFisher(kMax = 3, method = "equalAlpha", alpha = 0.05, informationRates = c(0.4, 0.7, 1)) x2 <- getAnalysisResults(design2, dataInput3, directionUpper = TRUE, stratifiedAnalysis = FALSE, intersectionTest = "Sidak", normalApproximation = FALSE, stage = 3 ) ## Comparison of the results of AnalysisResultsEnrichmentFisher object 'x2' with expected results expect_equal(x2$piTreatments[1, ], 0.41025641, tolerance = 1e-07) expect_equal(x2$piTreatments[2, ], NA_real_) expect_equal(x2$piTreatments[3, ], NA_real_) expect_equal(x2$conditionalRejectionProbabilities[1, ], c(0.075105953, 0.018243594, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[2, ], c(0.020009021, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[3, ], c(0.031471245, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.023654531, -0.034180226, 0.008300518), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.12625532, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x2$repeatedConfidenceIntervalLowerBounds[3, ], c(-0.051634044, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[1, ], c(0.32239366, 0.19556, 0.21299371), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[2, ], c(0.21912956, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[3, ], c(0.1890798, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[1, ], c(0.14811777, 0.14811777, 0.07171335), tolerance = 1e-07) expect_equal(x2$repeatedPValues[2, ], c(0.46979052, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[3, ], c(0.32146776, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$piControls[1, ], 0.23880597, tolerance = 1e-07) expect_equal(x2$piControls[2, ], NA_real_) expect_equal(x2$piControls[3, ], NA_real_) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$piTreatments, x2$piTreatments, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$piControls, x2$piControls, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && 
ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': enrichment rates, more sub-populations, non-stratified input, select S1 and S2 at first IA, select S1 at second, directionUpper = FALSE, gMax = 4", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentRates} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:testStatisticEnrichmentRates} S1 <- getDataset( sampleSize1 = c(84, 94, 25), sampleSize2 = c(82, 75, 23), events1 = c(21, 28, 13), events2 = c(32, 23, 20) ) S2 <- getDataset( sampleSize1 = c(81, 95, NA), sampleSize2 = c(84, 64, NA), events1 = c(26, 29, NA), events2 = c(31, 26, NA) ) S3 <- getDataset( sampleSize1 = c(71, NA, NA), sampleSize2 = c(74, NA, NA), events1 = c(16, NA, NA), events2 = c(21, NA, NA) ) F <- getDataset( sampleSize1 = c(248, NA, NA), sampleSize2 = c(254, NA, NA), events1 = c(75, NA, NA), events2 = c(98, NA, NA) ) R <- getDataset( sampleSize1 = c(12, NA, NA), sampleSize2 = c(14, NA, NA), events1 = c(12, NA, NA), events2 = c(14, NA, NA) ) dataInput4 <- getDataset(S1 = S1, S2 = S2, S3 = S3, F = F) ## Comparison of the results of DatasetRates object 'dataInput4' with expected results expect_equal(dataInput4$overallSampleSizes, c(84, 81, 71, 248, 82, 84, 74, 254, 178, 176, NA_real_, NA_real_, 157, 148, NA_real_, NA_real_, 203, NA_real_, NA_real_, NA_real_, 180, NA_real_, NA_real_, NA_real_)) 
expect_equal(dataInput4$overallEvents, c(21, 26, 16, 75, 32, 31, 21, 98, 49, 55, NA_real_, NA_real_, 55, 57, NA_real_, NA_real_, 62, NA_real_, NA_real_, NA_real_, 75, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput4), NA))) expect_output(print(dataInput4)$show()) invisible(capture.output(expect_error(summary(dataInput4), NA))) expect_output(summary(dataInput4)$show()) dataInput4CodeBased <- eval(parse(text = getObjectRCode(dataInput4, stringWrapParagraphWidth = NULL))) expect_equal(dataInput4CodeBased$overallSampleSizes, dataInput4$overallSampleSizes, tolerance = 1e-05) expect_equal(dataInput4CodeBased$overallEvents, dataInput4$overallEvents, tolerance = 1e-05) expect_type(names(dataInput4), "character") df <- as.data.frame(dataInput4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design1 <- getDesignInverseNormal(kMax = 3, alpha = 0.05, typeOfDesign = "asKD", gammaA = 2, informationRates = c(0.4, 0.7, 1)) x3 <- getAnalysisResults(design1, dataInput4, directionUpper = FALSE, stratifiedAnalysis = FALSE, intersectionTest = "Sidak", allocationRatioPlanned = 1, stage = 3, normalApproximation = TRUE ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x3' with expected results expect_equal(x3$piTreatments[1, ], 0.30541872, tolerance = 1e-07) expect_equal(x3$piTreatments[2, ], NA_real_) expect_equal(x3$piTreatments[3, ], NA_real_) expect_equal(x3$piTreatments[4, ], NA_real_) expect_equal(x3$conditionalRejectionProbabilities[1, ], c(0.13745997, 0.082835151, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities[2, ], c(0.023915975, 0.064596491, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities[3, ], c(0.023915975, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x3$conditionalRejectionProbabilities[4, ], c(0.13745997, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPower[4, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[1, ], c(-0.33926099, -0.22469062, -0.248011), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[2, ], c(-0.255132, -0.21555052, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[3, ], c(-0.26390722, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[4, ], c(-0.20314825, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[1, ], c(0.068268149, 0.059220127, -0.0081515662), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[2, ], c(0.16378176, 0.07555087, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[3, ], c(0.15232186, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[4, ], c(0.038730826, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[1, ], c(0.5, 0.26483774, 0.01063254), tolerance = 1e-07) expect_equal(x3$repeatedPValues[2, ], c(0.5, 0.30264322, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[3, ], c(0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[4, ], c(0.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$piControls[1, ], 0.41666667, tolerance = 1e-07) expect_equal(x3$piControls[2, ], NA_real_) expect_equal(x3$piControls[3, ], NA_real_) expect_equal(x3$piControls[4, ], NA_real_) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) 
expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$piTreatments, x3$piTreatments, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_equal(x3CodeBased$piControls, x3$piControls, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': enrichment rates, expected warning for empty subsets", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(84, 94, 25), sampleSize2 = c(82, 75, 23), events1 = c(21, 28, 13), events2 = c(32, 23, 20) ) S2 <- getDataset( sampleSize1 = c(81, 95, NA), sampleSize2 = c(84, 64, NA), events1 = c(26, 29, NA), events2 = c(31, 26, NA) ) S3 <- getDataset( sampleSize1 = c(71, NA, NA), sampleSize2 = c(74, NA, NA), events1 = c(16, NA, NA), events2 = c(21, NA, NA) ) R <- getDataset( sampleSize1 = c(12, NA, NA), sampleSize2 = c(14, NA, NA), events1 = c(12, NA, NA), events2 = c(14, NA, NA) ) expect_warning(getDataset(S1 = S1, S2 = S2, S3 = S3, R = R), "The 4 undefined subsets S12, S13, S23, S123 were defined as empty subsets", fixed = TRUE ) }) rpact/tests/testthat/helper-f_analysis_base_means.R0000644000176200001440000000353714277150417022312 
0ustar liggesusers## | ## | *Unit tests helper functions* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 6117 $ ## | Last changed: $Date: 2022-05-04 15:55:23 +0200 (Mi, 04 Mai 2022) $ ## | Last changed by: $Author: pahlke $ ## | testGetStageResultsPlotData <- function(x, ..., nPlanned, stage = NA_integer_, allocationRatioPlanned = 1) { if (x$getDataInput()$isDatasetMeans()) { assumedStDev <- .getOptionalArgument("assumedStDev", ...) if (is.null(assumedStDev)) { assumedStDev <- x$assumedStDev return(.getConditionalPowerPlot( stageResults = x, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, assumedStDev = assumedStDev, ... )) } } else if (x$getDataInput()$isDatasetRates()) { pi2 <- .getOptionalArgument("pi2", ...) if (is.null(pi2)) { pi2 <- x$pi2 return(.getConditionalPowerPlot( stageResults = x, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi2 = pi2, ... )) } } return(.getConditionalPowerPlot( stageResults = x, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, ... 
)) } rpact/tests/testthat/helper-f_core_utilities.R0000644000176200001440000000601114277150417021323 0ustar liggesusers## | ## | *Unit tests helper functions* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 6291 $ ## | Last changed: $Date: 2022-06-13 08:36:13 +0200 (Mo, 13 Jun 2022) $ ## | getTestInformationRatesDefault <- function(kMax) { return((1:kMax) / kMax) } getTestFutilityBoundsDefault <- function(kMax) { return(rep(-6, kMax - 1)) } getTestAlpha0VecDefault <- function(kMax) { return(rep(1, kMax - 1)) } getTestInformationRates <- function(kMax) { if (kMax == 1L) { return(1) } if (kMax == 6L) { return(c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1)) } a <- 0.8 / kMax b <- c() for (i in 1:(kMax - 1)) { b <- c(b, a * i) } return(c(b, 1)) } getTestFutilityBounds <- function(kMax, fisherDesignEnabled = FALSE) { if (kMax < 2) { stop("Illegal argument: 'kMax' must be >= 2") } if (kMax == 2 && fisherDesignEnabled) { return(0.5) } k <- kMax - 1 futilityBounds <- c(2) k <- k - 1 if (k > 0) { futilityBounds <- c(1, futilityBounds) k <- k - 1 } if (k > 0) { futilityBounds <- c(rep(0, k), futilityBounds) } if (fisherDesignEnabled) { futilityBounds[futilityBounds > 0] <- futilityBounds[futilityBounds > 0] / max(futilityBounds) futilityBounds[futilityBounds == 0] <- 0.01 } return(futilityBounds) } getTestDesign <- function(kMax = NA_integer_, informationRates = NA_real_, futilityBounds = NA_real_, designClass = "TrialDesignInverseNormal") { design <- NULL currentWarningOption <- getOption("warn") 
options(warn = -1) if (designClass == "TrialDesignFisher") { design <- getDesignFisher( kMax = as.integer(kMax), alpha0Vec = futilityBounds, informationRates = informationRates ) } else if (designClass == "TrialDesignInverseNormal") { design <- getDesignGroupSequential( kMax = as.integer(kMax), informationRates = informationRates, futilityBounds = futilityBounds, tolerance = 1e-06 ) } else { design <- getDesignInverseNormal( kMax = as.integer(kMax), informationRates = informationRates, futilityBounds = futilityBounds, tolerance = 1e-06 ) } options(warn = currentWarningOption) return(design) } rpact/tests/testthat/test-f_simulation_enrichment_rates.R0000644000176200001440000021273514372422771023613 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_simulation_enrichment_rates.R ## | Creation date: 06 February 2023, 12:14:06 ## | File version: $Revision: 6810 $ ## | Last changed: $Date: 2023-02-13 12:58:47 +0100 (Mo, 13 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Simulation Enrichment Rates Function") test_that("'getSimulationEnrichmentRates': gMax = 2", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:subsec:adaptiveClosedTestProcedureEnrichment} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:sec:simulationFunctions} # 
@refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:testStatisticEnrichmentRates} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment} # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentRates} # @refFS[Formula]{fs:simulationEnrichmentRatesGenerate} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} piInput <- c(0.3, 0.5, 0.3, 0.6, 0.3, 0.7, 0.3, 0.8, 0.4, 0.5, 0.4, 0.6, 0.4, 0.7, 0.4, 0.8, 0.5, 0.5, 0.5, 0.6, 0.5, 0.7, 0.5, 0.8) effectList <- list( subGroups = c("S", "R"), prevalences = c(0.74, 0.26), piControl = c(0.3, 0.5), piTreatments = matrix(piInput, byrow = TRUE, ncol = 2) ) design <- getDesignInverseNormal(informationRates = c(0.4, 1), typeOfDesign = "WT", deltaWT = 0.1) suppressWarnings(simResult1 <- getSimulationEnrichmentRates(design, plannedSubjects = c(150, 300), effectList = effectList, maxNumberOfIterations = 100, effectMeasure = "effectEstimate", stratifiedAnalysis = TRUE, allocationRatioPlanned = 2, directionUpper = TRUE, successCriterion = "atLeastOne", typeOfSelection = "rbest", rValue = 2, intersectionTest = "SpiessensDebois", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult1' with expected results expect_equal(simResult1$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult1$iterations[2, ], c(100, 100, 99, 96, 98, 97, 93, 89, 86, 88, 65, 59)) expect_equal(simResult1$rejectAtLeastOne, c(0.03, 0.03, 0.17, 0.17, 0.26, 0.41, 0.47, 0.63, 0.8, 0.84, 0.86, 0.99), tolerance = 1e-07) 
expect_equal(unlist(as.list(simResult1$rejectedPopulationsPerStage)), c(0, 0.02, 0, 0.02, 0, 0.05, 0, 0.02, 0.01, 0.23, 0.03, 0.28, 0.01, 0.27, 0.02, 0.19, 0.11, 0.65, 0.09, 0.68, 0.22, 0.44, 0.18, 0.41, 0, 0.02, 0, 0.03, 0.01, 0.16, 0.04, 0.13, 0.02, 0.2, 0.03, 0.35, 0.07, 0.4, 0.11, 0.52, 0.1, 0.57, 0.1, 0.7, 0.32, 0.49, 0.39, 0.58), tolerance = 1e-07) expect_equal(simResult1$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult1$earlyStop[1, ], c(0, 0, 0.01, 0.04, 0.02, 0.03, 0.07, 0.11, 0.14, 0.12, 0.35, 0.41), tolerance = 1e-07) expect_equal(simResult1$successPerStage[1, ], c(0, 0, 0.01, 0.04, 0.02, 0.03, 0.07, 0.11, 0.14, 0.12, 0.35, 0.41), tolerance = 1e-07) expect_equal(simResult1$successPerStage[2, ], c(0.03, 0.03, 0.16, 0.13, 0.24, 0.38, 0.4, 0.52, 0.66, 0.72, 0.51, 0.58), tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$selectedPopulations)), c(1, 1, 1, 1, 1, 0.99, 1, 0.96, 1, 0.98, 1, 0.97, 1, 0.93, 1, 0.89, 1, 0.86, 1, 0.88, 1, 0.65, 1, 0.59, 1, 1, 1, 1, 1, 0.99, 1, 0.96, 1, 0.98, 1, 0.97, 1, 0.93, 1, 0.89, 1, 0.86, 1, 0.88, 1, 0.65, 1, 0.59), tolerance = 1e-07) expect_equal(simResult1$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)) expect_equal(simResult1$numberOfPopulations[2, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)) expect_equal(simResult1$expectedNumberOfSubjects, c(300, 300, 298.5, 294, 297, 295.5, 289.5, 283.5, 279, 282, 247.5, 238.5), tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$sampleSizes)), c(111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39)) expect_equal(simResult1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult1$conditionalPowerAchieved[2, ], c(0.057886457, 0.11722504, 0.17374263, 
0.14254287, 0.24091794, 0.35196657, 0.39807899, 0.36830797, 0.54596748, 0.63396607, 0.61766608, 0.68903084), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult1), NA))) expect_output(print(simResult1)$show()) invisible(capture.output(expect_error(summary(simResult1), NA))) expect_output(summary(simResult1)$show()) suppressWarnings(simResult1CodeBased <- eval(parse(text = getObjectRCode(simResult1, stringWrapParagraphWidth = NULL)))) expect_equal(simResult1CodeBased$iterations, simResult1$iterations, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectAtLeastOne, simResult1$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectedPopulationsPerStage, simResult1$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$futilityPerStage, simResult1$futilityPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$earlyStop, simResult1$earlyStop, tolerance = 1e-05) expect_equal(simResult1CodeBased$successPerStage, simResult1$successPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$selectedPopulations, simResult1$selectedPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$numberOfPopulations, simResult1$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$expectedNumberOfSubjects, simResult1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult1CodeBased$sampleSizes, simResult1$sampleSizes, tolerance = 1e-05) expect_equal(simResult1CodeBased$conditionalPowerAchieved, simResult1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult1), "character") df <- as.data.frame(simResult1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfNotX64() suppressWarnings(simResult2 <- getSimulationEnrichmentRates(design, plannedSubjects = c(150, 300), 
effectList = effectList, maxNumberOfIterations = 100, effectMeasure = "effectEstimate", stratifiedAnalysis = TRUE, piTreatmentH1 = 0.6, piControlH1 = 0.45, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 150), maxNumberOfSubjectsPerStage = c(NA, 600), allocationRatioPlanned = 2, directionUpper = TRUE, successCriterion = "atLeastOne", typeOfSelection = "epsilon", epsilonValue = 0.025, intersectionTest = "Simes", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult2' with expected results expect_equal(simResult2$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult2$iterations[2, ], c(99, 100, 99, 96, 100, 95, 91, 91, 83, 79, 79, 63)) expect_equal(simResult2$rejectAtLeastOne, c(0.04, 0.04, 0.15, 0.36, 0.41, 0.54, 0.7, 0.92, 0.94, 0.93, 0.97, 0.98), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$rejectedPopulationsPerStage)), c(0.01, 0.03, 0, 0.01, 0.01, 0.01, 0, 0, 0, 0.36, 0.03, 0.31, 0.03, 0.22, 0.02, 0.12, 0.16, 0.73, 0.18, 0.55, 0.16, 0.45, 0.23, 0.29, 0, 0.01, 0, 0.03, 0.01, 0.14, 0.04, 0.32, 0, 0.14, 0.04, 0.38, 0.09, 0.52, 0.09, 0.8, 0.11, 0.23, 0.2, 0.44, 0.21, 0.57, 0.37, 0.51), tolerance = 1e-07) expect_equal(simResult2$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult2$earlyStop[1, ], c(0.01, 0, 0.01, 0.04, 0, 0.05, 0.09, 0.09, 0.17, 0.21, 0.21, 0.37), tolerance = 1e-07) expect_equal(simResult2$successPerStage[1, ], c(0.01, 0, 0.01, 0.04, 0, 0.05, 0.09, 0.09, 0.17, 0.21, 0.21, 0.37), tolerance = 1e-07) expect_equal(simResult2$successPerStage[2, ], c(0.03, 0.04, 0.14, 0.32, 0.41, 0.49, 0.61, 0.83, 0.77, 0.72, 0.76, 0.61), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$selectedPopulations)), c(1, 0.64, 1, 0.55, 1, 0.3, 1, 0.13, 1, 0.87, 1, 0.63, 1, 0.44, 1, 0.28, 1, 0.77, 1, 0.62, 1, 0.49, 1, 0.32, 1, 0.71, 1, 0.78, 1, 0.93, 1, 0.95, 1, 0.44, 1, 0.74, 1, 0.79, 1, 0.87, 1, 0.31, 1, 0.49, 1, 0.59, 1, 
0.51), tolerance = 1e-07) expect_equal(simResult2$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)) expect_equal(simResult2$numberOfPopulations[2, ], c(1.3636364, 1.33, 1.2424242, 1.125, 1.31, 1.4421053, 1.3516484, 1.2637363, 1.3012048, 1.4050633, 1.3670886, 1.3174603), tolerance = 1e-07) expect_equal(simResult2$expectedNumberOfSubjects, c(669.59264, 671.53258, 620.0907, 573.83864, 556.82907, 514.33552, 439.19492, 418.05629, 385.4022, 357.09909, 335.03201, 280.36711), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$sampleSizes)), c(111, 424.65084, 111, 412.59405, 111, 358.73817, 111, 327.31465, 111, 356.03736, 111, 302.73985, 111, 244.22837, 111, 220.27012, 111, 253.59117, 111, 214.59387, 111, 187.06662, 111, 161.81648, 39, 100.19021, 39, 108.93853, 39, 116.10091, 39, 114.18394, 39, 50.79171, 39, 80.771228, 39, 73.56825, 39, 74.297232, 39, 30.025946, 39, 47.556877, 39, 47.151107, 39, 45.115446), tolerance = 1e-07) expect_equal(simResult2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult2$conditionalPowerAchieved[2, ], c(0.47580257, 0.49594366, 0.54143038, 0.56498304, 0.65590031, 0.69185697, 0.74958231, 0.78227803, 0.78802696, 0.82212774, 0.82750537, 0.8268688), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult2), NA))) expect_output(print(simResult2)$show()) invisible(capture.output(expect_error(summary(simResult2), NA))) expect_output(summary(simResult2)$show()) suppressWarnings(simResult2CodeBased <- eval(parse(text = getObjectRCode(simResult2, stringWrapParagraphWidth = NULL)))) expect_equal(simResult2CodeBased$iterations, simResult2$iterations, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectAtLeastOne, simResult2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectedPopulationsPerStage, 
simResult2$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$futilityPerStage, simResult2$futilityPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$earlyStop, simResult2$earlyStop, tolerance = 1e-05) expect_equal(simResult2CodeBased$successPerStage, simResult2$successPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$selectedPopulations, simResult2$selectedPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$numberOfPopulations, simResult2$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$expectedNumberOfSubjects, simResult2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult2CodeBased$sampleSizes, simResult2$sampleSizes, tolerance = 1e-05) expect_equal(simResult2CodeBased$conditionalPowerAchieved, simResult2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult2), "character") df <- as.data.frame(simResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult3 <- getSimulationEnrichmentRates(design, plannedSubjects = c(150, 300), effectList = effectList, maxNumberOfIterations = 100, effectMeasure = "effectEstimate", stratifiedAnalysis = TRUE, allocationRatioPlanned = 2, directionUpper = TRUE, successCriterion = "atLeastOne", typeOfSelection = "best", intersectionTest = "Sidak", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult3' with expected results expect_equal(simResult3$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult3$iterations[2, ], c(100, 100, 98, 98, 100, 97, 94, 87, 84, 89, 77, 61)) expect_equal(simResult3$rejectAtLeastOne, c(0.01, 0.03, 0.15, 0.21, 0.19, 0.31, 0.51, 0.62, 0.85, 0.78, 0.91, 0.96), tolerance = 1e-07) 
expect_equal(unlist(as.list(simResult3$rejectedPopulationsPerStage)), c(0, 0.01, 0, 0.01, 0, 0, 0, 0, 0, 0.17, 0.03, 0.16, 0.03, 0.1, 0.02, 0.03, 0.12, 0.58, 0.1, 0.49, 0.19, 0.44, 0.19, 0.11, 0, 0, 0, 0.02, 0.02, 0.13, 0.02, 0.19, 0, 0.02, 0.03, 0.12, 0.06, 0.35, 0.13, 0.46, 0.13, 0.11, 0.1, 0.18, 0.21, 0.24, 0.36, 0.46), tolerance = 1e-07) expect_equal(simResult3$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult3$earlyStop[1, ], c(0, 0, 0.02, 0.02, 0, 0.03, 0.06, 0.13, 0.16, 0.11, 0.23, 0.39), tolerance = 1e-07) expect_equal(simResult3$successPerStage[1, ], c(0, 0, 0.02, 0.02, 0, 0.03, 0.06, 0.13, 0.16, 0.11, 0.23, 0.39), tolerance = 1e-07) expect_equal(simResult3$successPerStage[2, ], c(0.01, 0.03, 0.13, 0.19, 0.19, 0.28, 0.45, 0.49, 0.69, 0.67, 0.68, 0.57), tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$selectedPopulations)), c(1, 0.44, 1, 0.31, 1, 0.13, 1, 0.09, 1, 0.71, 1, 0.45, 1, 0.24, 1, 0.14, 1, 0.68, 1, 0.59, 1, 0.48, 1, 0.12, 1, 0.56, 1, 0.69, 1, 0.85, 1, 0.89, 1, 0.29, 1, 0.52, 1, 0.7, 1, 0.73, 1, 0.16, 1, 0.3, 1, 0.29, 1, 0.49), tolerance = 1e-07) expect_equal(simResult3$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)) expect_equal(simResult3$numberOfPopulations[2, ], c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)) expect_equal(simResult3$expectedNumberOfSubjects, c(300, 300, 297, 297, 300, 295.5, 291, 280.5, 276, 283.5, 265.5, 241.5), tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$sampleSizes)), c(111, 128.16, 111, 123.09, 111, 116.17347, 111, 114.58163, 111, 138.69, 111, 129.09278, 111, 120.95745, 111, 117.27586, 111, 142.57143, 111, 136.85393, 111, 135.31169, 111, 118.67213, 39, 21.84, 39, 26.91, 39, 33.826531, 39, 35.418367, 39, 11.31, 39, 20.907216, 39, 29.042553, 39, 32.724138, 39, 7.4285714, 39, 13.146067, 39, 14.688312, 39, 31.327869), tolerance = 1e-07) expect_equal(simResult3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, 
NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult3$conditionalPowerAchieved[2, ], c(0.083063533, 0.12244222, 0.16903461, 0.19341855, 0.20869939, 0.28782427, 0.42698224, 0.4072498, 0.57493889, 0.6368279, 0.70412178, 0.6855194), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult3), NA))) expect_output(print(simResult3)$show()) invisible(capture.output(expect_error(summary(simResult3), NA))) expect_output(summary(simResult3)$show()) suppressWarnings(simResult3CodeBased <- eval(parse(text = getObjectRCode(simResult3, stringWrapParagraphWidth = NULL)))) expect_equal(simResult3CodeBased$iterations, simResult3$iterations, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectAtLeastOne, simResult3$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectedPopulationsPerStage, simResult3$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$futilityPerStage, simResult3$futilityPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$earlyStop, simResult3$earlyStop, tolerance = 1e-05) expect_equal(simResult3CodeBased$successPerStage, simResult3$successPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$selectedPopulations, simResult3$selectedPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$numberOfPopulations, simResult3$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$expectedNumberOfSubjects, simResult3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult3CodeBased$sampleSizes, simResult3$sampleSizes, tolerance = 1e-05) expect_equal(simResult3CodeBased$conditionalPowerAchieved, simResult3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult3), "character") df <- as.data.frame(simResult3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 
&& ncol(mtx) > 0) } suppressWarnings(simResult4 <- getSimulationEnrichmentRates(design, plannedSubjects = c(150, 300), effectList = effectList, maxNumberOfIterations = 100, effectMeasure = "effectEstimate", stratifiedAnalysis = TRUE, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 150), maxNumberOfSubjectsPerStage = c(NA, 600), allocationRatioPlanned = 2, directionUpper = TRUE, successCriterion = "atLeastOne", typeOfSelection = "epsilon", epsilonValue = 0.025, intersectionTest = "Bonferroni", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult4' with expected results expect_equal(simResult4$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult4$iterations[2, ], c(57, 60, 73, 78, 91, 90, 86, 84, 80, 79, 75, 63)) expect_equal(simResult4$rejectAtLeastOne, c(0.02, 0.02, 0.13, 0.38, 0.43, 0.49, 0.63, 0.82, 0.84, 0.87, 0.95, 0.97), tolerance = 1e-07) expect_equal(unlist(as.list(simResult4$rejectedPopulationsPerStage)), c(0, 0.01, 0, 0, 0, 0, 0, 0.02, 0, 0.38, 0.02, 0.31, 0.03, 0.16, 0.01, 0.11, 0.16, 0.63, 0.17, 0.53, 0.18, 0.49, 0.18, 0.33, 0, 0.01, 0, 0.02, 0.01, 0.12, 0.04, 0.34, 0, 0.15, 0.02, 0.31, 0.09, 0.5, 0.12, 0.66, 0.09, 0.23, 0.16, 0.37, 0.24, 0.49, 0.37, 0.48), tolerance = 1e-07) expect_equal(simResult4$futilityPerStage[1, ], c(0.43, 0.4, 0.26, 0.18, 0.09, 0.08, 0.05, 0.04, 0.03, 0.01, 0.01, 0), tolerance = 1e-07) expect_equal(simResult4$earlyStop[1, ], c(0.43, 0.4, 0.27, 0.22, 0.09, 0.1, 0.14, 0.16, 0.2, 0.21, 0.25, 0.37), tolerance = 1e-07) expect_equal(simResult4$successPerStage[1, ], c(0, 0, 0.01, 0.04, 0, 0.02, 0.09, 0.12, 0.17, 0.2, 0.24, 0.37), tolerance = 1e-07) expect_equal(simResult4$successPerStage[2, ], c(0.02, 0.02, 0.12, 0.34, 0.43, 0.47, 0.54, 0.7, 0.67, 0.67, 0.71, 0.6), tolerance = 1e-07) expect_equal(unlist(as.list(simResult4$selectedPopulations)), c(1, 0.47, 1, 0.28, 1, 0.21, 1, 0.15, 1, 0.79, 1, 0.64, 1, 0.43, 1, 0.23, 1, 0.73, 1, 
0.61, 1, 0.53, 1, 0.36, 1, 0.3, 1, 0.46, 1, 0.67, 1, 0.76, 1, 0.43, 1, 0.62, 1, 0.77, 1, 0.8, 1, 0.31, 1, 0.47, 1, 0.52, 1, 0.5), tolerance = 1e-07) expect_equal(simResult4$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)) expect_equal(simResult4$numberOfPopulations[2, ], c(1.3508772, 1.2333333, 1.2054795, 1.1666667, 1.3406593, 1.4, 1.3953488, 1.2261905, 1.3, 1.3670886, 1.4, 1.3650794), tolerance = 1e-07) expect_equal(simResult4$expectedNumberOfSubjects, c(453.37572, 447.96694, 541.237, 483.73584, 535.2315, 511.41354, 448.69764, 410.71972, 362.5422, 354.38041, 338.45385, 285.45619), tolerance = 1e-07) expect_equal(unlist(as.list(simResult4$sampleSizes)), c(111, 459.21285, 111, 397.21147, 111, 406.83057, 111, 317.9318, 111, 364.75188, 111, 324.0256, 111, 263.10197, 111, 232.55273, 111, 230.38271, 111, 215.59706, 111, 201.103, 111, 168.7985, 39, 73.025259, 39, 99.400093, 39, 129.11053, 39, 109.93466, 39, 58.579437, 39, 77.545002, 39, 84.220859, 39, 77.827889, 39, 35.295041, 39, 43.112321, 39, 50.1688, 39, 46.211325), tolerance = 1e-07) expect_equal(simResult4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult4$conditionalPowerAchieved[2, ], c(0.17722261, 0.20630429, 0.22165392, 0.29435606, 0.38613941, 0.45798394, 0.53716481, 0.50557573, 0.59360581, 0.71535155, 0.72089862, 0.74669086), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult4), NA))) expect_output(print(simResult4)$show()) invisible(capture.output(expect_error(summary(simResult4), NA))) expect_output(summary(simResult4)$show()) suppressWarnings(simResult4CodeBased <- eval(parse(text = getObjectRCode(simResult4, stringWrapParagraphWidth = NULL)))) expect_equal(simResult4CodeBased$iterations, simResult4$iterations, tolerance = 1e-05) expect_equal(simResult4CodeBased$rejectAtLeastOne, 
simResult4$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult4CodeBased$rejectedPopulationsPerStage, simResult4$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$futilityPerStage, simResult4$futilityPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$earlyStop, simResult4$earlyStop, tolerance = 1e-05) expect_equal(simResult4CodeBased$successPerStage, simResult4$successPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$selectedPopulations, simResult4$selectedPopulations, tolerance = 1e-05) expect_equal(simResult4CodeBased$numberOfPopulations, simResult4$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult4CodeBased$expectedNumberOfSubjects, simResult4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult4CodeBased$sampleSizes, simResult4$sampleSizes, tolerance = 1e-05) expect_equal(simResult4CodeBased$conditionalPowerAchieved, simResult4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult4), "character") df <- as.data.frame(simResult4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationEnrichmentRates': gMax = 3", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:subsec:adaptiveClosedTestProcedureEnrichment} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:testStatisticEnrichmentRates} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # 
@refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:simulationEnrichmentRatesGenerate} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} piTreatments <- c( 0.30, 0.40, 0.30, 0.55, 0.30, 0.40, 0.30, 0.75, 0.30, 0.40, 0.50, 0.55, 0.30, 0.40, 0.50, 0.75, 0.30, 0.60, 0.30, 0.55, 0.30, 0.60, 0.30, 0.75, 0.30, 0.60, 0.50, 0.55, 0.30, 0.60, 0.50, 0.75, 0.50, 0.40, 0.30, 0.55, 0.50, 0.40, 0.30, 0.75, 0.50, 0.40, 0.50, 0.55, 0.50, 0.40, 0.50, 0.75, 0.50, 0.60, 0.30, 0.55, 0.50, 0.60, 0.30, 0.75, 0.50, 0.60, 0.50, 0.55, 0.50, 0.60, 0.50, 0.75 ) effectList <- list( subGroups = c("S1", "S2", "S12", "R"), prevalences = c(0.1, 0.4, 0.2, 0.3), piControls = c(0.3, 0.4, 0.3, 0.55), piTreatments = matrix(piTreatments, byrow = TRUE, ncol = 4) ) design <- getDesignInverseNormal(informationRates = c(0.5, 1), typeOfDesign = "noEarlyEfficacy") suppressWarnings(simResult1 <- getSimulationEnrichmentRates(design, plannedSubjects = c(150, 300), effectList = effectList, maxNumberOfIterations = 100, effectMeasure = "effectEstimate", stratifiedAnalysis = TRUE, allocationRatioPlanned = 1.5, directionUpper = TRUE, successCriterion = "atLeastOne", typeOfSelection = "epsilon", epsilonValue = 0.025, intersectionTest = "Sidak", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult1' with expected results expect_equal(simResult1$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult1$iterations[2, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult1$rejectAtLeastOne, c(0.01, 0.11, 0.34, 0.41, 0.43, 0.52, 0.64, 0.76, 0.1, 0.13, 0.58, 0.58, 0.37, 0.63, 0.8, 0.88), tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$rejectedPopulationsPerStage)), c(0, 0.01, 0, 0.01, 0, 0.29, 0, 0.31, 0, 0, 0, 0.02, 0, 
0.16, 0, 0.1, 0, 0.08, 0, 0.08, 0, 0.51, 0, 0.42, 0, 0.08, 0, 0.06, 0, 0.43, 0, 0.4, 0, 0, 0, 0.01, 0, 0.06, 0, 0.03, 0, 0.4, 0, 0.26, 0, 0.48, 0, 0.51, 0, 0.01, 0, 0, 0, 0.05, 0, 0.03, 0, 0.26, 0, 0.19, 0, 0.42, 0, 0.37, 0, 0, 0, 0.1, 0, 0.02, 0, 0.09, 0, 0.03, 0, 0.35, 0, 0.02, 0, 0.31, 0, 0.01, 0, 0.05, 0, 0.03, 0, 0.16, 0, 0.09, 0, 0.5, 0, 0.1, 0, 0.35), tolerance = 1e-07) expect_equal(simResult1$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult1$earlyStop[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult1$successPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult1$successPerStage[2, ], c(0.01, 0.11, 0.34, 0.41, 0.43, 0.52, 0.64, 0.76, 0.1, 0.13, 0.58, 0.58, 0.37, 0.63, 0.8, 0.88), tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$selectedPopulations)), c(1, 0.45, 1, 0.41, 1, 0.76, 1, 0.65, 1, 0.14, 1, 0.09, 1, 0.39, 1, 0.24, 1, 0.6, 1, 0.56, 1, 0.81, 1, 0.74, 1, 0.31, 1, 0.17, 1, 0.59, 1, 0.55, 1, 0.38, 1, 0.15, 1, 0.3, 1, 0.18, 1, 0.81, 1, 0.62, 1, 0.66, 1, 0.63, 1, 0.28, 1, 0.17, 1, 0.22, 1, 0.14, 1, 0.73, 1, 0.35, 1, 0.54, 1, 0.42, 1, 0.41, 1, 0.64, 1, 0.19, 1, 0.35, 1, 0.23, 1, 0.67, 1, 0.16, 1, 0.46, 1, 0.32, 1, 0.59, 1, 0.14, 1, 0.38, 1, 0.32, 1, 0.78, 1, 0.14, 1, 0.4), tolerance = 1e-07) expect_equal(simResult1$numberOfPopulations[1, ], c(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3)) expect_equal(simResult1$numberOfPopulations[2, ], c(1.24, 1.2, 1.25, 1.18, 1.18, 1.38, 1.21, 1.33, 1.2, 1.32, 1.17, 1.26, 1.36, 1.3, 1.27, 1.37), tolerance = 1e-07) expect_equal(simResult1$expectedNumberOfSubjects, c(300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300), tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$sampleSizes)), c(15, 23.007143, 15, 24.6, 15, 35.778571, 15, 33.607143, 15, 8.0928571, 15, 12.764286, 15, 17.042857, 15, 13.757143, 15, 29.442857, 15, 27.278571, 15, 37.6, 15, 34.414286, 15, 15.3, 15, 
16.7, 15, 25.742857, 15, 25.071429, 60, 51.028571, 60, 44.4, 60, 29.114286, 60, 30.428571, 60, 82.371429, 60, 68.057143, 60, 67.171429, 60, 69.028571, 60, 38.771429, 60, 40.114286, 60, 25.4, 60, 27.657143, 60, 68.2, 60, 58.8, 60, 51.971429, 60, 47.285714, 30, 57.514286, 30, 52.2, 30, 76.557143, 30, 70.214286, 30, 49.185714, 30, 39.028571, 30, 58.585714, 30, 46.514286, 30, 67.385714, 30, 56.057143, 30, 80.7, 30, 70.828571, 30, 52.1, 30, 39.4, 30, 65.985714, 30, 59.642857, 45, 18.45, 45, 28.8, 45, 8.55, 45, 15.75, 45, 10.35, 45, 30.15, 45, 7.2, 45, 20.7, 45, 14.4, 45, 26.55, 45, 6.3, 45, 17.1, 45, 14.4, 45, 35.1, 45, 6.3, 45, 18), tolerance = 1e-07) expect_equal(simResult1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult1$conditionalPowerAchieved[2, ], c(0.052366398, 0.10235816, 0.23768651, 0.28614763, 0.25721791, 0.27114584, 0.42018555, 0.53367483, 0.094822282, 0.17558915, 0.27651135, 0.31521608, 0.31906941, 0.3984128, 0.57056973, 0.7055787), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult1), NA))) expect_output(print(simResult1)$show()) invisible(capture.output(expect_error(summary(simResult1), NA))) expect_output(summary(simResult1)$show()) suppressWarnings(simResult1CodeBased <- eval(parse(text = getObjectRCode(simResult1, stringWrapParagraphWidth = NULL)))) expect_equal(simResult1CodeBased$iterations, simResult1$iterations, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectAtLeastOne, simResult1$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectedPopulationsPerStage, simResult1$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$futilityPerStage, simResult1$futilityPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$earlyStop, simResult1$earlyStop, 
tolerance = 1e-05) expect_equal(simResult1CodeBased$successPerStage, simResult1$successPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$selectedPopulations, simResult1$selectedPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$numberOfPopulations, simResult1$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$expectedNumberOfSubjects, simResult1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult1CodeBased$sampleSizes, simResult1$sampleSizes, tolerance = 1e-05) expect_equal(simResult1CodeBased$conditionalPowerAchieved, simResult1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult1), "character") df <- as.data.frame(simResult1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult2 <- getSimulationEnrichmentRates(design, plannedSubjects = c(150, 300), effectList = effectList, maxNumberOfIterations = 100, effectMeasure = "effectEstimate", stratifiedAnalysis = TRUE, allocationRatioPlanned = 1.5, directionUpper = TRUE, successCriterion = "atLeastOne", typeOfSelection = "rbest", rValue = 2, intersectionTest = "Bonferroni", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult2' with expected results expect_equal(simResult2$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult2$iterations[2, ], c(63, 72, 78, 88, 91, 93, 97, 97, 67, 72, 91, 96, 91, 97, 99, 99)) expect_equal(simResult2$rejectAtLeastOne, c(0.03, 0.1, 0.11, 0.34, 0.28, 0.42, 0.7, 0.76, 0.04, 0.18, 0.38, 0.29, 0.27, 0.62, 0.79, 0.82), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$rejectedPopulationsPerStage)), c(0, 0.01, 0, 0.01, 0, 0.09, 0, 0.21, 0, 0.03, 0, 0.01, 0, 0.18, 0, 0.16, 0, 0.02, 0, 0.07, 0, 0.36, 0, 0.19, 0, 0.06, 0, 0.09, 0, 0.53, 0, 0.45, 
0, 0.03, 0, 0.01, 0, 0.04, 0, 0.05, 0, 0.25, 0, 0.29, 0, 0.64, 0, 0.57, 0, 0, 0, 0.01, 0, 0.12, 0, 0.06, 0, 0.24, 0, 0.31, 0, 0.7, 0, 0.5, 0, 0.02, 0, 0.09, 0, 0.03, 0, 0.28, 0, 0.09, 0, 0.37, 0, 0.22, 0, 0.6, 0, 0.02, 0, 0.15, 0, 0.06, 0, 0.17, 0, 0.16, 0, 0.54, 0, 0.18, 0, 0.52), tolerance = 1e-07) expect_equal(simResult2$futilityPerStage[1, ], c(0.37, 0.28, 0.22, 0.12, 0.09, 0.07, 0.03, 0.03, 0.33, 0.28, 0.09, 0.04, 0.09, 0.03, 0.01, 0.01), tolerance = 1e-07) expect_equal(simResult2$earlyStop[1, ], c(0.37, 0.28, 0.22, 0.12, 0.09, 0.07, 0.03, 0.03, 0.33, 0.28, 0.09, 0.04, 0.09, 0.03, 0.01, 0.01), tolerance = 1e-07) expect_equal(simResult2$successPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult2$successPerStage[2, ], c(0.03, 0.1, 0.11, 0.34, 0.28, 0.42, 0.7, 0.76, 0.04, 0.18, 0.38, 0.29, 0.27, 0.62, 0.79, 0.82), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$selectedPopulations)), c(1, 0.37, 1, 0.43, 1, 0.66, 1, 0.66, 1, 0.23, 1, 0.14, 1, 0.55, 1, 0.37, 1, 0.54, 1, 0.54, 1, 0.87, 1, 0.87, 1, 0.42, 1, 0.35, 1, 0.82, 1, 0.66, 1, 0.46, 1, 0.33, 1, 0.59, 1, 0.38, 1, 0.88, 1, 0.86, 1, 0.9, 1, 0.8, 1, 0.25, 1, 0.24, 1, 0.52, 1, 0.26, 1, 0.79, 1, 0.73, 1, 0.85, 1, 0.68, 1, 0.43, 1, 0.68, 1, 0.31, 1, 0.72, 1, 0.71, 1, 0.86, 1, 0.49, 1, 0.77, 1, 0.55, 1, 0.66, 1, 0.43, 1, 0.79, 1, 0.61, 1, 0.86, 1, 0.31, 1, 0.64), tolerance = 1e-07) expect_equal(simResult2$numberOfPopulations[1, ], c(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3)) expect_equal(simResult2$numberOfPopulations[2, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2)) expect_equal(simResult2$expectedNumberOfSubjects, c(244.5, 258, 267, 282, 286.5, 289.5, 295.5, 295.5, 250.5, 258, 286.5, 294, 286.5, 295.5, 298.5, 298.5), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$sampleSizes)), c(15, 17.040816, 15, 15.357143, 15, 18.873626, 15, 16.168831, 15, 16.412873, 15, 15.483871, 15, 18.181149, 15, 16.325479, 15, 16.151386, 15, 15.535714, 15, 18.390895, 15, 
16.138393, 15, 17.119309, 15, 15.729013, 15, 19.415584, 15, 17.272727, 60, 68.163265, 60, 61.428571, 60, 75.494505, 60, 64.675325, 60, 65.651491, 60, 61.935484, 60, 72.724595, 60, 65.301915, 60, 64.605544, 60, 62.142857, 60, 73.563579, 60, 64.553571, 60, 68.477237, 60, 62.916053, 60, 77.662338, 60, 69.090909, 30, 34.081633, 30, 30.714286, 30, 37.747253, 30, 32.337662, 30, 32.825746, 30, 30.967742, 30, 36.362297, 30, 32.650957, 30, 32.302772, 30, 31.071429, 30, 36.78179, 30, 32.276786, 30, 34.238619, 30, 31.458027, 30, 38.831169, 30, 34.545455, 45, 30.714286, 45, 42.5, 45, 17.884615, 45, 36.818182, 45, 35.10989, 45, 41.612903, 45, 22.731959, 45, 35.721649, 45, 36.940299, 45, 41.25, 45, 21.263736, 45, 37.03125, 45, 30.164835, 45, 39.896907, 45, 14.090909, 45, 29.090909), tolerance = 1e-07) expect_equal(simResult2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult2$conditionalPowerAchieved[2, ], c(0.098541448, 0.1603324, 0.18848191, 0.33019209, 0.1726177, 0.23217693, 0.48938782, 0.5528132, 0.15183095, 0.21072686, 0.29316228, 0.34756908, 0.32894823, 0.41694547, 0.62874091, 0.68601647), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult2), NA))) expect_output(print(simResult2)$show()) invisible(capture.output(expect_error(summary(simResult2), NA))) expect_output(summary(simResult2)$show()) suppressWarnings(simResult2CodeBased <- eval(parse(text = getObjectRCode(simResult2, stringWrapParagraphWidth = NULL)))) expect_equal(simResult2CodeBased$iterations, simResult2$iterations, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectAtLeastOne, simResult2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectedPopulationsPerStage, simResult2$rejectedPopulationsPerStage, tolerance = 1e-05) 
expect_equal(simResult2CodeBased$futilityPerStage, simResult2$futilityPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$earlyStop, simResult2$earlyStop, tolerance = 1e-05) expect_equal(simResult2CodeBased$successPerStage, simResult2$successPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$selectedPopulations, simResult2$selectedPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$numberOfPopulations, simResult2$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$expectedNumberOfSubjects, simResult2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult2CodeBased$sampleSizes, simResult2$sampleSizes, tolerance = 1e-05) expect_equal(simResult2CodeBased$conditionalPowerAchieved, simResult2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult2), "character") df <- as.data.frame(simResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult3 <- getSimulationEnrichmentRates(design, plannedSubjects = c(150, 300), effectList = effectList, maxNumberOfIterations = 100, effectMeasure = "effectEstimate", stratifiedAnalysis = FALSE, allocationRatioPlanned = 1.5, directionUpper = TRUE, successCriterion = "atLeastOne", typeOfSelection = "epsilon", epsilonValue = 0.025, intersectionTest = "Simes", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult3' with expected results expect_equal(simResult3$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult3$iterations[2, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult3$rejectAtLeastOne, c(0.01, 0.09, 0.33, 0.41, 0.43, 0.49, 0.64, 0.74, 0.09, 0.13, 0.6, 0.55, 0.37, 0.59, 0.82, 0.87), tolerance = 1e-07) 
expect_equal(unlist(as.list(simResult3$rejectedPopulationsPerStage)), c(0, 0.01, 0, 0.01, 0, 0.28, 0, 0.32, 0, 0, 0, 0.02, 0, 0.15, 0, 0.11, 0, 0.07, 0, 0.08, 0, 0.53, 0, 0.42, 0, 0.08, 0, 0.05, 0, 0.45, 0, 0.42, 0, 0, 0, 0.01, 0, 0.05, 0, 0.03, 0, 0.4, 0, 0.25, 0, 0.5, 0, 0.5, 0, 0.01, 0, 0.01, 0, 0.05, 0, 0.03, 0, 0.25, 0, 0.17, 0, 0.44, 0, 0.37, 0, 0, 0, 0.08, 0, 0.02, 0, 0.08, 0, 0.03, 0, 0.31, 0, 0.01, 0, 0.28, 0, 0.01, 0, 0.04, 0, 0.03, 0, 0.14, 0, 0.11, 0, 0.46, 0, 0.1, 0, 0.34), tolerance = 1e-07) expect_equal(simResult3$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult3$earlyStop[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult3$successPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult3$successPerStage[2, ], c(0.01, 0.09, 0.33, 0.41, 0.43, 0.49, 0.64, 0.74, 0.09, 0.13, 0.6, 0.55, 0.37, 0.59, 0.82, 0.87), tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$selectedPopulations)), c(1, 0.45, 1, 0.41, 1, 0.76, 1, 0.65, 1, 0.14, 1, 0.09, 1, 0.39, 1, 0.24, 1, 0.6, 1, 0.56, 1, 0.81, 1, 0.74, 1, 0.31, 1, 0.17, 1, 0.59, 1, 0.55, 1, 0.38, 1, 0.15, 1, 0.3, 1, 0.18, 1, 0.81, 1, 0.62, 1, 0.66, 1, 0.63, 1, 0.28, 1, 0.17, 1, 0.22, 1, 0.14, 1, 0.73, 1, 0.35, 1, 0.54, 1, 0.42, 1, 0.41, 1, 0.64, 1, 0.19, 1, 0.35, 1, 0.23, 1, 0.67, 1, 0.16, 1, 0.46, 1, 0.32, 1, 0.59, 1, 0.14, 1, 0.38, 1, 0.32, 1, 0.78, 1, 0.14, 1, 0.4), tolerance = 1e-07) expect_equal(simResult3$numberOfPopulations[1, ], c(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3)) expect_equal(simResult3$numberOfPopulations[2, ], c(1.24, 1.2, 1.25, 1.18, 1.18, 1.38, 1.21, 1.33, 1.2, 1.32, 1.17, 1.26, 1.36, 1.3, 1.27, 1.37), tolerance = 1e-07) expect_equal(simResult3$expectedNumberOfSubjects, c(300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300), tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$sampleSizes)), c(15, 23.007143, 15, 24.6, 15, 35.778571, 15, 
33.607143, 15, 8.0928571, 15, 12.764286, 15, 17.042857, 15, 13.757143, 15, 29.442857, 15, 27.278571, 15, 37.6, 15, 34.414286, 15, 15.3, 15, 16.7, 15, 25.742857, 15, 25.071429, 60, 51.028571, 60, 44.4, 60, 29.114286, 60, 30.428571, 60, 82.371429, 60, 68.057143, 60, 67.171429, 60, 69.028571, 60, 38.771429, 60, 40.114286, 60, 25.4, 60, 27.657143, 60, 68.2, 60, 58.8, 60, 51.971429, 60, 47.285714, 30, 57.514286, 30, 52.2, 30, 76.557143, 30, 70.214286, 30, 49.185714, 30, 39.028571, 30, 58.585714, 30, 46.514286, 30, 67.385714, 30, 56.057143, 30, 80.7, 30, 70.828571, 30, 52.1, 30, 39.4, 30, 65.985714, 30, 59.642857, 45, 18.45, 45, 28.8, 45, 8.55, 45, 15.75, 45, 10.35, 45, 30.15, 45, 7.2, 45, 20.7, 45, 14.4, 45, 26.55, 45, 6.3, 45, 17.1, 45, 14.4, 45, 35.1, 45, 6.3, 45, 18), tolerance = 1e-07) expect_equal(simResult3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult3$conditionalPowerAchieved[2, ], c(0.049725876, 0.09761839, 0.23332728, 0.27699949, 0.24938469, 0.25259324, 0.41341769, 0.52195003, 0.091306519, 0.16413348, 0.27352495, 0.30455767, 0.309829, 0.37988667, 0.56618897, 0.69760011), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult3), NA))) expect_output(print(simResult3)$show()) invisible(capture.output(expect_error(summary(simResult3), NA))) expect_output(summary(simResult3)$show()) suppressWarnings(simResult3CodeBased <- eval(parse(text = getObjectRCode(simResult3, stringWrapParagraphWidth = NULL)))) expect_equal(simResult3CodeBased$iterations, simResult3$iterations, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectAtLeastOne, simResult3$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectedPopulationsPerStage, simResult3$rejectedPopulationsPerStage, tolerance = 1e-05) 
expect_equal(simResult3CodeBased$futilityPerStage, simResult3$futilityPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$earlyStop, simResult3$earlyStop, tolerance = 1e-05) expect_equal(simResult3CodeBased$successPerStage, simResult3$successPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$selectedPopulations, simResult3$selectedPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$numberOfPopulations, simResult3$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$expectedNumberOfSubjects, simResult3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult3CodeBased$sampleSizes, simResult3$sampleSizes, tolerance = 1e-05) expect_equal(simResult3CodeBased$conditionalPowerAchieved, simResult3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult3), "character") df <- as.data.frame(simResult3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult4 <- getSimulationEnrichmentRates(design, plannedSubjects = c(150, 300), effectList = effectList, maxNumberOfIterations = 100, effectMeasure = "effectEstimate", stratifiedAnalysis = FALSE, allocationRatioPlanned = 1.5, directionUpper = TRUE, successCriterion = "atLeastOne", typeOfSelection = "best", intersectionTest = "Sidak", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult4' with expected results expect_equal(simResult4$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult4$iterations[2, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100)) expect_equal(simResult4$rejectAtLeastOne, c(0.01, 0.05, 0.27, 0.38, 0.42, 0.54, 0.58, 0.77, 0.07, 0.18, 0.52, 0.64, 0.39, 0.53, 0.83, 0.94), tolerance = 1e-07) 
expect_equal(unlist(as.list(simResult4$rejectedPopulationsPerStage)), c(0, 0, 0, 0.01, 0, 0.25, 0, 0.26, 0, 0, 0, 0, 0, 0.15, 0, 0.15, 0, 0.05, 0, 0.1, 0, 0.49, 0, 0.58, 0, 0.06, 0, 0.1, 0, 0.37, 0, 0.3, 0, 0.01, 0, 0, 0, 0.02, 0, 0.02, 0, 0.39, 0, 0.25, 0, 0.42, 0, 0.45, 0, 0.01, 0, 0.01, 0, 0.02, 0, 0, 0, 0.26, 0, 0.21, 0, 0.43, 0, 0.35, 0, 0, 0, 0.04, 0, 0, 0, 0.1, 0, 0.03, 0, 0.29, 0, 0.01, 0, 0.17, 0, 0.01, 0, 0.07, 0, 0.01, 0, 0.06, 0, 0.07, 0, 0.22, 0, 0.03, 0, 0.29), tolerance = 1e-07) expect_equal(simResult4$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult4$earlyStop[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult4$successPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(simResult4$successPerStage[2, ], c(0.01, 0.05, 0.27, 0.38, 0.42, 0.54, 0.58, 0.77, 0.07, 0.18, 0.52, 0.64, 0.39, 0.53, 0.83, 0.94), tolerance = 1e-07) expect_equal(unlist(as.list(simResult4$selectedPopulations)), c(1, 0.36, 1, 0.29, 1, 0.72, 1, 0.61, 1, 0.12, 1, 0.07, 1, 0.3, 1, 0.28, 1, 0.6, 1, 0.42, 1, 0.77, 1, 0.75, 1, 0.19, 1, 0.27, 1, 0.42, 1, 0.34, 1, 0.4, 1, 0.21, 1, 0.21, 1, 0.11, 1, 0.72, 1, 0.43, 1, 0.61, 1, 0.5, 1, 0.22, 1, 0.1, 1, 0.12, 1, 0.03, 1, 0.6, 1, 0.33, 1, 0.49, 1, 0.36, 1, 0.24, 1, 0.5, 1, 0.07, 1, 0.28, 1, 0.16, 1, 0.5, 1, 0.09, 1, 0.22, 1, 0.18, 1, 0.48, 1, 0.11, 1, 0.22, 1, 0.21, 1, 0.4, 1, 0.09, 1, 0.3), tolerance = 1e-07) expect_equal(simResult4$numberOfPopulations[1, ], c(3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3)) expect_equal(simResult4$numberOfPopulations[2, ], c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)) expect_equal(simResult4$expectedNumberOfSubjects, c(300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300)) expect_equal(unlist(as.list(simResult4$sampleSizes)), c(15, 21.6, 15, 22, 15, 37.05, 15, 34.7, 15, 8.4, 15, 11, 15, 16.35, 15, 17.3, 15, 32.7, 15, 28.2, 15, 40.15, 15, 40.8, 15, 12.65, 15, 19.5, 15, 22.35, 15, 
21.5, 60, 54.4, 60, 51, 60, 25.2, 60, 27.8, 60, 81.6, 60, 73, 60, 66.4, 60, 63.2, 60, 32.8, 60, 38.8, 60, 18.6, 60, 16.2, 60, 72.6, 60, 57, 60, 54.4, 60, 54, 30, 63.2, 30, 54.5, 30, 84.6, 30, 74.9, 30, 52.8, 30, 43.5, 30, 63.2, 30, 59.6, 30, 76.4, 30, 61.4, 30, 86.3, 30, 83.1, 30, 55.3, 30, 55.5, 30, 69.2, 30, 61, 45, 10.8, 45, 22.5, 45, 3.15, 45, 12.6, 45, 7.2, 45, 22.5, 45, 4.05, 45, 9.9, 45, 8.1, 45, 21.6, 45, 4.95, 45, 9.9, 45, 9.45, 45, 18, 45, 4.05, 45, 13.5), tolerance = 1e-07) expect_equal(simResult4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult4$conditionalPowerAchieved[2, ], c(0.049846768, 0.13642814, 0.19933025, 0.24691696, 0.23422702, 0.31462001, 0.42177681, 0.55370896, 0.056314813, 0.13292646, 0.2493284, 0.31063163, 0.27530592, 0.41566754, 0.59151016, 0.69993156), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult4), NA))) expect_output(print(simResult4)$show()) invisible(capture.output(expect_error(summary(simResult4), NA))) expect_output(summary(simResult4)$show()) suppressWarnings(simResult4CodeBased <- eval(parse(text = getObjectRCode(simResult4, stringWrapParagraphWidth = NULL)))) expect_equal(simResult4CodeBased$iterations, simResult4$iterations, tolerance = 1e-05) expect_equal(simResult4CodeBased$rejectAtLeastOne, simResult4$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult4CodeBased$rejectedPopulationsPerStage, simResult4$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$futilityPerStage, simResult4$futilityPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$earlyStop, simResult4$earlyStop, tolerance = 1e-05) expect_equal(simResult4CodeBased$successPerStage, simResult4$successPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$selectedPopulations, 
simResult4$selectedPopulations, tolerance = 1e-05) expect_equal(simResult4CodeBased$numberOfPopulations, simResult4$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult4CodeBased$expectedNumberOfSubjects, simResult4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult4CodeBased$sampleSizes, simResult4$sampleSizes, tolerance = 1e-05) expect_equal(simResult4CodeBased$conditionalPowerAchieved, simResult4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult4), "character") df <- as.data.frame(simResult4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationEnrichmentRates': gMax = 4", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:subsec:adaptiveClosedTestProcedureEnrichment} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:testStatisticEnrichmentRates} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:simulationEnrichmentRatesGenerate} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} effectList <- list( subGroups = c("S1", "S2", "S3", "S12", "S13", "S23", "S123", "R"), prevalences = c(0.1, 0.15, 0.2, 0.1, 0, 0.18, 0.1, 0.17), piControl = rep(0.2, 8), piTreatments = matrix(rep(0.2, 8) + c(0.1, 0.025, 0.15, 0.075, 0.03, 0.125, 0.15, 0.025), 
byrow = TRUE, ncol = 8) ) design <- getDesignInverseNormal( informationRates = c(0.4, 1), typeOfDesign = "noEarlyEfficacy" ) suppressWarnings(simResult1 <- getSimulationEnrichmentRates(design, plannedSubjects = c(320, 640), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 1, directionUpper = TRUE, typeOfSelection = "best", adaptations = c(T), intersectionTest = "Sidak", stratifiedAnalysis = TRUE, seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult1' with expected results expect_equal(simResult1$iterations[1, ], 100) expect_equal(simResult1$iterations[2, ], 100) expect_equal(simResult1$rejectAtLeastOne, 0.89, tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$rejectedPopulationsPerStage)), c(0, 0.33, 0, 0.08, 0, 0.46, 0, 0.02), tolerance = 1e-07) expect_equal(simResult1$futilityPerStage[1, ], 0) expect_equal(simResult1$earlyStop[1, ], 0) expect_equal(simResult1$successPerStage[1, ], 0) expect_equal(simResult1$successPerStage[2, ], 0.89, tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$selectedPopulations)), c(1, 0.36, 1, 0.11, 1, 0.5, 1, 0.03), tolerance = 1e-07) expect_equal(simResult1$numberOfPopulations[1, ], 4) expect_equal(simResult1$numberOfPopulations[2, ], 1) expect_equal(simResult1$expectedNumberOfSubjects, 640, tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$sampleSizes)), c(32, 39.36, 48, 11.402264, 64, 68.586667, 32, 46.001509, 0, 0, 57.6, 73.682717, 32, 79.334843, 54.4, 1.632), tolerance = 1e-07) expect_equal(simResult1$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult1$conditionalPowerAchieved[2, ], 0.52086641, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult1), NA))) expect_output(print(simResult1)$show()) invisible(capture.output(expect_error(summary(simResult1), NA))) expect_output(summary(simResult1)$show()) suppressWarnings(simResult1CodeBased <- eval(parse(text = 
getObjectRCode(simResult1, stringWrapParagraphWidth = NULL)))) expect_equal(simResult1CodeBased$iterations, simResult1$iterations, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectAtLeastOne, simResult1$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectedPopulationsPerStage, simResult1$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$futilityPerStage, simResult1$futilityPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$earlyStop, simResult1$earlyStop, tolerance = 1e-05) expect_equal(simResult1CodeBased$successPerStage, simResult1$successPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$selectedPopulations, simResult1$selectedPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$numberOfPopulations, simResult1$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$expectedNumberOfSubjects, simResult1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult1CodeBased$sampleSizes, simResult1$sampleSizes, tolerance = 1e-05) expect_equal(simResult1CodeBased$conditionalPowerAchieved, simResult1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult1), "character") df <- as.data.frame(simResult1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult2 <- getSimulationEnrichmentRates(design, plannedSubjects = c(320, 640), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 1, directionUpper = TRUE, typeOfSelection = "rbest", rValue = 2, adaptations = c(T), intersectionTest = "Simes", stratifiedAnalysis = TRUE, seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult2' with expected results expect_equal(simResult2$iterations[1, ], 100) expect_equal(simResult2$iterations[2, ], 100) 
expect_equal(simResult2$rejectAtLeastOne, 0.72, tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$rejectedPopulationsPerStage)), c(0, 0.28, 0, 0.23, 0, 0.58, 0, 0.18), tolerance = 1e-07) expect_equal(simResult2$futilityPerStage[1, ], 0) expect_equal(simResult2$earlyStop[1, ], 0) expect_equal(simResult2$successPerStage[1, ], 0) expect_equal(simResult2$successPerStage[2, ], 0.55, tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$selectedPopulations)), c(1, 0.54, 1, 0.35, 1, 0.77, 1, 0.34), tolerance = 1e-07) expect_equal(simResult2$numberOfPopulations[1, ], 4) expect_equal(simResult2$numberOfPopulations[2, ], 2) expect_equal(simResult2$expectedNumberOfSubjects, 640) expect_equal(unlist(as.list(simResult2$sampleSizes)), c(32, 33.520523, 48, 39.479817, 64, 69.476358, 32, 41.84929, 0, 0, 57.6, 75.328722, 32, 41.84929, 54.4, 18.496), tolerance = 1e-07) expect_equal(simResult2$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult2$conditionalPowerAchieved[2, ], 0.53747471, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult2), NA))) expect_output(print(simResult2)$show()) invisible(capture.output(expect_error(summary(simResult2), NA))) expect_output(summary(simResult2)$show()) suppressWarnings(simResult2CodeBased <- eval(parse(text = getObjectRCode(simResult2, stringWrapParagraphWidth = NULL)))) expect_equal(simResult2CodeBased$iterations, simResult2$iterations, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectAtLeastOne, simResult2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectedPopulationsPerStage, simResult2$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$futilityPerStage, simResult2$futilityPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$earlyStop, simResult2$earlyStop, tolerance = 1e-05) expect_equal(simResult2CodeBased$successPerStage, simResult2$successPerStage, tolerance = 1e-05) 
expect_equal(simResult2CodeBased$selectedPopulations, simResult2$selectedPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$numberOfPopulations, simResult2$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$expectedNumberOfSubjects, simResult2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult2CodeBased$sampleSizes, simResult2$sampleSizes, tolerance = 1e-05) expect_equal(simResult2CodeBased$conditionalPowerAchieved, simResult2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult2), "character") df <- as.data.frame(simResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult3 <- getSimulationEnrichmentRates(design, plannedSubjects = c(320, 640), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 2, directionUpper = TRUE, typeOfSelection = "epsilon", epsilonValue = 0.025, adaptations = c(T), intersectionTest = "Sidak", stratifiedAnalysis = FALSE, seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult3' with expected results expect_equal(simResult3$iterations[1, ], 100) expect_equal(simResult3$iterations[2, ], 100) expect_equal(simResult3$rejectAtLeastOne, 0.73, tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$rejectedPopulationsPerStage)), c(0, 0.17, 0, 0.07, 0, 0.56, 0, 0.05), tolerance = 1e-07) expect_equal(simResult3$futilityPerStage[1, ], 0) expect_equal(simResult3$earlyStop[1, ], 0) expect_equal(simResult3$successPerStage[1, ], 0) expect_equal(simResult3$successPerStage[2, ], 0.63, tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$selectedPopulations)), c(1, 0.36, 1, 0.21, 1, 0.77, 1, 0.1), tolerance = 1e-07) expect_equal(simResult3$numberOfPopulations[1, ], 4) expect_equal(simResult3$numberOfPopulations[2, ], 1.44, tolerance = 1e-07) 
expect_equal(simResult3$expectedNumberOfSubjects, 640) expect_equal(unlist(as.list(simResult3$sampleSizes)), c(32, 28.456504, 48, 16.155061, 64, 87.139927, 32, 33.609257, 0, 0, 57.6, 84.256662, 32, 64.94259, 54.4, 5.44), tolerance = 1e-07) expect_equal(simResult3$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult3$conditionalPowerAchieved[2, ], 0.37873435, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult3), NA))) expect_output(print(simResult3)$show()) invisible(capture.output(expect_error(summary(simResult3), NA))) expect_output(summary(simResult3)$show()) suppressWarnings(simResult3CodeBased <- eval(parse(text = getObjectRCode(simResult3, stringWrapParagraphWidth = NULL)))) expect_equal(simResult3CodeBased$iterations, simResult3$iterations, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectAtLeastOne, simResult3$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectedPopulationsPerStage, simResult3$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$futilityPerStage, simResult3$futilityPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$earlyStop, simResult3$earlyStop, tolerance = 1e-05) expect_equal(simResult3CodeBased$successPerStage, simResult3$successPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$selectedPopulations, simResult3$selectedPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$numberOfPopulations, simResult3$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$expectedNumberOfSubjects, simResult3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult3CodeBased$sampleSizes, simResult3$sampleSizes, tolerance = 1e-05) expect_equal(simResult3CodeBased$conditionalPowerAchieved, simResult3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult3), "character") df <- as.data.frame(simResult3) expect_s3_class(df, "data.frame") 
expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult4 <- getSimulationEnrichmentRates(design, plannedSubjects = c(320, 640), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 0.5, directionUpper = TRUE, typeOfSelection = "rbest", rValue = 1, adaptations = c(T), intersectionTest = "Simes", stratifiedAnalysis = FALSE, seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentRates object 'simResult4' with expected results expect_equal(simResult4$iterations[1, ], 100) expect_equal(simResult4$iterations[2, ], 100) expect_equal(simResult4$rejectAtLeastOne, 0.91, tolerance = 1e-07) expect_equal(unlist(as.list(simResult4$rejectedPopulationsPerStage)), c(0, 0.36, 0, 0.11, 0, 0.43, 0, 0.01), tolerance = 1e-07) expect_equal(simResult4$futilityPerStage[1, ], 0) expect_equal(simResult4$earlyStop[1, ], 0) expect_equal(simResult4$successPerStage[1, ], 0) expect_equal(simResult4$successPerStage[2, ], 0.91, tolerance = 1e-07) expect_equal(unlist(as.list(simResult4$selectedPopulations)), c(1, 0.39, 1, 0.13, 1, 0.44, 1, 0.04), tolerance = 1e-07) expect_equal(simResult4$numberOfPopulations[1, ], 4) expect_equal(simResult4$numberOfPopulations[2, ], 1) expect_equal(simResult4$expectedNumberOfSubjects, 640) expect_equal(unlist(as.list(simResult4$sampleSizes)), c(32, 42.88, 48, 13.693585, 64, 61.226667, 32, 50.729057, 0, 0, 57.6, 69.232302, 32, 80.06239, 54.4, 2.176), tolerance = 1e-07) expect_equal(simResult4$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult4$conditionalPowerAchieved[2, ], 0.50602278, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult4), NA))) expect_output(print(simResult4)$show()) invisible(capture.output(expect_error(summary(simResult4), NA))) expect_output(summary(simResult4)$show()) suppressWarnings(simResult4CodeBased <- 
eval(parse(text = getObjectRCode(simResult4, stringWrapParagraphWidth = NULL)))) expect_equal(simResult4CodeBased$iterations, simResult4$iterations, tolerance = 1e-05) expect_equal(simResult4CodeBased$rejectAtLeastOne, simResult4$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult4CodeBased$rejectedPopulationsPerStage, simResult4$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$futilityPerStage, simResult4$futilityPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$earlyStop, simResult4$earlyStop, tolerance = 1e-05) expect_equal(simResult4CodeBased$successPerStage, simResult4$successPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$selectedPopulations, simResult4$selectedPopulations, tolerance = 1e-05) expect_equal(simResult4CodeBased$numberOfPopulations, simResult4$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult4CodeBased$expectedNumberOfSubjects, simResult4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult4CodeBased$sampleSizes, simResult4$sampleSizes, tolerance = 1e-05) expect_equal(simResult4CodeBased$conditionalPowerAchieved, simResult4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult4), "character") df <- as.data.frame(simResult4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationEnrichmentRates': comparison of base and enrichment for inverse normal", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:simulationEnrichmentRatesGenerate} # @refFS[Formula]{fs:testStatisticEnrichmentRates} # 
@refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} effectList <- list( subGroups = "F", prevalences = 1, piTreatments = matrix(seq(0.1, 0.4, 0.05), byrow = TRUE, ncol = 1), piControl = 0.4 ) design <- getDesignInverseNormal( informationRates = c(0.3, 0.7, 1), typeOfDesign = "asUser", userAlphaSpending = c(0.001, 0.005, 0.025), futilityBounds = c(0.1, 0.2) ) suppressWarnings(x1 <- getSimulationEnrichmentRates(design, plannedSubjects = c(60, 120, 180), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 0.5, directionUpper = FALSE, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10, 10), maxNumberOfSubjectsPerStage = c(NA, 180, 180), seed = 123 )) x2 <- getSimulationRates(design, plannedSubjects = c(60, 120, 180), pi1 = seq(0.1, 0.4, 0.05), maxNumberOfIterations = 100, allocationRatioPlanned = 0.5, pi2 = 0.4, directionUpper = FALSE, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10, 10), maxNumberOfSubjectsPerStage = c(NA, 180, 180), seed = 123 ) comp1 <- x2$overallReject - x1$rejectAtLeastOne ## Comparison of the results of numeric object 'comp1' with expected results expect_equal(comp1, c(0.01, -0.02, -0.05, 0.05, -0.04, -0.01, 0), tolerance = 1e-07) comp2 <- x2$conditionalPowerAchieved - x1$conditionalPowerAchieved ## Comparison of the results of matrixarray object 'comp2' with expected results expect_equal(comp2[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(comp2[2, ], c(-0.025509072, 0.012775113, 0.070479737, -0.032229481, -0.033954309, -0.00098028605, 0.042084407), tolerance = 1e-07) expect_equal(comp2[3, ], c(0.010055798, 0.0013933335, -0.038882763, 0.065726498, -0.078554562, 0.016763805, -0.040918315), tolerance = 1e-07) comp3 <- x2$expectedNumberOfSubjects - x1$expectedNumberOfSubjects ## Comparison of 
the results of numeric object 'comp3' with expected results expect_equal(comp3, c(-12.434592, -4.3689056, -8.2101141, -5.602304, 0.020282466, -29.650036, 15.243032), tolerance = 1e-07) }) test_that("'getSimulationEnrichmentRates': comparison of base and enrichment for Fisher combination", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:simulationEnrichmentRatesGenerate} # @refFS[Formula]{fs:testStatisticEnrichmentRates} # @refFS[Formula]{fs:stratifiedTestEnrichmentRates} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} effectList <- list( subGroups = "F", prevalences = 1, piTreatments = matrix(seq(0.1, 0.4, 0.05), byrow = TRUE, ncol = 1), piControl = 0.4 ) design <- getDesignFisher(informationRates = c(0.3, 0.7, 1), method = "fullAlpha", alpha0Vec = c(0.5, 0.4), kMax = 3) suppressWarnings(x1 <- getSimulationEnrichmentRates(design, plannedSubjects = c(60, 120, 180), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 0.5, directionUpper = FALSE, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10, 10), maxNumberOfSubjectsPerStage = c(NA, 180, 180), seed = 123 )) x2 <- getSimulationRates(design, plannedSubjects = c(60, 120, 180), pi1 = seq(0.1, 0.4, 0.05), maxNumberOfIterations = 100, allocationRatioPlanned = 0.5, pi2 = 0.4, directionUpper = FALSE, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10, 10), maxNumberOfSubjectsPerStage = c(NA, 180, 180), seed = 123 ) comp4 <- x2$overallReject - x1$rejectAtLeastOne ## Comparison of the results of numeric object 'comp4' with expected results expect_equal(comp4, c(0, -0.03, -0.07, -0.05, 0.06, 0, 
0.03), tolerance = 1e-07) comp5 <- x2$conditionalPowerAchieved - x1$conditionalPowerAchieved ## Comparison of the results of matrixarray object 'comp5' with expected results expect_equal(comp5[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(comp5[2, ], c(0.047883697, -0.012456169, 0.030195535, 0.040269247, -0.012692642, 0.10456209, -0.012774146), tolerance = 1e-07) expect_equal(comp5[3, ], c(0.0080078465, -0.026077103, 0.024172504, -0.036648722, -0.028435685, 0.10893012, 0.014935464), tolerance = 1e-07) comp6 <- x2$expectedNumberOfSubjects - x1$expectedNumberOfSubjects ## Comparison of the results of numeric object 'comp6' with expected results expect_equal(comp6, c(-15.662688, 11.12334, -9.6084771, 12.680032, -11.581166, -34.68345, -0.36035592), tolerance = 1e-07) }) rpact/tests/testthat/test-class_simulation_results.R0000644000176200001440000000276414446750002022632 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-class_analysis_dataset.R ## | Creation date: 06 February 2023, 12:04:06 ## | File version: $Revision: 7139 $ ## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Class 'SimulationResults'") test_that("Test that simulation result class generics and utility functions throw errors outside of context", { expect_error(.assertIsValidVariedParameterVectorForSimulationResultsPlotting()) 
expect_error(.getSimulationPlotXAxisParameterName()) expect_error(.getSimulationPlotXAxisLabel()) expect_error(.getPowerAndStoppingProbabilities()) expect_error(.plotSimulationResults()) expect_error(plot.SimulationResults()) expect_error(getData(1)) expect_error(.getData.SimulationResults()) expect_error(.getAggregatedDataByIterationNumber()) expect_error(.getAggregatedData()) expect_error(getRawData()) })rpact/tests/testthat/test-f_simulation_multiarm_means.R0000644000176200001440000041265414372422771023300 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_simulation_multiarm_means.R ## | Creation date: 06 February 2023, 12:14:32 ## | File version: $Revision: 6810 $ ## | Last changed: $Date: 2023-02-13 12:58:47 +0100 (Mo, 13 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Simulation Multi-Arm Means Function") test_that("'getSimulationMultiArmMeans': several configurations", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns} # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips} # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections} # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmMeans} # @refFS[Formula]{fs:simulationMultiArmDoseResponse} # @refFS[Formula]{fs:simulationMultiArmMeansGenerate} # @refFS[Formula]{fs:simulationMultiArmMeansTestStatistics} # @refFS[Formula]{fs:simulationMultiArmSelections} # 
@refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} x1 <- getSimulationMultiArmMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "linear", activeArms = 4, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x1' with expected results expect_equal(x1$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x1$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x1$iterations[3, ], c(9, 8, 8, 5)) expect_equal(x1$rejectAtLeastOne, c(0.3, 0.6, 0.8, 0.9), tolerance = 1e-07) expect_equal(unlist(as.list(x1$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0.1, 0, 0.2, 0.1, 0, 0, 0.3, 0, 0.1, 0, 0, 0.1, 0.1, 0, 0, 0.1, 0, 0.2, 0.3, 0, 0.3, 0.3), tolerance = 1e-07) expect_equal(x1$futilityStop, c(0, 0, 0, 0)) expect_equal(x1$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x1$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x1$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x1$earlyStop[2, ], c(0.1, 0.2, 0.2, 0.5), tolerance = 1e-07) expect_equal(x1$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x1$successPerStage[2, ], c(0.1, 0.2, 0.2, 0.5), tolerance = 1e-07) expect_equal(x1$successPerStage[3, ], c(0.2, 0.4, 0.6, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(x1$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0, 0, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0, 0, 1, 0.3, 0.2, 1, 0.1, 0.1, 1, 0.3, 0.1, 1, 0.4, 0.4, 1, 0.1, 0, 1, 0.5, 0.4, 1, 0.2, 0.2, 1, 0.5, 0.3, 1, 0.6, 
0.3, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 0.8, 1, 1, 0.5), tolerance = 1e-07) expect_equal(x1$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x1$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x1$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x1$expectedNumberOfSubjects, c(268.55306, 310.74423, 296.80608, 214.56859), tolerance = 1e-07) expect_equal(unlist(as.list(x1$sampleSizes)), c(10, 1.1840544, 11.111111, 10, 10, 12.5, 10, 0.74314427, 1.9878756, 10, 0, 0, 10, 7.3350068, 25.517647, 10, 26.989766, 43.604406, 10, 0, 0, 10, 21.344686, 26.724319, 10, 2.6348908, 7.2351621, 10, 21.298615, 12.5, 10, 40, 44.643278, 10, 10, 0, 10, 33.493936, 27.945681, 10, 4.3287276, 16.089351, 10, 25.258173, 25.120998, 10, 23.39578, 28.363338, 10, 44.647888, 71.809601, 10, 62.617108, 84.693757, 10, 66.001318, 71.752151, 10, 54.740466, 55.087656), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(0.046651357, 0.022479034, 0.083769211, 0.082365248), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[3, ], c(0.49123587, 0.2668344, 0.64496483, 0.65218675), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$iterations, x1$iterations, tolerance = 1e-05) expect_equal(x1CodeBased$rejectAtLeastOne, x1$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x1CodeBased$rejectedArmsPerStage, x1$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$futilityStop, x1$futilityStop, tolerance = 1e-05) expect_equal(x1CodeBased$futilityPerStage, x1$futilityPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$earlyStop, x1$earlyStop, tolerance = 1e-05) 
expect_equal(x1CodeBased$successPerStage, x1$successPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$selectedArms, x1$selectedArms, tolerance = 1e-05) expect_equal(x1CodeBased$numberOfActiveArms, x1$numberOfActiveArms, tolerance = 1e-05) expect_equal(x1CodeBased$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x1CodeBased$sampleSizes, x1$sampleSizes, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPowerAchieved, x1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x2 <- getSimulationMultiArmMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "userDefined", activeArms = 4, plannedSubjects = c(10, 30, 50), stDev = 1.2, adaptations = rep(TRUE, 2), effectMatrix = matrix(c(0.1, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.5), ncol = 4), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x2' with expected results expect_equal(x2$iterations[1, ], c(10, 10)) expect_equal(x2$iterations[2, ], c(10, 10)) expect_equal(x2$iterations[3, ], c(8, 8)) expect_equal(x2$rejectAtLeastOne, c(0.5, 0.6), tolerance = 1e-07) expect_equal(unlist(as.list(x2$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.3, 0, 0, 0, 0, 0.2, 0, 0, 0.2, 0.2, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x2$futilityStop, c(0, 0)) expect_equal(x2$futilityPerStage[1, ], c(0, 0)) expect_equal(x2$futilityPerStage[2, ], c(0, 0)) expect_equal(x2$earlyStop[1, ], c(0, 0)) expect_equal(x2$earlyStop[2, ], c(0.2, 0.2), tolerance = 1e-07) expect_equal(x2$successPerStage[1, ], c(0, 0)) expect_equal(x2$successPerStage[2, ], c(0.2, 0.2), tolerance = 1e-07) 
expect_equal(x2$successPerStage[3, ], c(0.3, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(x2$selectedArms)), c(1, 0.1, 0.1, 1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0.1, 0.1, 1, 0.2, 0, 1, 0.5, 0.3, 1, 0.2, 0.2, 1, 1, 0.8, 1, 1, 0.8), tolerance = 1e-07) expect_equal(x2$numberOfActiveArms[1, ], c(4, 4)) expect_equal(x2$numberOfActiveArms[2, ], c(1, 1)) expect_equal(x2$numberOfActiveArms[3, ], c(1, 1)) expect_equal(x2$expectedNumberOfSubjects, c(238.96461, 281.13648), tolerance = 1e-07) expect_equal(unlist(as.list(x2$sampleSizes)), c(10, 1.1060693, 12.5, 10, 20, 25, 10, 4.7297328, 25.346201, 10, 18.776011, 38.686485, 10, 2.8470245, 10.408309, 10, 11.298615, 0, 10, 26.795872, 25.5, 10, 3.2314462, 14.141225, 10, 35.478699, 73.75451, 10, 53.306071, 77.82771), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0.064857702, 0.041878984), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[3, ], c(0.72573181, 0.45099208), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$iterations, x2$iterations, tolerance = 1e-05) expect_equal(x2CodeBased$rejectAtLeastOne, x2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x2CodeBased$rejectedArmsPerStage, x2$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$futilityStop, x2$futilityStop, tolerance = 1e-05) expect_equal(x2CodeBased$futilityPerStage, x2$futilityPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$earlyStop, x2$earlyStop, tolerance = 1e-05) expect_equal(x2CodeBased$successPerStage, x2$successPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$selectedArms, x2$selectedArms, tolerance = 1e-05) 
expect_equal(x2CodeBased$numberOfActiveArms, x2$numberOfActiveArms, tolerance = 1e-05) expect_equal(x2CodeBased$expectedNumberOfSubjects, x2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x2CodeBased$sampleSizes, x2$sampleSizes, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPowerAchieved, x2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x3 <- getSimulationMultiArmMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "sigmoidEmax", gED50 = 2, slope = 0.5, activeArms = 4, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x3' with expected results expect_equal(x3$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x3$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x3$iterations[3, ], c(10, 9, 9, 8)) expect_equal(x3$rejectAtLeastOne, c(0, 0.3, 0.6, 0.7), tolerance = 1e-07) expect_equal(unlist(as.list(x3$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0.1, 0, 0, 0, 0.2, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.2, 0, 0, 0.4), tolerance = 1e-07) expect_equal(x3$futilityStop, c(0, 0, 0, 0)) expect_equal(x3$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x3$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x3$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x3$earlyStop[2, ], c(0, 0.1, 0.1, 0.2), tolerance = 1e-07) expect_equal(x3$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x3$successPerStage[2, ], c(0, 0.1, 0.1, 0.2), tolerance = 1e-07) 
expect_equal(x3$successPerStage[3, ], c(0, 0.2, 0.5, 0.5), tolerance = 1e-07) expect_equal(unlist(as.list(x3$selectedArms)), c(1, 0.1, 0.1, 1, 0.3, 0.3, 1, 0.1, 0.1, 1, 0, 0, 1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0, 0, 1, 0.3, 0.2, 1, 0.2, 0.2, 1, 0.2, 0.1, 1, 0.4, 0.4, 1, 0.1, 0, 1, 0.4, 0.4, 1, 0.2, 0.2, 1, 0.5, 0.4, 1, 0.6, 0.6, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.9, 1, 1, 0.8), tolerance = 1e-07) expect_equal(x3$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x3$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x3$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x3$expectedNumberOfSubjects, c(295.76875, 343.71408, 335.10548, 281.56474), tolerance = 1e-07) expect_equal(unlist(as.list(x3$sampleSizes)), c(10, 1.0357205, 10, 10, 30, 33.333333, 10, 0.59871171, 1.0418812, 10, 0, 0, 10, 7.3350068, 22.965882, 10, 16.989766, 27.64836, 10, 0, 0, 10, 21.344686, 16.702699, 10, 13.17796, 20, 10, 15.323901, 2.6274327, 10, 40, 44.444444, 10, 10, 0, 10, 25.447372, 22.922435, 10, 7.2951578, 22.222222, 10, 38.282522, 25.259795, 10, 36.742398, 42.916408, 10, 46.996059, 75.888318, 10, 69.608825, 85.831349, 10, 78.881233, 70.74612, 10, 68.087084, 59.619107), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.042062266, 0.013174936, 0.075843331, 0.053971766), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[3, ], c(0.41527426, 0.27301585, 0.35639557, 0.62491311), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$iterations, x3$iterations, tolerance = 1e-05) expect_equal(x3CodeBased$rejectAtLeastOne, x3$rejectAtLeastOne, tolerance = 1e-05) 
expect_equal(x3CodeBased$rejectedArmsPerStage, x3$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$futilityStop, x3$futilityStop, tolerance = 1e-05) expect_equal(x3CodeBased$futilityPerStage, x3$futilityPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$earlyStop, x3$earlyStop, tolerance = 1e-05) expect_equal(x3CodeBased$successPerStage, x3$successPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$selectedArms, x3$selectedArms, tolerance = 1e-05) expect_equal(x3CodeBased$numberOfActiveArms, x3$numberOfActiveArms, tolerance = 1e-05) expect_equal(x3CodeBased$expectedNumberOfSubjects, x3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x3CodeBased$sampleSizes, x3$sampleSizes, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPowerAchieved, x3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x4 <- getSimulationMultiArmMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "all", plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x4' with expected results expect_equal(x4$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x4$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x4$iterations[3, ], c(10, 10, 10, 10)) expect_equal(x4$rejectAtLeastOne, c(0.4, 0.8, 1, 1), tolerance = 1e-07) expect_equal(unlist(as.list(x4$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0, 0, 0.1, 0.2, 0, 0, 0.1, 0, 0, 0.3, 0, 0.2, 0.3, 0, 0.6, 0.2, 0, 0, 0.4, 0, 0.1, 0.7, 0, 
0.4, 0.6, 0, 0.7, 0.3), tolerance = 1e-07) expect_equal(x4$futilityStop, c(0, 0, 0, 0)) expect_equal(x4$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x4$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x4$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x4$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(x4$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x4$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x4$successPerStage[3, ], c(0, 0.1, 0, 0), tolerance = 1e-07) expect_equal(unlist(as.list(x4$selectedArms)), c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)) expect_equal(x4$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x4$numberOfActiveArms[2, ], c(4, 4, 4, 4)) expect_equal(x4$numberOfActiveArms[3, ], c(4, 4, 4, 4)) expect_equal(x4$expectedNumberOfSubjects, c(1050, 891.96665, 849.19143, 705.05343), tolerance = 1e-07) expect_equal(unlist(as.list(x4$sampleSizes)), c(10, 100, 100, 10, 87.259377, 81.133954, 10, 94.901963, 64.936322, 10, 98.210686, 32.8, 10, 100, 100, 10, 87.259377, 81.133954, 10, 94.901963, 64.936322, 10, 98.210686, 32.8, 10, 100, 100, 10, 87.259377, 81.133954, 10, 94.901963, 64.936322, 10, 98.210686, 32.8, 10, 100, 100, 10, 87.259377, 81.133954, 10, 94.901963, 64.936322, 10, 98.210686, 32.8, 10, 100, 100, 10, 87.259377, 81.133954, 10, 94.901963, 64.936322, 10, 98.210686, 32.8), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.0086377938, 0.22005253, 0.081022458, 0.15135806), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[3, ], c(0.17779298, 0.23451185, 0.45925582, 0.77364695), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) 
expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$iterations, x4$iterations, tolerance = 1e-05) expect_equal(x4CodeBased$rejectAtLeastOne, x4$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x4CodeBased$rejectedArmsPerStage, x4$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$futilityStop, x4$futilityStop, tolerance = 1e-05) expect_equal(x4CodeBased$futilityPerStage, x4$futilityPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$earlyStop, x4$earlyStop, tolerance = 1e-05) expect_equal(x4CodeBased$successPerStage, x4$successPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$selectedArms, x4$selectedArms, tolerance = 1e-05) expect_equal(x4CodeBased$numberOfActiveArms, x4$numberOfActiveArms, tolerance = 1e-05) expect_equal(x4CodeBased$expectedNumberOfSubjects, x4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x4CodeBased$sampleSizes, x4$sampleSizes, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPowerAchieved, x4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x5 <- getSimulationMultiArmMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "rBest", rValue = 2, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x5' with expected results expect_equal(x5$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x5$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x5$iterations[3, ], c(10, 9, 8, 8)) 
expect_equal(x5$rejectAtLeastOne, c(0.5, 0.9, 1, 0.9), tolerance = 1e-07) expect_equal(unlist(as.list(x5$rejectedArmsPerStage)), c(0, 0, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.2, 0.3, 0, 0.1, 0, 0, 0, 0.1, 0, 0, 0.3, 0, 0.3, 0.2, 0, 0.2, 0.2, 0, 0.1, 0.3, 0, 0.1, 0.4, 0, 0.1, 0.3, 0, 0.6, 0.3), tolerance = 1e-07) expect_equal(x5$futilityStop, c(0, 0, 0, 0)) expect_equal(x5$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x5$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x5$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x5$earlyStop[2, ], c(0, 0.1, 0.2, 0.2), tolerance = 1e-07) expect_equal(x5$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x5$successPerStage[2, ], c(0, 0.1, 0.2, 0.2), tolerance = 1e-07) expect_equal(x5$successPerStage[3, ], c(0, 0.1, 0.2, 0.3), tolerance = 1e-07) expect_equal(unlist(as.list(x5$selectedArms)), c(1, 0.5, 0.5, 1, 0.7, 0.6, 1, 0.1, 0.1, 1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.9, 0.7, 1, 0.4, 0.4, 1, 0.5, 0.5, 1, 0.5, 0.5, 1, 0.6, 0.4, 1, 0.4, 0.2, 1, 0.7, 0.7, 1, 0.5, 0.4, 1, 0.4, 0.4, 1, 0.9, 0.7, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 0.8), tolerance = 1e-07) expect_equal(x5$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x5$numberOfActiveArms[2, ], c(2, 2, 2, 2)) expect_equal(x5$numberOfActiveArms[3, ], c(2, 2, 2, 2)) expect_equal(x5$expectedNumberOfSubjects, c(591.09538, 503.05596, 452.93301, 405.41488), tolerance = 1e-07) expect_equal(unlist(as.list(x5$sampleSizes)), c(10, 42.50248, 47.078471, 10, 45.384313, 50.975979, 10, 10, 12.5, 10, 29.554131, 37.5, 10, 15.855942, 30, 10, 22.437029, 19.843895, 10, 72.307665, 59.768075, 10, 30.61074, 15.281075, 10, 47.430714, 50, 10, 35.976108, 53.08315, 10, 50.052941, 40.398451, 10, 31.50186, 5.7250423, 10, 60.784176, 67.078471, 10, 46.971175, 44.173288, 10, 20.632484, 31.869624, 10, 71.666731, 33.506118, 10, 83.286657, 97.078471, 10, 75.384313, 84.038156, 10, 76.496545, 72.268075, 10, 81.666731, 46.006118), tolerance = 1e-07) 
expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.061919533, 0.10420825, 0.16753344, 0.13874821), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[3, ], c(0.29816652, 0.52092951, 0.66819594, 0.56533632), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$iterations, x5$iterations, tolerance = 1e-05) expect_equal(x5CodeBased$rejectAtLeastOne, x5$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x5CodeBased$rejectedArmsPerStage, x5$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$futilityStop, x5$futilityStop, tolerance = 1e-05) expect_equal(x5CodeBased$futilityPerStage, x5$futilityPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$earlyStop, x5$earlyStop, tolerance = 1e-05) expect_equal(x5CodeBased$successPerStage, x5$successPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$selectedArms, x5$selectedArms, tolerance = 1e-05) expect_equal(x5CodeBased$numberOfActiveArms, x5$numberOfActiveArms, tolerance = 1e-05) expect_equal(x5CodeBased$expectedNumberOfSubjects, x5$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x5CodeBased$sampleSizes, x5$sampleSizes, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPowerAchieved, x5$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x6 <- getSimulationMultiArmMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = 
"epsilon", epsilonValue = 0.1, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x6' with expected results expect_equal(x6$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x6$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x6$iterations[3, ], c(10, 9, 7, 6)) expect_equal(x6$rejectAtLeastOne, c(0.4, 0.6, 0.8, 0.8), tolerance = 1e-07) expect_equal(unlist(as.list(x6$rejectedArmsPerStage)), c(0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0.2, 0, 0, 0.1, 0, 0, 0, 0, 0.1, 0, 0, 0, 0.2, 0, 0.1, 0.4, 0, 0.3, 0.5, 0, 0.2, 0.3), tolerance = 1e-07) expect_equal(x6$futilityStop, c(0, 0, 0, 0)) expect_equal(x6$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x6$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x6$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x6$earlyStop[2, ], c(0, 0.1, 0.3, 0.4), tolerance = 1e-07) expect_equal(x6$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x6$successPerStage[2, ], c(0, 0.1, 0.3, 0.4), tolerance = 1e-07) expect_equal(x6$successPerStage[3, ], c(0.4, 0.5, 0.5, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(x6$selectedArms)), c(1, 0.1, 0.1, 1, 0.2, 0.1, 1, 0, 0, 1, 0.1, 0.1, 1, 0.5, 0.4, 1, 0.1, 0, 1, 0.3, 0.2, 1, 0.3, 0.2, 1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0, 0, 1, 0.1, 0, 1, 0.4, 0.4, 1, 0.6, 0.5, 1, 0.8, 0.5, 1, 0.5, 0.3, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.7, 1, 1, 0.6), tolerance = 1e-07) expect_equal(x6$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x6$numberOfActiveArms[2, ], c(1.3, 1.2, 1.1, 1), tolerance = 1e-07) expect_equal(x6$numberOfActiveArms[3, ], c(1.2, 1, 1, 1), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(436.56282, 365.15193, 284.70045, 253.12175), tolerance = 1e-07) 
expect_equal(unlist(as.list(x6$sampleSizes)), c(10, 4.7999536, 10, 10, 16.971175, 11.111111, 10, 0, 0, 10, 10, 16.666667, 10, 35.332961, 40, 10, 10, 0, 10, 21.400604, 22.595075, 10, 21.344686, 22.270265, 10, 23.218148, 30, 10, 22.202225, 23.298934, 10, 0, 0, 10, 10, 0, 10, 29.860691, 40, 10, 41.405234, 49.459866, 10, 62.809861, 31.890295, 10, 22.672359, 23.636115, 10, 73.351063, 100, 10, 73.607458, 83.869911, 10, 74.210465, 54.485369, 10, 64.017046, 62.573047), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.024687171, 0.015314975, 0.045856815, 0.050229622), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[3, ], c(0.1883251, 0.40048173, 0.51841906, 0.54348956), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$iterations, x6$iterations, tolerance = 1e-05) expect_equal(x6CodeBased$rejectAtLeastOne, x6$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x6CodeBased$rejectedArmsPerStage, x6$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$futilityStop, x6$futilityStop, tolerance = 1e-05) expect_equal(x6CodeBased$futilityPerStage, x6$futilityPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$earlyStop, x6$earlyStop, tolerance = 1e-05) expect_equal(x6CodeBased$successPerStage, x6$successPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$selectedArms, x6$selectedArms, tolerance = 1e-05) expect_equal(x6CodeBased$numberOfActiveArms, x6$numberOfActiveArms, tolerance = 1e-05) expect_equal(x6CodeBased$expectedNumberOfSubjects, x6$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x6CodeBased$sampleSizes, x6$sampleSizes, 
tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPowerAchieved, x6$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x7 <- getSimulationMultiArmMeans( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x7' with expected results expect_equal(x7$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x7$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x7$iterations[3, ], c(9, 8, 8, 5)) expect_equal(x7$rejectAtLeastOne, c(0.2, 0.4, 0.7, 0.8), tolerance = 1e-07) expect_equal(unlist(as.list(x7$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0.2, 0.1, 0, 0, 0.3, 0, 0.1, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0.2, 0.2, 0, 0.3, 0.2), tolerance = 1e-07) expect_equal(x7$futilityStop, c(0, 0, 0, 0)) expect_equal(x7$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x7$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x7$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x7$earlyStop[2, ], c(0.1, 0.2, 0.2, 0.5), tolerance = 1e-07) expect_equal(x7$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x7$successPerStage[2, ], c(0.1, 0.2, 0.2, 0.5), tolerance = 1e-07) expect_equal(x7$successPerStage[3, ], c(0.1, 0.2, 0.5, 0.3), tolerance = 1e-07) expect_equal(unlist(as.list(x7$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0, 0, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0, 0, 1, 0.3, 0.2, 1, 0.1, 0.1, 1, 0.3, 0.1, 1, 0.4, 0.4, 1, 0.1, 0, 1, 0.5, 0.4, 1, 0.2, 
0.2, 1, 0.5, 0.3, 1, 0.6, 0.3, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 0.8, 1, 1, 0.5), tolerance = 1e-07) expect_equal(x7$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x7$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x7$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x7$expectedNumberOfSubjects, c(222.21727, 277.8712, 297.53775, 227.3405), tolerance = 1e-07) expect_equal(unlist(as.list(x7$sampleSizes)), c(10, 1.1840544, 1.315616, 10, 10, 12.5, 10, 0.74314427, 0.92893034, 10, 0, 0, 10, 7.3350068, 8.1500075, 10, 26.989766, 33.737207, 10, 0, 0, 10, 21.344686, 40, 10, 2.6348908, 2.9276564, 10, 21.298615, 12.5, 10, 40, 50, 10, 10, 0, 10, 33.493936, 33.674217, 10, 4.3287276, 5.4109095, 10, 25.258173, 21.280514, 10, 23.39578, 27.859565, 10, 44.647888, 46.067497, 10, 62.617108, 64.148116, 10, 66.001318, 72.209444, 10, 54.740466, 67.859565), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(0.046651357, 0.022479034, 0.083769211, 0.082365248), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[3, ], c(0.39772697, 0.18083546, 0.60828997, 0.66318671), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$iterations, x7$iterations, tolerance = 1e-05) expect_equal(x7CodeBased$rejectAtLeastOne, x7$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x7CodeBased$rejectedArmsPerStage, x7$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$futilityStop, x7$futilityStop, tolerance = 1e-05) expect_equal(x7CodeBased$futilityPerStage, x7$futilityPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$earlyStop, x7$earlyStop, tolerance = 1e-05) 
expect_equal(x7CodeBased$successPerStage, x7$successPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$selectedArms, x7$selectedArms, tolerance = 1e-05) expect_equal(x7CodeBased$numberOfActiveArms, x7$numberOfActiveArms, tolerance = 1e-05) expect_equal(x7CodeBased$expectedNumberOfSubjects, x7$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x7CodeBased$sampleSizes, x7$sampleSizes, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPowerAchieved, x7$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x8 <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "all", plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x8' with expected results expect_equal(x8$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x8$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x8$iterations[3, ], c(10, 10, 10, 10)) expect_equal(x8$rejectAtLeastOne, c(0.3, 0.6, 1, 1), tolerance = 1e-07) expect_equal(unlist(as.list(x8$rejectedArmsPerStage)), c(0, 0, 0, 0, 0.1, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0, 0.2, 0, 0, 0.2, 0.3, 0, 0, 0, 0.1, 0, 0.2, 0, 0.4, 0.2, 0, 0.7, 0.2, 0, 0.2, 0.1, 0.1, 0.2, 0.3, 0, 0.7, 0.3, 0, 0.8, 0.2), tolerance = 1e-07) expect_equal(x8$futilityStop, c(0, 0, 0, 0)) expect_equal(x8$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x8$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x8$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x8$earlyStop[2, ], c(0, 0, 0, 0)) 
# --- x8 (continued): typeOfSelection = "all", Fisher design, seed 1234 ---
# Regression snapshot: no stage-wise success was observed in any scenario.
expect_equal(x8$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x8$successPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x8$successPerStage[3, ], c(0, 0, 0, 0))
# With "all" selection every arm remains selected at every stage, hence all 1s.
expect_equal(unlist(as.list(x8$selectedArms)), c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1))
expect_equal(x8$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x8$numberOfActiveArms[2, ], c(4, 4, 4, 4))
expect_equal(x8$numberOfActiveArms[3, ], c(4, 4, 4, 4))
expect_equal(x8$expectedNumberOfSubjects, c(1050, 914.65115, 996.33236,
    1027.6565), tolerance = 1e-07)
# Per-arm, per-stage sample sizes; the same stage-2/3 reassessed sizes repeat
# across all arms because all arms are carried forward.
expect_equal(unlist(as.list(x8$sampleSizes)), c(10, 100, 100, 10, 86.465115,
    86.465115, 10, 94.633236, 94.633236, 10, 97.765652, 97.765652, 10, 100,
    100, 10, 86.465115, 86.465115, 10, 94.633236, 94.633236, 10, 97.765652,
    97.765652, 10, 100, 100, 10, 86.465115, 86.465115, 10, 94.633236,
    94.633236, 10, 97.765652, 97.765652, 10, 100, 100, 10, 86.465115,
    86.465115, 10, 94.633236, 94.633236, 10, 97.765652, 97.765652, 10, 100,
    100, 10, 86.465115, 86.465115, 10, 94.633236, 94.633236, 10, 97.765652,
    97.765652), tolerance = 1e-07)
# Conditional power is undefined at stage 1 (no prior data), hence NA.
expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_,
    NA_real_))
expect_equal(x8$conditionalPowerAchieved[2, ], c(0.015572779, 0.22941785,
    0.084615364, 0.1668833), tolerance = 1e-07)
expect_equal(x8$conditionalPowerAchieved[3, ], c(0.10350918, 0.24229761,
    0.63483372, 0.79913622), tolerance = 1e-07)
# Extended checks (print/summary round-trip and R-code regeneration) only run
# in the complete unit test set.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must not error; NA as 'expected' means "no error".
    invisible(capture.output(expect_error(print(x8), NA)))
    expect_output(print(x8)$show())
    invisible(capture.output(expect_error(summary(x8), NA)))
    expect_output(summary(x8)$show())
    # Regenerate the object from its own R code and compare all fields.
    x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL)))
    expect_equal(x8CodeBased$iterations, x8$iterations, tolerance = 1e-05)
    expect_equal(x8CodeBased$rejectAtLeastOne, x8$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x8CodeBased$rejectedArmsPerStage, x8$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x8CodeBased$futilityStop, x8$futilityStop, tolerance = 1e-05)
    expect_equal(x8CodeBased$futilityPerStage, x8$futilityPerStage, tolerance = 1e-05)
    expect_equal(x8CodeBased$earlyStop, x8$earlyStop, tolerance = 1e-05)
    expect_equal(x8CodeBased$successPerStage, x8$successPerStage, tolerance = 1e-05)
    expect_equal(x8CodeBased$selectedArms, x8$selectedArms, tolerance = 1e-05)
    expect_equal(x8CodeBased$numberOfActiveArms, x8$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x8CodeBased$expectedNumberOfSubjects, x8$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x8CodeBased$sampleSizes, x8$sampleSizes, tolerance = 1e-05)
    expect_equal(x8CodeBased$conditionalPowerAchieved, x8$conditionalPowerAchieved, tolerance = 1e-05)
    # Basic coercion sanity checks.
    expect_type(names(x8), "character")
    df <- as.data.frame(x8)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x8)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# --- x9: typeOfSelection = "rBest" with rValue = 2 (keep best two arms) ---
x9 <- getSimulationMultiArmMeans(
    seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4, typeOfSelection = "rBest", rValue = 2,
    plannedSubjects = c(10, 30, 50), stDev = 1.2,
    muMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE),
    conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmMeans object 'x9' with expected results
expect_equal(x9$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x9$iterations[2, ], c(10, 10, 10, 10))
expect_equal(x9$iterations[3, ], c(10, 9, 8, 7))
expect_equal(x9$rejectAtLeastOne, c(0.4, 0.6, 0.7, 0.9), tolerance = 1e-07)
expect_equal(unlist(as.list(x9$rejectedArmsPerStage)), c(0, 0, 0, 0.1, 0.1, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.2, 0.3, 0, 0.1, 0, 0, 0, 0.1,
    0, 0, 0.2, 0, 0.5, 0, 0, 0.3, 0, 0, 0.2, 0.1, 0, 0.1, 0.1, 0, 0.1, 0, 0.1,
    0.5, 0.3), tolerance = 1e-07)
expect_equal(x9$futilityStop, c(0, 0, 0, 0))
expect_equal(x9$futilityPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x9$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x9$earlyStop[1, ], c(0, 0, 0, 0))
expect_equal(x9$earlyStop[2, ], c(0, 0.1, 0.2, 0.3), tolerance = 1e-07)
expect_equal(x9$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x9$successPerStage[2, ], c(0, 0.1, 0.2, 0.3), tolerance = 1e-07)
expect_equal(x9$successPerStage[3, ], c(0, 0, 0.2, 0.1), tolerance = 1e-07)
expect_equal(unlist(as.list(x9$selectedArms)), c(1, 0.5, 0.5, 1, 0.7, 0.6, 1,
    0.1, 0.1, 1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.9, 0.7, 1, 0.4, 0.4,
    1, 0.5, 0.5, 1, 0.5, 0.5, 1, 0.6, 0.4, 1, 0.4, 0.1, 1, 0.7, 0.7, 1, 0.5,
    0.4, 1, 0.4, 0.4, 1, 0.9, 0.6, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 0.7),
    tolerance = 1e-07)
expect_equal(x9$numberOfActiveArms[1, ], c(4, 4, 4, 4))
# rValue = 2: exactly two arms continue after stage 1.
expect_equal(x9$numberOfActiveArms[2, ], c(2, 2, 2, 2))
expect_equal(x9$numberOfActiveArms[3, ], c(2, 2, 2, 2))
expect_equal(x9$expectedNumberOfSubjects, c(541.86022, 465.03543, 438.85623,
    427.93855), tolerance = 1e-07)
expect_equal(unlist(as.list(x9$sampleSizes)), c(10, 42.315846, 42.315846, 10,
    43.044196, 41.478749, 10, 10, 12.5, 10, 28.887554, 41.267934, 10,
    15.358913, 15.358913, 10, 21.683959, 24.093288, 10, 70.857557, 63.571946,
    10, 27.933797, 39.905424, 10, 46.61779, 46.61779, 10, 34.631951,
    38.479946, 10, 49.194842, 36.493552, 10, 31.168408, 1.6691539, 10,
    59.660857, 59.660857, 10, 44.698358, 43.316707, 10, 19.566345, 24.457932,
    10, 67.989758, 54.271083, 10, 81.976703, 81.976703, 10, 72.029232,
    73.684344, 10, 74.809372, 68.511715, 10, 77.989758, 68.556797),
    tolerance = 1e-07)
expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_,
    NA_real_))
expect_equal(x9$conditionalPowerAchieved[2, ], c(0.085169097, 0.1203719,
    0.19239671, 0.15260753), tolerance = 1e-07)
expect_equal(x9$conditionalPowerAchieved[3, ], c(0.20442999, 0.2985599,
    0.51072411, 0.55234699), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # Same extended round-trip checks as for x8.
    invisible(capture.output(expect_error(print(x9), NA)))
    expect_output(print(x9)$show())
    invisible(capture.output(expect_error(summary(x9), NA)))
    expect_output(summary(x9)$show())
    x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL)))
    expect_equal(x9CodeBased$iterations, x9$iterations, tolerance = 1e-05)
    expect_equal(x9CodeBased$rejectAtLeastOne, x9$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x9CodeBased$rejectedArmsPerStage, x9$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x9CodeBased$futilityStop, x9$futilityStop, tolerance = 1e-05)
    expect_equal(x9CodeBased$futilityPerStage, x9$futilityPerStage, tolerance = 1e-05)
    expect_equal(x9CodeBased$earlyStop, x9$earlyStop, tolerance = 1e-05)
    expect_equal(x9CodeBased$successPerStage, x9$successPerStage, tolerance = 1e-05)
    expect_equal(x9CodeBased$selectedArms, x9$selectedArms, tolerance = 1e-05)
    expect_equal(x9CodeBased$numberOfActiveArms, x9$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x9CodeBased$expectedNumberOfSubjects, x9$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x9CodeBased$sampleSizes, x9$sampleSizes, tolerance = 1e-05)
    expect_equal(x9CodeBased$conditionalPowerAchieved, x9$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x9), "character")
    df <- as.data.frame(x9)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x9)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# --- x10: typeOfSelection = "epsilon" (epsilonValue = 0.1), Bonferroni test ---
x10 <- getSimulationMultiArmMeans(
    seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4, typeOfSelection = "epsilon", epsilonValue = 0.1,
    plannedSubjects = c(10, 30, 50), stDev = 1.2,
    muMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE),
    intersectionTest = "Bonferroni", conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmMeans object 'x10' with expected results
expect_equal(x10$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x10$iterations[2, ], c(7, 8, 5, 9))
expect_equal(x10$iterations[3, ], c(7, 6, 4, 4))
expect_equal(x10$rejectAtLeastOne, c(0.2, 0.4, 0.2, 0.6), tolerance = 1e-07)
expect_equal(unlist(as.list(x10$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0, 0.1, 0.1,
    0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0.1, 0, 0, 0.2, 0, 0.1, 0.1, 0, 0.2, 0.1),
    tolerance = 1e-07)
# Epsilon selection can drop all arms, so futility stops occur here.
expect_equal(x10$futilityStop, c(0.3, 0.2, 0.5, 0.1), tolerance = 1e-07)
expect_equal(x10$futilityPerStage[1, ], c(0.3, 0.2, 0.5, 0.1), tolerance = 1e-07)
expect_equal(x10$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x10$earlyStop[1, ], c(0.3, 0.2, 0.5, 0.1), tolerance = 1e-07)
expect_equal(x10$earlyStop[2, ], c(0, 0.2, 0.1, 0.5), tolerance = 1e-07)
expect_equal(x10$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x10$successPerStage[2, ], c(0, 0.2, 0.1, 0.5), tolerance = 1e-07)
expect_equal(x10$successPerStage[3, ], c(0.2, 0.2, 0.1, 0), tolerance = 1e-07)
expect_equal(unlist(as.list(x10$selectedArms)), c(1, 0.1, 0.1, 1, 0, 0, 1, 0,
    0, 1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.1, 0, 1, 0.2, 0.2, 1, 0.2, 0, 1, 0.1,
    0.1, 1, 0.4, 0.3, 1, 0, 0, 1, 0.2, 0.1, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0.4,
    0.3, 1, 0.3, 0.1, 1, 0.7, 0.7, 1, 0.8, 0.6, 1, 0.5, 0.4, 1, 0.9, 0.4),
    tolerance = 1e-07)
expect_equal(x10$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x10$numberOfActiveArms[2, ], c(1.2857143, 1.125, 1.2, 1.1111111),
    tolerance = 1e-07)
expect_equal(x10$numberOfActiveArms[3, ], c(1.2857143, 1.1666667, 1.25, 1.25),
    tolerance = 1e-07)
expect_equal(x10$expectedNumberOfSubjects, c(225.54374, 222.86662, 137.52897,
    198.07751), tolerance = 1e-07)
expect_equal(unlist(as.list(x10$sampleSizes)), c(10, 5.7796177, 5.7796177, 10,
    0, 0, 10, 0, 0, 10, 21.972849, 49.43891, 10, 18.318062, 18.318062, 10,
    4.015823, 0, 10, 19.919121, 24.898901, 10, 4.2855233, 0, 10, 4.0944014,
    4.0944014, 10, 25.284305, 32.792226, 10, 0, 0, 10, 13.080039, 4.4300867,
    10, 40.432794, 40.432794, 10, 33.32367, 44.431559, 10, 42.475692,
    28.104858, 10, 18.985094, 14.869399, 10, 56.76351, 56.76351, 10,
    50.123797, 60.557119, 10, 45.125964, 31.417698, 10, 51.714883, 53.868997),
    tolerance = 1e-07)
expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_,
    NA_real_, NA_real_))
expect_equal(x10$conditionalPowerAchieved[2, ], c(0.051011725, 0.14528092,
    0.099325934, 0.10008765), tolerance = 1e-07)
expect_equal(x10$conditionalPowerAchieved[3, ], c(0.1199627, 0.35325827,
    0.33382798, 0.10956309), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # Same extended round-trip checks as for x8.
    invisible(capture.output(expect_error(print(x10), NA)))
    expect_output(print(x10)$show())
    invisible(capture.output(expect_error(summary(x10), NA)))
    expect_output(summary(x10)$show())
    x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL)))
    expect_equal(x10CodeBased$iterations, x10$iterations, tolerance = 1e-05)
    expect_equal(x10CodeBased$rejectAtLeastOne, x10$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x10CodeBased$rejectedArmsPerStage, x10$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x10CodeBased$futilityStop, x10$futilityStop, tolerance = 1e-05)
    expect_equal(x10CodeBased$futilityPerStage, x10$futilityPerStage, tolerance = 1e-05)
    expect_equal(x10CodeBased$earlyStop, x10$earlyStop, tolerance = 1e-05)
    expect_equal(x10CodeBased$successPerStage, x10$successPerStage, tolerance = 1e-05)
    expect_equal(x10CodeBased$selectedArms, x10$selectedArms, tolerance = 1e-05)
    expect_equal(x10CodeBased$numberOfActiveArms, x10$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x10CodeBased$expectedNumberOfSubjects, x10$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x10CodeBased$sampleSizes, x10$sampleSizes, tolerance = 1e-05)
    expect_equal(x10CodeBased$conditionalPowerAchieved, x10$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x10), "character")
    df <- as.data.frame(x10)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x10)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# --- x11: default selection, threshold = 0, smaller effects (muMax 0.1-0.3),
# three scenarios instead of four ---
x11 <- getSimulationMultiArmMeans(
    seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4, threshold = 0, plannedSubjects = c(10, 30, 50),
    stDev = 1.2, muMaxVector = seq(0.1, 0.3, 0.1),
    intersectionTest = "Bonferroni", conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmMeans object 'x11' with expected results
expect_equal(x11$iterations[1, ], c(10, 10, 10))
expect_equal(x11$iterations[2, ], c(9, 6, 6))
expect_equal(x11$iterations[3, ], c(9, 5, 4))
expect_equal(x11$rejectAtLeastOne, c(0, 0, 0.1), tolerance = 1e-07)
expect_equal(unlist(as.list(x11$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0.1), tolerance = 1e-07)
expect_equal(x11$futilityStop, c(0.1, 0.5, 0.6), tolerance = 1e-07)
expect_equal(x11$futilityPerStage[1, ], c(0.1, 0.4, 0.4), tolerance = 1e-07)
expect_equal(x11$futilityPerStage[2, ], c(0, 0.1, 0.2), tolerance = 1e-07)
expect_equal(x11$earlyStop[1, ], c(0.1, 0.4, 0.4), tolerance = 1e-07)
expect_equal(x11$earlyStop[2, ], c(0, 0.1, 0.2), tolerance = 1e-07)
expect_equal(x11$successPerStage[1, ], c(0, 0, 0))
expect_equal(x11$successPerStage[2, ], c(0, 0, 0))
expect_equal(x11$successPerStage[3, ], c(0, 0, 0.1), tolerance = 1e-07)
expect_equal(unlist(as.list(x11$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1,
    0.1, 0, 1, 0.3, 0.3, 1, 0.3, 0.2, 1, 0.2, 0.1, 1, 0.2, 0.2, 1, 0.1, 0.1,
    1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.9, 0.9, 1, 0.6,
    0.5, 1, 0.6, 0.4), tolerance = 1e-07)
expect_equal(x11$numberOfActiveArms[1, ], c(4, 4, 4))
# Default ("best") selection keeps exactly one arm after stage 1.
expect_equal(x11$numberOfActiveArms[2, ], c(1, 1, 1))
expect_equal(x11$numberOfActiveArms[3, ], c(1, 1, 1))
expect_equal(x11$expectedNumberOfSubjects, c(293.83569, 240.03958, 175.41029),
    tolerance = 1e-07)
expect_equal(unlist(as.list(x11$sampleSizes)), c(10, 1.428489, 11.111111, 10,
    16.666667, 20, 10, 10.322237, 0, 10, 9.8699583, 33.333333, 10, 41.973847,
    40, 10, 21.511686, 25, 10, 15.186109, 22.222222, 10, 6.5876644, 11.765765,
    10, 17.33069, 33.465374, 10, 17.556106, 24.756944, 10, 16.666667, 20, 10,
    2.0321899, 21.502286, 10, 44.040662, 91.42361, 10, 81.894844, 91.765765,
    10, 51.196803, 79.96766), tolerance = 1e-07)
expect_equal(x11$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_))
expect_equal(x11$conditionalPowerAchieved[2, ], c(0.038698548, 0.10704476,
    0.043430379), tolerance = 1e-07)
expect_equal(x11$conditionalPowerAchieved[3, ], c(0.30869297, 0.27823314,
    0.60162296), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # Same extended round-trip checks as for x8.
    invisible(capture.output(expect_error(print(x11), NA)))
    expect_output(print(x11)$show())
    invisible(capture.output(expect_error(summary(x11), NA)))
    expect_output(summary(x11)$show())
    x11CodeBased <- eval(parse(text = getObjectRCode(x11, stringWrapParagraphWidth = NULL)))
    expect_equal(x11CodeBased$iterations, x11$iterations, tolerance = 1e-05)
    expect_equal(x11CodeBased$rejectAtLeastOne, x11$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x11CodeBased$rejectedArmsPerStage, x11$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x11CodeBased$futilityStop, x11$futilityStop, tolerance = 1e-05)
    expect_equal(x11CodeBased$futilityPerStage, x11$futilityPerStage, tolerance = 1e-05)
    expect_equal(x11CodeBased$earlyStop, x11$earlyStop, tolerance = 1e-05)
    expect_equal(x11CodeBased$successPerStage, x11$successPerStage, tolerance = 1e-05)
    expect_equal(x11CodeBased$selectedArms, x11$selectedArms, tolerance = 1e-05)
    expect_equal(x11CodeBased$numberOfActiveArms, x11$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x11CodeBased$expectedNumberOfSubjects, x11$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x11CodeBased$sampleSizes, x11$sampleSizes, tolerance = 1e-05)
    expect_equal(x11CodeBased$conditionalPowerAchieved, x11$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x11), "character")
    df <- as.data.frame(x11)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x11)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# --- x12: typeOfShape = "linear" dose-response, adaptations at both interims ---
x12 <- getSimulationMultiArmMeans(
    seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    typeOfShape = "linear", activeArms = 4, threshold = 0,
    plannedSubjects = c(10, 30, 50), stDev = 1.2,
    muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2),
    intersectionTest = "Bonferroni", conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmMeans object 'x12' with expected results
expect_equal(x12$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x12$iterations[2, ], c(10, 6, 8, 8))
expect_equal(x12$iterations[3, ], c(8, 5, 1, 2))
expect_equal(x12$rejectAtLeastOne, c(0.3, 0.1, 0.7, 0.7), tolerance = 1e-07)
expect_equal(unlist(as.list(x12$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0,
    0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
    0, 0, 0.1, 0, 0, 0.1, 0.1, 0.1, 0.1, 0.1, 0, 0.1, 0, 0.2, 0.3, 0, 0.1,
    0.3, 0), tolerance = 1e-07)
expect_equal(x12$futilityStop, c(0, 0.4, 0.2, 0.2), tolerance = 1e-07)
expect_equal(x12$futilityPerStage[1, ], c(0, 0.4, 0.2, 0.2), tolerance = 1e-07)
expect_equal(x12$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x12$earlyStop[1, ], c(0, 0.4, 0.2, 0.2), tolerance = 1e-07)
expect_equal(x12$earlyStop[2, ], c(0.2, 0.1, 0.7, 0.6), tolerance = 1e-07)
expect_equal(x12$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x12$successPerStage[2, ], c(0.2, 0.1, 0.7, 0.6), tolerance = 1e-07)
expect_equal(x12$successPerStage[3, ], c(0.1, 0, 0, 0.1), tolerance = 1e-07)
expect_equal(unlist(as.list(x12$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1,
    0.1, 0, 1, 0, 0, 1, 0.3, 0.3, 1, 0.1, 0.1, 1, 0, 0, 1, 0.1, 0, 1, 0.1,
    0.1, 1, 0.2, 0.2, 1, 0.1, 0, 1, 0.3, 0.2, 1, 0.5, 0.3, 1, 0.2, 0.1, 1,
    0.6, 0.1, 1, 0.4, 0, 1, 1, 0.8, 1, 0.6, 0.5, 1, 0.8, 0.1, 1, 0.8, 0.2),
    tolerance = 1e-07)
expect_equal(x12$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x12$numberOfActiveArms[2, ], c(1, 1, 1, 1))
expect_equal(x12$numberOfActiveArms[3, ], c(1, 1, 1, 1))
expect_equal(x12$expectedNumberOfSubjects, c(270.86167, 201.58944, 127.72687,
    185.63922), tolerance = 1e-07)
expect_equal(unlist(as.list(x12$sampleSizes)), c(10, 1.1167748, 12.5, 10,
    8.9578499, 20, 10, 0.5, 0, 10, 0, 0, 10, 6.7277808, 32.819107, 10,
    6.3724427, 20, 10, 0, 0, 10, 12.5, 0, 10, 2.4005123, 12.5, 10, 12.766635,
    29.774077, 10, 11.503905, 0, 10, 27.658054, 100, 10, 28.865098, 31.331731,
    10, 23.415877, 20, 10, 24.075387, 100, 10, 19.616461, 0, 10, 39.110166,
    89.150838, 10, 51.512805, 89.774077, 10, 36.079292, 100, 10, 59.774515,
    100), tolerance = 1e-07)
expect_equal(x12$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_,
    NA_real_, NA_real_))
expect_equal(x12$conditionalPowerAchieved[2, ], c(0.064552587, 0.074113563,
    0.13271614, 0.12195746), tolerance = 1e-07)
expect_equal(x12$conditionalPowerAchieved[3, ], c(0.41775137, 0.42792704,
    0.6049542, 0.13870598), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # Same extended round-trip checks as for x8.
    invisible(capture.output(expect_error(print(x12), NA)))
    expect_output(print(x12)$show())
    invisible(capture.output(expect_error(summary(x12), NA)))
    expect_output(summary(x12)$show())
    x12CodeBased <- eval(parse(text = getObjectRCode(x12, stringWrapParagraphWidth = NULL)))
    expect_equal(x12CodeBased$iterations, x12$iterations, tolerance = 1e-05)
    expect_equal(x12CodeBased$rejectAtLeastOne, x12$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x12CodeBased$rejectedArmsPerStage, x12$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$futilityStop, x12$futilityStop, tolerance = 1e-05)
    expect_equal(x12CodeBased$futilityPerStage, x12$futilityPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$earlyStop, x12$earlyStop, tolerance = 1e-05)
    expect_equal(x12CodeBased$successPerStage, x12$successPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$selectedArms, x12$selectedArms, tolerance = 1e-05)
    expect_equal(x12CodeBased$numberOfActiveArms, x12$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x12CodeBased$expectedNumberOfSubjects, x12$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x12CodeBased$sampleSizes, x12$sampleSizes, tolerance = 1e-05)
    expect_equal(x12CodeBased$conditionalPowerAchieved, x12$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x12), "character")
    df <- as.data.frame(x12)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x12)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# --- x13: typeOfShape = "userDefined" with explicit effectMatrix (two effect
# scenarios), Sidak intersection test ---
x13 <- getSimulationMultiArmMeans(
    seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    typeOfShape = "userDefined", activeArms = 4, threshold = 0,
    plannedSubjects = c(10, 30, 50), stDev = 1.2, adaptations = rep(TRUE, 2),
    effectMatrix = matrix(c(0.1, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.5), ncol = 4),
    intersectionTest = "Sidak", conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmMeans object 'x13' with expected results
# Two scenarios only (one per effectMatrix row), hence length-2 expectations.
expect_equal(x13$iterations[1, ], c(10, 10))
expect_equal(x13$iterations[2, ], c(10, 9))
expect_equal(x13$iterations[3, ], c(7, 7))
expect_equal(x13$rejectAtLeastOne, c(0.3, 0.3), tolerance = 1e-07)
expect_equal(unlist(as.list(x13$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.2, 0, 0, 0.1, 0.1),
    tolerance = 1e-07)
# --- x13 (continued): userDefined effectMatrix regression expectations ---
expect_equal(x13$futilityStop, c(0, 0.1), tolerance = 1e-07)
expect_equal(x13$futilityPerStage[1, ], c(0, 0.1), tolerance = 1e-07)
expect_equal(x13$futilityPerStage[2, ], c(0, 0))
expect_equal(x13$earlyStop[1, ], c(0, 0.1), tolerance = 1e-07)
expect_equal(x13$earlyStop[2, ], c(0.3, 0.2), tolerance = 1e-07)
expect_equal(x13$successPerStage[1, ], c(0, 0))
expect_equal(x13$successPerStage[2, ], c(0.3, 0.2), tolerance = 1e-07)
expect_equal(x13$successPerStage[3, ], c(0, 0.1), tolerance = 1e-07)
expect_equal(unlist(as.list(x13$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1,
    0.3, 0.3, 1, 0.5, 0.4, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.5, 0.2, 1, 0.2, 0.1,
    1, 1, 0.7, 1, 0.9, 0.7), tolerance = 1e-07)
expect_equal(x13$numberOfActiveArms[1, ], c(4, 4))
# Best-arm selection: a single arm continues after stage 1.
expect_equal(x13$numberOfActiveArms[2, ], c(1, 1))
expect_equal(x13$numberOfActiveArms[3, ], c(1, 1))
expect_equal(x13$expectedNumberOfSubjects, c(238.16649, 275.50348),
    tolerance = 1e-07)
expect_equal(unlist(as.list(x13$sampleSizes)), c(10, 1.0395374, 14.285714, 10,
    4.3933102, 11.199547, 10, 4.4634729, 31.899994, 10, 38.793234, 57.142857,
    10, 2.5722467, 14.285714, 10, 5.3695979, 6.9814836, 10, 23.677991,
    28.571429, 10, 11.241946, 8.8667681, 10, 31.753247, 89.042851, 10,
    59.798088, 84.190656), tolerance = 1e-07)
# Conditional power is undefined at stage 1 (no prior data), hence NA.
expect_equal(x13$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_))
expect_equal(x13$conditionalPowerAchieved[2, ], c(0.095374468, 0.085831831),
    tolerance = 1e-07)
expect_equal(x13$conditionalPowerAchieved[3, ], c(0.56669649, 0.49770257),
    tolerance = 1e-07)
# Extended checks (print/summary round-trip and R-code regeneration) only run
# in the complete unit test set.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must not error; NA as 'expected' means "no error".
    invisible(capture.output(expect_error(print(x13), NA)))
    expect_output(print(x13)$show())
    invisible(capture.output(expect_error(summary(x13), NA)))
    expect_output(summary(x13)$show())
    # Regenerate the object from its own R code and compare all fields.
    x13CodeBased <- eval(parse(text = getObjectRCode(x13, stringWrapParagraphWidth = NULL)))
    expect_equal(x13CodeBased$iterations, x13$iterations, tolerance = 1e-05)
    expect_equal(x13CodeBased$rejectAtLeastOne, x13$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x13CodeBased$rejectedArmsPerStage, x13$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$futilityStop, x13$futilityStop, tolerance = 1e-05)
    expect_equal(x13CodeBased$futilityPerStage, x13$futilityPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$earlyStop, x13$earlyStop, tolerance = 1e-05)
    expect_equal(x13CodeBased$successPerStage, x13$successPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$selectedArms, x13$selectedArms, tolerance = 1e-05)
    expect_equal(x13CodeBased$numberOfActiveArms, x13$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x13CodeBased$expectedNumberOfSubjects, x13$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x13CodeBased$sampleSizes, x13$sampleSizes, tolerance = 1e-05)
    expect_equal(x13CodeBased$conditionalPowerAchieved, x13$conditionalPowerAchieved, tolerance = 1e-05)
    # Basic coercion sanity checks.
    expect_type(names(x13), "character")
    df <- as.data.frame(x13)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x13)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# --- x14: typeOfShape = "sigmoidEmax" (gED50 = 2, slope = 0.5), Sidak test ---
x14 <- getSimulationMultiArmMeans(
    seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    typeOfShape = "sigmoidEmax", gED50 = 2, slope = 0.5, activeArms = 4,
    threshold = 0, plannedSubjects = c(10, 30, 50), stDev = 1.2,
    muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2),
    intersectionTest = "Sidak", conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmMeans object 'x14' with expected results
expect_equal(x14$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x14$iterations[2, ], c(10, 9, 8, 10))
expect_equal(x14$iterations[3, ], c(9, 9, 6, 7))
expect_equal(x14$rejectAtLeastOne, c(0.1, 0, 0.3, 0.4), tolerance = 1e-07)
expect_equal(unlist(as.list(x14$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0,
    0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0,
    0, 0, 0.1, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0.1, 0.1),
    tolerance = 1e-07)
expect_equal(x14$futilityStop, c(0, 0.1, 0.2, 0), tolerance = 1e-07)
expect_equal(x14$futilityPerStage[1, ], c(0, 0.1, 0.2, 0), tolerance = 1e-07)
expect_equal(x14$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x14$earlyStop[1, ], c(0, 0.1, 0.2, 0), tolerance = 1e-07)
expect_equal(x14$earlyStop[2, ], c(0.1, 0, 0.2, 0.3), tolerance = 1e-07)
expect_equal(x14$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x14$successPerStage[2, ], c(0.1, 0, 0.2, 0.3), tolerance = 1e-07)
expect_equal(x14$successPerStage[3, ], c(0, 0, 0.1, 0.1), tolerance = 1e-07)
expect_equal(unlist(as.list(x14$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1,
    0.1, 0, 1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0.1, 0.1, 1, 0.1, 0, 1,
    0.2, 0.2, 1, 0.3, 0.3, 1, 0.2, 0.1, 1, 0.2, 0.2, 1, 0.4, 0.3, 1, 0.1, 0.1,
    1, 0.4, 0.4, 1, 0.4, 0.2, 1, 1, 0.9, 1, 0.9, 0.9, 1, 0.8, 0.6, 1, 1, 0.7),
    tolerance = 1e-07)
expect_equal(x14$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x14$numberOfActiveArms[2, ], c(1, 1, 1, 1))
expect_equal(x14$numberOfActiveArms[3, ], c(1, 1, 1, 1))
expect_equal(x14$expectedNumberOfSubjects, c(302.82831, 359.55539, 205.66054,
    326.21609), tolerance = 1e-07)
expect_equal(unlist(as.list(x14$sampleSizes)), c(10, 0.96871141, 11.111111,
    10, 4.8692533, 11.111111, 10, 0.5, 0, 10, 30, 42.857143, 10, 6.7277808,
    29.172539, 10, 37.581628, 44.444444, 10, 12.5, 16.666667, 10, 10, 0, 10,
    12.834638, 22.222222, 10, 21.991558, 33.249006, 10, 17.610119, 16.666667,
    10, 12.962323, 28.571429, 10, 24.585127, 27.825125, 10, 7.6171061,
    11.111111, 10, 20.182233, 28.660644, 10, 22.561443, 17.977538, 10,
    45.116257, 90.330997, 10, 72.059546, 99.915673, 10, 50.792352, 61.993977,
    10, 75.523767, 89.406109), tolerance = 1e-07)
expect_equal(x14$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_,
    NA_real_, NA_real_))
expect_equal(x14$conditionalPowerAchieved[2, ], c(0.054394525, 0.033810654,
    0.16623293, 0.07472066), tolerance = 1e-07)
expect_equal(x14$conditionalPowerAchieved[3, ], c(0.39787587, 0.27550431,
    0.64928935, 0.24074486), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # Same extended round-trip checks as for x13.
    invisible(capture.output(expect_error(print(x14), NA)))
    expect_output(print(x14)$show())
    invisible(capture.output(expect_error(summary(x14), NA)))
    expect_output(summary(x14)$show())
    x14CodeBased <- eval(parse(text = getObjectRCode(x14, stringWrapParagraphWidth = NULL)))
    expect_equal(x14CodeBased$iterations, x14$iterations, tolerance = 1e-05)
    expect_equal(x14CodeBased$rejectAtLeastOne, x14$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x14CodeBased$rejectedArmsPerStage, x14$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$futilityStop, x14$futilityStop, tolerance = 1e-05)
    expect_equal(x14CodeBased$futilityPerStage, x14$futilityPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$earlyStop, x14$earlyStop, tolerance = 1e-05)
    expect_equal(x14CodeBased$successPerStage, x14$successPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$selectedArms, x14$selectedArms, tolerance = 1e-05)
    expect_equal(x14CodeBased$numberOfActiveArms, x14$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x14CodeBased$expectedNumberOfSubjects, x14$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x14CodeBased$sampleSizes, x14$sampleSizes, tolerance = 1e-05)
    expect_equal(x14CodeBased$conditionalPowerAchieved, x14$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x14), "character")
    df <- as.data.frame(x14)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x14)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# --- x15: typeOfSelection = "all" with adaptations at both interims, Sidak ---
x15 <- getSimulationMultiArmMeans(
    seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4, threshold = 0, typeOfSelection = "all",
    plannedSubjects = c(10, 30, 50), stDev = 1.2,
    muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2),
    intersectionTest = "Sidak", conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmMeans object 'x15' with expected results
expect_equal(x15$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x15$iterations[2, ], c(10, 9, 9, 10))
expect_equal(x15$iterations[3, ], c(10, 8, 8, 10))
expect_equal(x15$rejectAtLeastOne, c(0.1, 0.6, 0.9, 1), tolerance = 1e-07)
expect_equal(unlist(as.list(x15$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0.2, 0, 0, 0, 0.1, 0, 0, 0, 0, 0.2, 0, 0.1, 0.2, 0, 0, 0, 0,
    0.2, 0.1, 0, 0.3, 0.3, 0, 0.3, 0.4, 0, 0.1, 0, 0, 0.3, 0.1, 0, 0.5, 0.2,
    0.1, 0.4, 0.2), tolerance = 1e-07)
expect_equal(x15$futilityStop, c(0, 0.1, 0.1, 0), tolerance = 1e-07)
expect_equal(x15$futilityPerStage[1, ], c(0, 0.1, 0.1, 0), tolerance = 1e-07)
expect_equal(x15$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x15$earlyStop[1, ], c(0, 0.1, 0.1, 0), tolerance = 1e-07)
expect_equal(x15$earlyStop[2, ], c(0, 0.1, 0.1, 0), tolerance = 1e-07)
expect_equal(x15$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x15$successPerStage[2, ], c(0, 0.1, 0.1, 0), tolerance = 1e-07)
expect_equal(x15$successPerStage[3, ], c(0, 0, 0.3, 0.3), tolerance = 1e-07)
# "all" selection with threshold = 0: fractions below 1 reflect arms dropped
# for falling below the threshold, not ranking-based deselection.
expect_equal(unlist(as.list(x15$selectedArms)), c(1, 0.6, 0.6, 1, 0.6, 0.5, 1,
    0.2, 0.1, 1, 0.7, 0.5, 1, 0.7, 0.6, 1, 0.8, 0.7, 1, 0.7, 0.7, 1, 0.7, 0.7,
    1, 0.7, 0.7, 1, 0.8, 0.7, 1, 0.7, 0.7, 1, 0.9, 0.9, 1, 0.6, 0.6, 1, 0.6,
    0.5, 1, 0.7, 0.6, 1, 0.8, 0.8, 1, 1, 1, 1, 0.9, 0.8, 1, 0.9, 0.8, 1, 1,
    1), tolerance = 1e-07)
expect_equal(x15$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x15$numberOfActiveArms[2, ], c(2.6, 3.1111111, 2.5555556, 3.1),
    tolerance = 1e-07)
expect_equal(x15$numberOfActiveArms[3, ], c(2.5, 3, 2.625, 2.9),
    tolerance = 1e-07)
expect_equal(x15$expectedNumberOfSubjects, c(690.38911, 619.77858, 554.02061,
    670.88154), tolerance = 1e-07)
expect_equal(unlist(as.list(x15$sampleSizes)), c(10, 54.180167, 50.4, 10, 57.917242, 50.5, 10, 16.188147, 12.5, 10, 64.800747, 25.135561, 10, 65.454083, 50.4, 10, 71.01474, 75.5, 10, 71.743702, 62.866861, 10, 64.800747, 45.135561, 10, 69.120607, 60.4, 10, 71.01474, 75.5, 10, 71.743702, 62.866861, 10, 84.800747, 55.535561, 10, 55.454083, 50.4, 10, 48.792518, 50.5, 10, 71.743702, 50.366861, 10, 74.800747, 45.535561, 10, 94.180167, 90.4, 10, 82.125851, 88, 10, 93.965925, 75.366861, 10, 94.800747, 65.535561), tolerance = 1e-07) expect_equal(x15$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x15$conditionalPowerAchieved[2, ], c(0.086326519, 0.23897424, 0.15375141, 0.19252038), tolerance = 1e-07) expect_equal(x15$conditionalPowerAchieved[3, ], c(0.19907656, 0.37086672, 0.52811383, 0.57866018), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x15), NA))) expect_output(print(x15)$show()) invisible(capture.output(expect_error(summary(x15), NA))) expect_output(summary(x15)$show()) x15CodeBased <- eval(parse(text = getObjectRCode(x15, stringWrapParagraphWidth = NULL))) expect_equal(x15CodeBased$iterations, x15$iterations, tolerance = 1e-05) expect_equal(x15CodeBased$rejectAtLeastOne, x15$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x15CodeBased$rejectedArmsPerStage, x15$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x15CodeBased$futilityStop, x15$futilityStop, tolerance = 1e-05) expect_equal(x15CodeBased$futilityPerStage, x15$futilityPerStage, tolerance = 1e-05) expect_equal(x15CodeBased$earlyStop, x15$earlyStop, tolerance = 1e-05) expect_equal(x15CodeBased$successPerStage, x15$successPerStage, tolerance = 1e-05) expect_equal(x15CodeBased$selectedArms, x15$selectedArms, tolerance = 1e-05) expect_equal(x15CodeBased$numberOfActiveArms, x15$numberOfActiveArms, tolerance = 1e-05) expect_equal(x15CodeBased$expectedNumberOfSubjects, x15$expectedNumberOfSubjects, 
tolerance = 1e-05) expect_equal(x15CodeBased$sampleSizes, x15$sampleSizes, tolerance = 1e-05) expect_equal(x15CodeBased$conditionalPowerAchieved, x15$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x15), "character") df <- as.data.frame(x15) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x15) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x16 <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0, typeOfSelection = "rBest", rValue = 2, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), intersectionTest = "Simes", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x16' with expected results expect_equal(x16$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x16$iterations[2, ], c(8, 8, 9, 10)) expect_equal(x16$iterations[3, ], c(8, 8, 8, 7)) expect_equal(x16$rejectAtLeastOne, c(0.1, 0.5, 0.7, 0.8), tolerance = 1e-07) expect_equal(unlist(as.list(x16$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0, 0.1, 0.1, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0, 0.2, 0.1, 0, 0.1, 0, 0.1, 0.2, 0.2, 0, 0.1, 0.4, 0.1, 0.6, 0), tolerance = 1e-07) expect_equal(x16$futilityStop, c(0.2, 0.2, 0.1, 0), tolerance = 1e-07) expect_equal(x16$futilityPerStage[1, ], c(0.2, 0.2, 0.1, 0), tolerance = 1e-07) expect_equal(x16$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x16$earlyStop[1, ], c(0.2, 0.2, 0.1, 0), tolerance = 1e-07) expect_equal(x16$earlyStop[2, ], c(0, 0, 0.1, 0.3), tolerance = 1e-07) expect_equal(x16$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x16$successPerStage[2, ], c(0, 0, 0.1, 0.3), tolerance = 1e-07) expect_equal(x16$successPerStage[3, ], c(0.1, 0.1, 0.2, 
0.1), tolerance = 1e-07) expect_equal(unlist(as.list(x16$selectedArms)), c(1, 0.2, 0.1, 1, 0.6, 0.6, 1, 0.4, 0.4, 1, 0.1, 0.1, 1, 0.3, 0.3, 1, 0.2, 0.2, 1, 0.3, 0.2, 1, 0.5, 0.5, 1, 0.3, 0.3, 1, 0.1, 0.1, 1, 0.4, 0.3, 1, 0.5, 0.3, 1, 0.8, 0.8, 1, 0.7, 0.7, 1, 0.5, 0.5, 1, 0.7, 0.4, 1, 0.8, 0.8, 1, 0.8, 0.8, 1, 0.9, 0.8, 1, 1, 0.7), tolerance = 1e-07) expect_equal(x16$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x16$numberOfActiveArms[2, ], c(2, 2, 1.7777778, 1.8), tolerance = 1e-07) expect_equal(x16$numberOfActiveArms[3, ], c(1.875, 2, 1.75, 1.8571429), tolerance = 1e-07) expect_equal(x16$expectedNumberOfSubjects, c(485.19749, 377.01763, 431.09127, 345.60572), tolerance = 1e-07) expect_equal(unlist(as.list(x16$sampleSizes)), c(10, 25, 12.5, 10, 52.984739, 51, 10, 38.255848, 50, 10, 5.1691192, 14.285714, 10, 28.803833, 37.5, 10, 25, 25, 10, 24.228929, 11.497164, 10, 28.635967, 34.757362, 10, 31.69512, 37.5, 10, 5.6938105, 1.5787961, 10, 40.9155, 37.5, 10, 42.851335, 17.605103, 10, 85.498953, 100, 10, 58.678549, 52.578796, 10, 35.341046, 48.997164, 10, 50.953751, 18.295116, 10, 85.498953, 100, 10, 71.178549, 65.078796, 10, 76.256545, 86.497164, 10, 73.805086, 49.614505), tolerance = 1e-07) expect_equal(x16$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x16$conditionalPowerAchieved[2, ], c(0.017664185, 0.17480419, 0.093445917, 0.088580327), tolerance = 1e-07) expect_equal(x16$conditionalPowerAchieved[3, ], c(0.16524243, 0.38443342, 0.48058247, 0.6510419), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x16), NA))) expect_output(print(x16)$show()) invisible(capture.output(expect_error(summary(x16), NA))) expect_output(summary(x16)$show()) x16CodeBased <- eval(parse(text = getObjectRCode(x16, stringWrapParagraphWidth = NULL))) expect_equal(x16CodeBased$iterations, x16$iterations, tolerance = 1e-05) expect_equal(x16CodeBased$rejectAtLeastOne, 
x16$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x16CodeBased$rejectedArmsPerStage, x16$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x16CodeBased$futilityStop, x16$futilityStop, tolerance = 1e-05) expect_equal(x16CodeBased$futilityPerStage, x16$futilityPerStage, tolerance = 1e-05) expect_equal(x16CodeBased$earlyStop, x16$earlyStop, tolerance = 1e-05) expect_equal(x16CodeBased$successPerStage, x16$successPerStage, tolerance = 1e-05) expect_equal(x16CodeBased$selectedArms, x16$selectedArms, tolerance = 1e-05) expect_equal(x16CodeBased$numberOfActiveArms, x16$numberOfActiveArms, tolerance = 1e-05) expect_equal(x16CodeBased$expectedNumberOfSubjects, x16$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x16CodeBased$sampleSizes, x16$sampleSizes, tolerance = 1e-05) expect_equal(x16CodeBased$conditionalPowerAchieved, x16$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x16), "character") df <- as.data.frame(x16) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x16) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x17 <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0, typeOfSelection = "epsilon", epsilonValue = 0.1, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), intersectionTest = "Simes", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x17' with expected results expect_equal(x17$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x17$iterations[2, ], c(9, 10, 10, 10)) expect_equal(x17$iterations[3, ], c(7, 8, 5, 5)) expect_equal(x17$rejectAtLeastOne, c(0.3, 0.2, 0.9, 0.8), tolerance = 1e-07) expect_equal(unlist(as.list(x17$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0.1, 0, 0.2, 0, 0, 0.1, 0.4, 0, 0.2, 0, 0, 0, 0.1, 0, 0.1, 0, 0.2, 0.3, 0.1, 0, 0.2, 0.4), tolerance = 1e-07) expect_equal(x17$futilityStop, c(0.2, 0.1, 0, 0.1), tolerance = 1e-07) expect_equal(x17$futilityPerStage[1, ], c(0.1, 0, 0, 0), tolerance = 1e-07) expect_equal(x17$futilityPerStage[2, ], c(0.1, 0.1, 0, 0.1), tolerance = 1e-07) expect_equal(x17$earlyStop[1, ], c(0.1, 0, 0, 0), tolerance = 1e-07) expect_equal(x17$earlyStop[2, ], c(0.2, 0.2, 0.5, 0.5), tolerance = 1e-07) expect_equal(x17$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x17$successPerStage[2, ], c(0.1, 0.1, 0.5, 0.4), tolerance = 1e-07) expect_equal(x17$successPerStage[3, ], c(0.2, 0.1, 0.4, 0.3), tolerance = 1e-07) expect_equal(unlist(as.list(x17$selectedArms)), c(1, 0.1, 0.1, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.1, 0, 1, 0.4, 0.3, 1, 0.4, 0.2, 1, 0, 0, 1, 0.2, 0.1, 1, 0.2, 0.1, 1, 0.3, 0.2, 1, 0.6, 0.5, 1, 0.3, 0.1, 1, 0.4, 0.4, 1, 0.3, 0.3, 1, 0.6, 0.1, 1, 0.6, 0.4, 1, 0.9, 0.7, 1, 1, 0.8, 1, 1, 0.5, 1, 1, 0.5), tolerance = 1e-07) expect_equal(x17$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x17$numberOfActiveArms[2, ], c(1.2222222, 1.2, 1.3, 1.2), tolerance = 1e-07) expect_equal(x17$numberOfActiveArms[3, ], c(1.2857143, 1.125, 1.4, 1.2), tolerance = 1e-07) expect_equal(x17$expectedNumberOfSubjects, c(328.39002, 302.69421, 285.23022, 240.4545), tolerance = 1e-07) expect_equal(unlist(as.list(x17$sampleSizes)), c(10, 4.4952582, 14.285714, 10, 19.967039, 25, 10, 10, 20, 10, 10, 0, 10, 21.883735, 42.857143, 10, 26.51119, 25, 10, 0, 0, 10, 13.162215, 6.3433684, 10, 14.295646, 14.285714, 10, 12.191217, 8.9015119, 10, 34.361222, 100, 10, 22.260169, 5.4863466, 10, 27.97297, 57.142857, 10, 13.444855, 22.756787, 10, 23.167319, 20, 10, 26.747723, 50.618475, 10, 62.896861, 100, 10, 55.457645, 74.744525, 10, 47.701679, 100, 10, 59.007892, 56.104821), tolerance = 1e-07) expect_equal(x17$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, 
NA_real_, NA_real_)) expect_equal(x17$conditionalPowerAchieved[2, ], c(0.025620238, 0.099222073, 0.15711506, 0.067612991), tolerance = 1e-07) expect_equal(x17$conditionalPowerAchieved[3, ], c(0.2137719, 0.30848358, 0.15636561, 0.6965125), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x17), NA))) expect_output(print(x17)$show()) invisible(capture.output(expect_error(summary(x17), NA))) expect_output(summary(x17)$show()) x17CodeBased <- eval(parse(text = getObjectRCode(x17, stringWrapParagraphWidth = NULL))) expect_equal(x17CodeBased$iterations, x17$iterations, tolerance = 1e-05) expect_equal(x17CodeBased$rejectAtLeastOne, x17$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x17CodeBased$rejectedArmsPerStage, x17$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x17CodeBased$futilityStop, x17$futilityStop, tolerance = 1e-05) expect_equal(x17CodeBased$futilityPerStage, x17$futilityPerStage, tolerance = 1e-05) expect_equal(x17CodeBased$earlyStop, x17$earlyStop, tolerance = 1e-05) expect_equal(x17CodeBased$successPerStage, x17$successPerStage, tolerance = 1e-05) expect_equal(x17CodeBased$selectedArms, x17$selectedArms, tolerance = 1e-05) expect_equal(x17CodeBased$numberOfActiveArms, x17$numberOfActiveArms, tolerance = 1e-05) expect_equal(x17CodeBased$expectedNumberOfSubjects, x17$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x17CodeBased$sampleSizes, x17$sampleSizes, tolerance = 1e-05) expect_equal(x17CodeBased$conditionalPowerAchieved, x17$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x17), "character") df <- as.data.frame(x17) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x17) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x18 <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0, plannedSubjects = c(10, 30, 50), stDev = 1.2, 
muMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), intersectionTest = "Simes", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x18' with expected results expect_equal(x18$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x18$iterations[2, ], c(10, 9, 8, 10)) expect_equal(x18$iterations[3, ], c(7, 8, 1, 4)) expect_equal(x18$rejectAtLeastOne, c(0.3, 0.1, 0.7, 0.6), tolerance = 1e-07) expect_equal(unlist(as.list(x18$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0, 0.1, 0.2, 0, 0, 0.1, 0, 0.2, 0.3, 0, 0.1, 0.3, 0), tolerance = 1e-07) expect_equal(x18$futilityStop, c(0, 0.1, 0.2, 0), tolerance = 1e-07) expect_equal(x18$futilityPerStage[1, ], c(0, 0.1, 0.2, 0), tolerance = 1e-07) expect_equal(x18$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x18$earlyStop[1, ], c(0, 0.1, 0.2, 0), tolerance = 1e-07) expect_equal(x18$earlyStop[2, ], c(0.3, 0.1, 0.7, 0.6), tolerance = 1e-07) expect_equal(x18$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x18$successPerStage[2, ], c(0.3, 0.1, 0.7, 0.6), tolerance = 1e-07) expect_equal(x18$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x18$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.1, 0, 1, 0.1, 0.1, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0, 0, 1, 0.1, 0, 1, 0.1, 0.1, 1, 0.2, 0.2, 1, 0.1, 0, 1, 0.4, 0.3, 1, 0.5, 0.2, 1, 0.2, 0.1, 1, 0.6, 0.1, 1, 0.4, 0, 1, 1, 0.7, 1, 0.9, 0.8, 1, 0.8, 0.1, 1, 1, 0.4), tolerance = 1e-07) expect_equal(x18$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x18$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x18$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x18$expectedNumberOfSubjects, c(179.95701, 273.63073, 113.26043, 249.89211), tolerance = 1e-07) expect_equal(unlist(as.list(x18$sampleSizes)), c(10, 
1.1167748, 1.5953926, 10, 5.9718999, 6.7183874, 10, 0.5, 0, 10, 10, 25, 10, 6.7277808, 9.6111155, 10, 37.581628, 42.279332, 10, 0, 0, 10, 10, 0, 10, 2.4005123, 3.4293032, 10, 8.5110901, 9.5749763, 10, 11.503905, 0, 10, 32.126443, 55.316107, 10, 28.865098, 22.318956, 10, 15.610585, 5.061908, 10, 24.075387, 27.667829, 10, 15.693169, 0, 10, 39.110166, 36.954767, 10, 67.675203, 63.634604, 10, 36.079292, 27.667829, 10, 67.819612, 80.316107), tolerance = 1e-07) expect_equal(x18$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x18$conditionalPowerAchieved[2, ], c(0.064552587, 0.050542809, 0.13271614, 0.098246228), tolerance = 1e-07) expect_equal(x18$conditionalPowerAchieved[3, ], c(0.1164829, 0.22353174, 0.16556673, 0.12567304), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x18), NA))) expect_output(print(x18)$show()) invisible(capture.output(expect_error(summary(x18), NA))) expect_output(summary(x18)$show()) x18CodeBased <- eval(parse(text = getObjectRCode(x18, stringWrapParagraphWidth = NULL))) expect_equal(x18CodeBased$iterations, x18$iterations, tolerance = 1e-05) expect_equal(x18CodeBased$rejectAtLeastOne, x18$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x18CodeBased$rejectedArmsPerStage, x18$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x18CodeBased$futilityStop, x18$futilityStop, tolerance = 1e-05) expect_equal(x18CodeBased$futilityPerStage, x18$futilityPerStage, tolerance = 1e-05) expect_equal(x18CodeBased$earlyStop, x18$earlyStop, tolerance = 1e-05) expect_equal(x18CodeBased$successPerStage, x18$successPerStage, tolerance = 1e-05) expect_equal(x18CodeBased$selectedArms, x18$selectedArms, tolerance = 1e-05) expect_equal(x18CodeBased$numberOfActiveArms, x18$numberOfActiveArms, tolerance = 1e-05) expect_equal(x18CodeBased$expectedNumberOfSubjects, x18$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x18CodeBased$sampleSizes, x18$sampleSizes, 
tolerance = 1e-05) expect_equal(x18CodeBased$conditionalPowerAchieved, x18$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x18), "character") df <- as.data.frame(x18) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x18) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x19 <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0, typeOfSelection = "all", plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), intersectionTest = "Hierarchical", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x19' with expected results expect_equal(x19$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x19$iterations[2, ], c(10, 7, 9, 10)) expect_equal(x19$iterations[3, ], c(6, 3, 4, 6)) expect_equal(x19$rejectAtLeastOne, c(0.1, 0, 0.1, 0), tolerance = 1e-07) expect_equal(unlist(as.list(x19$rejectedArmsPerStage)), c(0, 0.1, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0), tolerance = 1e-07) expect_equal(x19$futilityStop, c(0.4, 0.7, 0.6, 0.4), tolerance = 1e-07) expect_equal(x19$futilityPerStage[1, ], c(0, 0.3, 0.1, 0), tolerance = 1e-07) expect_equal(x19$futilityPerStage[2, ], c(0.4, 0.4, 0.5, 0.4), tolerance = 1e-07) expect_equal(x19$earlyStop[1, ], c(0, 0.3, 0.1, 0), tolerance = 1e-07) expect_equal(x19$earlyStop[2, ], c(0.4, 0.4, 0.5, 0.4), tolerance = 1e-07) expect_equal(x19$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x19$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x19$successPerStage[3, ], c(0, 0, 0.1, 0), tolerance = 1e-07) expect_equal(unlist(as.list(x19$selectedArms)), c(1, 0.6, 0.6, 1, 0.3, 0.3, 1, 0.4, 
0.4, 1, 0.6, 0.6, 1, 0.6, 0.4, 1, 0.5, 0.2, 1, 0.7, 0.3, 1, 1, 0.6, 1, 0.6, 0.4, 1, 0.7, 0.3, 1, 0.7, 0.4, 1, 0.7, 0.4, 1, 0.7, 0.3, 1, 0.5, 0.2, 1, 0.9, 0.4, 1, 0.8, 0.5, 1, 1, 0.6, 1, 0.7, 0.3, 1, 0.9, 0.4, 1, 1, 0.6), tolerance = 1e-07) expect_equal(x19$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x19$numberOfActiveArms[2, ], c(2.5, 2.8571429, 3, 3.1), tolerance = 1e-07) expect_equal(x19$numberOfActiveArms[3, ], c(2.8333333, 3.3333333, 3.75, 3.5), tolerance = 1e-07) expect_equal(x19$expectedNumberOfSubjects, c(600.66781, 398.09964, 600, 634), tolerance = 1e-07) expect_equal(unlist(as.list(x19$sampleSizes)), c(10, 56.333476, 93.889127, 10, 42.857143, 100, 10, 44.444444, 100, 10, 50.4, 84, 10, 56.333476, 60.555794, 10, 52.89273, 66.666667, 10, 77.777778, 75, 10, 90.4, 84, 10, 60, 66.666667, 10, 81.464159, 100, 10, 77.777778, 100, 10, 60.4, 50.666667, 10, 66.333476, 43.889127, 10, 52.89273, 66.666667, 10, 100, 100, 10, 70.4, 67.333333, 10, 96.333476, 93.889127, 10, 81.464159, 100, 10, 100, 100, 10, 90.4, 84), tolerance = 1e-07) expect_equal(x19$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x19$conditionalPowerAchieved[2, ], c(0.014835699, 0.082104288, 0.088043543, 0.18689602), tolerance = 1e-07) expect_equal(x19$conditionalPowerAchieved[3, ], c(0.35039062, 0.35957167, 0.84477407, 0.62586447), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x19), NA))) expect_output(print(x19)$show()) invisible(capture.output(expect_error(summary(x19), NA))) expect_output(summary(x19)$show()) x19CodeBased <- eval(parse(text = getObjectRCode(x19, stringWrapParagraphWidth = NULL))) expect_equal(x19CodeBased$iterations, x19$iterations, tolerance = 1e-05) expect_equal(x19CodeBased$rejectAtLeastOne, x19$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x19CodeBased$rejectedArmsPerStage, x19$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x19CodeBased$futilityStop, 
x19$futilityStop, tolerance = 1e-05) expect_equal(x19CodeBased$futilityPerStage, x19$futilityPerStage, tolerance = 1e-05) expect_equal(x19CodeBased$earlyStop, x19$earlyStop, tolerance = 1e-05) expect_equal(x19CodeBased$successPerStage, x19$successPerStage, tolerance = 1e-05) expect_equal(x19CodeBased$selectedArms, x19$selectedArms, tolerance = 1e-05) expect_equal(x19CodeBased$numberOfActiveArms, x19$numberOfActiveArms, tolerance = 1e-05) expect_equal(x19CodeBased$expectedNumberOfSubjects, x19$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x19CodeBased$sampleSizes, x19$sampleSizes, tolerance = 1e-05) expect_equal(x19CodeBased$conditionalPowerAchieved, x19$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x19), "character") df <- as.data.frame(x19) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x19) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x20 <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0, typeOfSelection = "rBest", rValue = 2, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), intersectionTest = "Hierarchical", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x20' with expected results expect_equal(x20$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x20$iterations[2, ], c(9, 9, 8, 10)) expect_equal(x20$iterations[3, ], c(2, 6, 3, 2)) expect_equal(x20$rejectAtLeastOne, c(0, 0.2, 0, 0.1), tolerance = 1e-07) expect_equal(unlist(as.list(x20$rejectedArmsPerStage)), c(0, 0, 0, 0.1, 0, 0.1, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), tolerance = 1e-07) expect_equal(x20$futilityStop, 
c(0.8, 0.4, 0.7, 0.7), tolerance = 1e-07) expect_equal(x20$futilityPerStage[1, ], c(0.1, 0.1, 0.2, 0), tolerance = 1e-07) expect_equal(x20$futilityPerStage[2, ], c(0.7, 0.3, 0.5, 0.7), tolerance = 1e-07) expect_equal(x20$earlyStop[1, ], c(0.1, 0.1, 0.2, 0), tolerance = 1e-07) expect_equal(x20$earlyStop[2, ], c(0.7, 0.3, 0.5, 0.8), tolerance = 1e-07) expect_equal(x20$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x20$successPerStage[2, ], c(0, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x20$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x20$selectedArms)), c(1, 0.2, 0.2, 1, 0.6, 0.6, 1, 0.3, 0.3, 1, 0.3, 0.2, 1, 0.4, 0, 1, 0.1, 0, 1, 0.3, 0, 1, 0.5, 0, 1, 0.3, 0, 1, 0.3, 0.1, 1, 0.3, 0.1, 1, 0.4, 0, 1, 0.8, 0.2, 1, 0.8, 0.5, 1, 0.5, 0.2, 1, 0.8, 0.2, 1, 0.9, 0.2, 1, 0.9, 0.6, 1, 0.8, 0.3, 1, 1, 0.2), tolerance = 1e-07) expect_equal(x20$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x20$numberOfActiveArms[2, ], c(1.8888889, 2, 1.75, 2), tolerance = 1e-07) expect_equal(x20$numberOfActiveArms[3, ], c(2, 2, 2, 2)) expect_equal(x20$expectedNumberOfSubjects, c(307.09166, 377.99189, 286.78887, 300.60787), tolerance = 1e-07) expect_equal(unlist(as.list(x20$sampleSizes)), c(10, 22.222222, 100, 10, 47.097546, 70.646318, 10, 30.537829, 81.43421, 10, 25.370782, 76.853911, 10, 33.228314, 0, 10, 11.111111, 0, 10, 27.257545, 0, 10, 38.448036, 0, 10, 17.763874, 0, 10, 27.283387, 16.666667, 10, 33.529937, 33.333333, 10, 22.273651, 0, 10, 69.075708, 100, 10, 63.269822, 53.979652, 10, 39.758676, 48.100877, 10, 50.237878, 76.853911, 10, 76.700615, 100, 10, 74.380933, 70.646318, 10, 73.288614, 81.43421, 10, 68.165174, 76.853911), tolerance = 1e-07) expect_equal(x20$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x20$conditionalPowerAchieved[2, ], c(0.0535706, 0.15544115, 0.10470149, 0.094637028), tolerance = 1e-07) expect_equal(x20$conditionalPowerAchieved[3, ], c(0.09464551, 0.36740056, 0.23354895, 0.75738479), tolerance 
= 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x20), NA))) expect_output(print(x20)$show()) invisible(capture.output(expect_error(summary(x20), NA))) expect_output(summary(x20)$show()) x20CodeBased <- eval(parse(text = getObjectRCode(x20, stringWrapParagraphWidth = NULL))) expect_equal(x20CodeBased$iterations, x20$iterations, tolerance = 1e-05) expect_equal(x20CodeBased$rejectAtLeastOne, x20$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x20CodeBased$rejectedArmsPerStage, x20$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x20CodeBased$futilityStop, x20$futilityStop, tolerance = 1e-05) expect_equal(x20CodeBased$futilityPerStage, x20$futilityPerStage, tolerance = 1e-05) expect_equal(x20CodeBased$earlyStop, x20$earlyStop, tolerance = 1e-05) expect_equal(x20CodeBased$successPerStage, x20$successPerStage, tolerance = 1e-05) expect_equal(x20CodeBased$selectedArms, x20$selectedArms, tolerance = 1e-05) expect_equal(x20CodeBased$numberOfActiveArms, x20$numberOfActiveArms, tolerance = 1e-05) expect_equal(x20CodeBased$expectedNumberOfSubjects, x20$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x20CodeBased$sampleSizes, x20$sampleSizes, tolerance = 1e-05) expect_equal(x20CodeBased$conditionalPowerAchieved, x20$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x20), "character") df <- as.data.frame(x20) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x20) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x21 <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0, typeOfSelection = "epsilon", epsilonValue = 0.1, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), intersectionTest = "Hierarchical", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 
100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x21' with expected results expect_equal(x21$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x21$iterations[2, ], c(9, 9, 10, 10)) expect_equal(x21$iterations[3, ], c(1, 1, 3, 0)) expect_equal(x21$rejectAtLeastOne, c(0, 0.1, 0.1, 0), tolerance = 1e-07) expect_equal(unlist(as.list(x21$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), tolerance = 1e-07) expect_equal(x21$futilityStop, c(0.9, 0.9, 0.6, 1), tolerance = 1e-07) expect_equal(x21$futilityPerStage[1, ], c(0.1, 0.1, 0, 0), tolerance = 1e-07) expect_equal(x21$futilityPerStage[2, ], c(0.8, 0.8, 0.6, 1), tolerance = 1e-07) expect_equal(x21$earlyStop[1, ], c(0.1, 0.1, 0, 0), tolerance = 1e-07) expect_equal(x21$earlyStop[2, ], c(0.8, 0.8, 0.7, 1), tolerance = 1e-07) expect_equal(x21$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x21$successPerStage[2, ], c(0, 0, 0.1, 0), tolerance = 1e-07) expect_equal(x21$successPerStage[3, ], c(0, 0.1, 0, 0), tolerance = 1e-07) expect_equal(unlist(as.list(x21$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.4, 0.3, 1, 0, 0, 1, 0.3, 0, 1, 0.3, 0, 1, 0, 0, 1, 0.3, 0, 1, 0.3, 0, 1, 0.2, 0, 1, 0.2, 0, 1, 0.1, 0, 1, 0.3, 0, 1, 0.5, 0, 1, 0.5, 0.1, 1, 0.6, 0, 1, 0.9, 0.1, 1, 0.9, 0.1, 1, 1, 0.3, 1, 1, 0), tolerance = 1e-07) expect_equal(x21$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x21$numberOfActiveArms[2, ], c(1.1111111, 1.2222222, 1.1, 1), tolerance = 1e-07) expect_equal(x21$numberOfActiveArms[3, ], c(1, 1, 1.3333333, NaN), tolerance = 1e-07) expect_equal(x21$expectedNumberOfSubjects, c(190.08367, 169.68391, 280.67025, NaN), tolerance = 1e-07) expect_equal(unlist(as.list(x21$sampleSizes)), c(10, 4.4952582, 40.457324, 10, 11.111111, 100, 10, 30.407004, 99.157615, 10, 0, NaN, 10, 19.514172, 0, 10, 21.967417, 0, 10, 0, 0, 10, 21.272121, 
NaN, 10, 25.406757, 0, 10, 11.52108, 0, 10, 16.622221, 0, 10, 10, NaN, 10, 25.603407, 0, 10, 20.034041, 0, 10, 38.558614, 33.333333, 10, 21.010924, NaN, 10, 71.638409, 40.457324, 10, 46.126253, 100, 10, 75.587839, 99.157615, 10, 52.283045, NaN), tolerance = 1e-07) expect_equal(x21$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x21$conditionalPowerAchieved[2, ], c(0.023159424, 0.14301241, 0.046563399, 0.11230633), tolerance = 1e-07) expect_equal(x21$conditionalPowerAchieved[3, ], c(0.07537462, 0.00060378387, 0.33359002, NaN), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x21), NA))) expect_output(print(x21)$show()) invisible(capture.output(expect_error(summary(x21), NA))) expect_output(summary(x21)$show()) x21CodeBased <- eval(parse(text = getObjectRCode(x21, stringWrapParagraphWidth = NULL))) expect_equal(x21CodeBased$iterations, x21$iterations, tolerance = 1e-05) expect_equal(x21CodeBased$rejectAtLeastOne, x21$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x21CodeBased$rejectedArmsPerStage, x21$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$futilityStop, x21$futilityStop, tolerance = 1e-05) expect_equal(x21CodeBased$futilityPerStage, x21$futilityPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$earlyStop, x21$earlyStop, tolerance = 1e-05) expect_equal(x21CodeBased$successPerStage, x21$successPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$selectedArms, x21$selectedArms, tolerance = 1e-05) expect_equal(x21CodeBased$numberOfActiveArms, x21$numberOfActiveArms, tolerance = 1e-05) expect_equal(x21CodeBased$expectedNumberOfSubjects, x21$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x21CodeBased$sampleSizes, x21$sampleSizes, tolerance = 1e-05) expect_equal(x21CodeBased$conditionalPowerAchieved, x21$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x21), "character") df <- as.data.frame(x21) expect_s3_class(df, 
"data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x21) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x22 <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0.1, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.1, 0.3, 0.1), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 1 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x22' with expected results expect_equal(x22$iterations[1, ], c(1, 1, 1)) expect_equal(x22$iterations[2, ], c(1, 1, 1)) expect_equal(x22$iterations[3, ], c(0, 1, 1)) expect_equal(x22$rejectAtLeastOne, c(0, 0, 0)) expect_equal(unlist(as.list(x22$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(x22$futilityStop, c(1, 0, 0)) expect_equal(x22$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(x22$futilityPerStage[2, ], c(1, 0, 0)) expect_equal(x22$earlyStop[1, ], c(0, 0, 0)) expect_equal(x22$earlyStop[2, ], c(1, 0, 0)) expect_equal(x22$successPerStage[1, ], c(0, 0, 0)) expect_equal(x22$successPerStage[2, ], c(0, 0, 0)) expect_equal(x22$successPerStage[3, ], c(0, 0, 0)) expect_equal(unlist(as.list(x22$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1)) expect_equal(x22$numberOfActiveArms[1, ], c(4, 4, 4)) expect_equal(x22$numberOfActiveArms[2, ], c(1, 1, 1)) expect_equal(x22$numberOfActiveArms[3, ], c(NaN, 1, 1)) expect_equal(x22$expectedNumberOfSubjects, c(NaN, 450, 148.90979), tolerance = 1e-07) expect_equal(unlist(as.list(x22$sampleSizes)), c(10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 32.875253, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 100, 100, 10, 10.358511, 39.096382, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 32.875253, 0, 
10, 100, 100, 10, 10.358511, 39.096382), tolerance = 1e-07) expect_equal(x22$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x22$conditionalPowerAchieved[2, ], c(0.011749146, 0.0034013018, 0.045375018), tolerance = 1e-07) expect_equal(x22$conditionalPowerAchieved[3, ], c(NaN, 0.15769372, 0.8), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x22), NA))) expect_output(print(x22)$show()) invisible(capture.output(expect_error(summary(x22), NA))) expect_output(summary(x22)$show()) x22CodeBased <- eval(parse(text = getObjectRCode(x22, stringWrapParagraphWidth = NULL))) expect_equal(x22CodeBased$iterations, x22$iterations, tolerance = 1e-05) expect_equal(x22CodeBased$rejectAtLeastOne, x22$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x22CodeBased$rejectedArmsPerStage, x22$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$futilityStop, x22$futilityStop, tolerance = 1e-05) expect_equal(x22CodeBased$futilityPerStage, x22$futilityPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$earlyStop, x22$earlyStop, tolerance = 1e-05) expect_equal(x22CodeBased$successPerStage, x22$successPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$selectedArms, x22$selectedArms, tolerance = 1e-05) expect_equal(x22CodeBased$numberOfActiveArms, x22$numberOfActiveArms, tolerance = 1e-05) expect_equal(x22CodeBased$expectedNumberOfSubjects, x22$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x22CodeBased$sampleSizes, x22$sampleSizes, tolerance = 1e-05) expect_equal(x22CodeBased$conditionalPowerAchieved, x22$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x22), "character") df <- as.data.frame(x22) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x22) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMultiArmMeans': using calcSubjectsFunction", { 
.skipTestIfDisabled() calcSubjectsFunctionSimulationMultiArmMeans <- function(..., stage, minNumberOfSubjectsPerStage) { return(ifelse(stage == 3, 33, minNumberOfSubjectsPerStage[stage])) } x <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), typeOfShape = "linear", activeArms = 4, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10, calcSubjectsFunction = calcSubjectsFunctionSimulationMultiArmMeans ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x' with expected results expect_equal(x$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x$iterations[3, ], c(9, 9, 8, 8)) expect_equal(x$rejectAtLeastOne, c(0.1, 0.1, 0.3, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0.1, 0, 0.2), tolerance = 1e-07) expect_equal(x$futilityStop, c(0, 0, 0, 0)) expect_equal(x$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x$earlyStop[2, ], c(0.1, 0.1, 0.2, 0.2), tolerance = 1e-07) expect_equal(x$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x$successPerStage[2, ], c(0.1, 0.1, 0.2, 0.2), tolerance = 1e-07) expect_equal(x$successPerStage[3, ], c(0, 0, 0.1, 0.2), tolerance = 1e-07) expect_equal(unlist(as.list(x$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.1, 0, 1, 0, 0, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0, 0, 1, 0.3, 0.2, 1, 0.1, 0.1, 1, 0.3, 0.2, 1, 0.4, 0.4, 1, 0.1, 0.1, 1, 0.5, 0.4, 1, 0.2, 0.2, 1, 0.5, 0.4, 1, 0.6, 0.5, 1, 1, 0.9, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 0.8), tolerance = 1e-07) 
expect_equal(x$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x$expectedNumberOfSubjects, c(117.4, 117.4, 110.8, 110.8), tolerance = 1e-07) expect_equal(unlist(as.list(x$sampleSizes)), c(10, 0.4, 3.6666667, 10, 0.4, 3.6666667, 10, 0.4, 0, 10, 0, 0, 10, 1.2, 11, 10, 1.6, 14.666667, 10, 0, 0, 10, 1.2, 8.25, 10, 0.4, 3.6666667, 10, 1.2, 7.3333333, 10, 1.6, 16.5, 10, 0.4, 4.125, 10, 2, 14.666667, 10, 0.8, 7.3333333, 10, 2, 16.5, 10, 2.4, 20.625, 10, 4, 33, 10, 4, 33, 10, 4, 33, 10, 4, 33), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$conditionalPowerAchieved[2, ], c(0.054038913, 0.015750083, 0.11207917, 0.055949011), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[3, ], c(0.44922292, 0.31010643, 0.28872426, 0.56321232), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))) expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05) expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05) expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05) expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05) expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05) expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05) expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05) expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms, tolerance = 1e-05) 
expect_equal(xCodeBased$expectedNumberOfSubjects, x$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMultiArmMeans': using selectArmsFunction", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns} # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips} # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections} # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmMeans} # @refFS[Formula]{fs:simulationMultiArmDoseResponse} # @refFS[Formula]{fs:simulationMultiArmMeansGenerate} # @refFS[Formula]{fs:simulationMultiArmMeansTestStatistics} # @refFS[Formula]{fs:simulationMultiArmSelections} # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} selectArmsFunctionSimulationMultiArmMeans <- function(effectSizes) { return(c(TRUE, FALSE, FALSE, FALSE)) } x <- getSimulationMultiArmMeans( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), typeOfShape = "linear", activeArms = 4, plannedSubjects = c(10, 30, 50), stDev = 1.2, muMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), maxNumberOfIterations = 10, selectArmsFunction = selectArmsFunctionSimulationMultiArmMeans, typeOfSelection = "userDefined" ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x' with expected results expect_equal(x$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x$iterations[3, ], c(10, 9, 
9, 10)) expect_equal(x$rejectAtLeastOne, c(0.1, 0.1, 0.2, 0.1), tolerance = 1e-07) expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0, 0, 0.1, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0, 0), tolerance = 1e-07) expect_equal(x$futilityStop, c(0, 0, 0, 0)) expect_equal(x$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x$earlyStop[2, ], c(0, 0.1, 0.1, 0), tolerance = 1e-07) expect_equal(x$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x$successPerStage[2, ], c(0, 0.1, 0.1, 0), tolerance = 1e-07) expect_equal(x$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x$selectedArms)), c(1, 1, 1, 1, 1, 0.9, 1, 1, 0.9, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.9, 1, 1, 1), tolerance = 1e-07) expect_equal(x$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x$expectedNumberOfSubjects, c(130, 126, 126, 130)) expect_equal(unlist(as.list(x$sampleSizes)), c(10, 20, 20, 10, 20, 20, 10, 20, 20, 10, 20, 20, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 20, 20, 10, 20, 20, 10, 20, 20, 10, 20, 20)) expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$conditionalPowerAchieved[2, ], c(0.091251689, 0.027836233, 0.13855746, 0.12908437), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[3, ], c(0.071420101, 0.027813347, 0.076509581, 0.21688562), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) 
invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))) expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05) expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05) expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05) expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05) expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05) expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05) expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05) expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms, tolerance = 1e-05) expect_equal(xCodeBased$expectedNumberOfSubjects, x$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMultiArmMeans': using intersectionTest = 'Sidak' and typeOfSelection = 'rBest'", { .skipTestIfDisabled() designIN <- getDesignInverseNormal(typeOfDesign = "P", kMax = 3, futilityBounds = c(0, 0)) x <- getSimulationMultiArmMeans(designIN, activeArms = 3, typeOfShape = "sigmoidEmax", muMaxVector = seq(0, 1, 0.2), gED50 = 2, plannedSubjects = cumsum(rep(20, 3)), intersectionTest = "Sidak", typeOfSelection = "rBest", rValue = 2, threshold = -Inf, successCriterion = "all", maxNumberOfIterations = 100, seed = 3456 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x' with expected 
results expect_equal(x$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x$iterations[2, ], c(42, 52, 69, 77, 88, 87)) expect_equal(x$iterations[3, ], c(30, 33, 61, 73, 80, 61)) expect_equal(x$rejectAtLeastOne, c(0.02, 0.03, 0.18, 0.33, 0.49, 0.8), tolerance = 1e-07) expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0.01, 0, 0, 0, 0.01, 0.01, 0.02, 0.03, 0.01, 0.02, 0.01, 0.01, 0.06, 0.1, 0.04, 0.04, 0.01, 0, 0, 0.01, 0.02, 0, 0.03, 0, 0.03, 0.04, 0.06, 0.08, 0.08, 0.11, 0.1, 0.14, 0.27, 0.12, 0, 0, 0, 0, 0.01, 0, 0.02, 0.01, 0.08, 0.08, 0.05, 0.11, 0.09, 0.16, 0.13, 0.18, 0.25, 0.24), tolerance = 1e-07) expect_equal(x$futilityStop, c(0.7, 0.66, 0.39, 0.23, 0.11, 0.07), tolerance = 1e-07) expect_equal(x$futilityPerStage[1, ], c(0.58, 0.48, 0.31, 0.22, 0.11, 0.07), tolerance = 1e-07) expect_equal(x$futilityPerStage[2, ], c(0.12, 0.18, 0.08, 0.01, 0, 0), tolerance = 1e-07) expect_equal(x$earlyStop[1, ], c(0.58, 0.48, 0.31, 0.23, 0.12, 0.13), tolerance = 1e-07) expect_equal(x$earlyStop[2, ], c(0.12, 0.19, 0.08, 0.04, 0.08, 0.26), tolerance = 1e-07) expect_equal(x$successPerStage[1, ], c(0, 0, 0, 0.01, 0.01, 0.06), tolerance = 1e-07) expect_equal(x$successPerStage[2, ], c(0, 0.01, 0, 0.03, 0.08, 0.26), tolerance = 1e-07) expect_equal(x$successPerStage[3, ], c(0, 0, 0.03, 0.1, 0.16, 0.2), tolerance = 1e-07) expect_equal(unlist(as.list(x$selectedArms)), c(1, 0.25, 0.17, 1, 0.25, 0.16, 1, 0.31, 0.26, 1, 0.32, 0.3, 1, 0.42, 0.41, 1, 0.32, 0.26, 1, 0.32, 0.22, 1, 0.43, 0.26, 1, 0.48, 0.45, 1, 0.56, 0.54, 1, 0.63, 0.56, 1, 0.7, 0.47, 1, 0.27, 0.21, 1, 0.36, 0.24, 1, 0.59, 0.51, 1, 0.66, 0.62, 1, 0.71, 0.63, 1, 0.72, 0.49, 1, 0.42, 0.3, 1, 0.52, 0.33, 1, 0.69, 0.61, 1, 0.77, 0.73, 1, 0.88, 0.8, 1, 0.87, 0.61), tolerance = 1e-07) expect_equal(x$numberOfActiveArms[1, ], c(3, 3, 3, 3, 3, 3)) expect_equal(x$numberOfActiveArms[2, ], c(2, 2, 2, 2, 2, 2)) expect_equal(x$numberOfActiveArms[3, ], c(2, 2, 2, 2, 2, 2)) expect_equal(x$expectedNumberOfSubjects, 
c(123.2, 131, 158, 170, 180.8, 168.8), tolerance = 1e-07) expect_equal(unlist(as.list(x$sampleSizes)), c(20, 11.904762, 11.333333, 20, 9.6153846, 9.6969697, 20, 8.9855072, 8.5245902, 20, 8.3116883, 8.2191781, 20, 9.5454545, 10.25, 20, 7.3563218, 8.5245902, 20, 15.238095, 14.666667, 20, 16.538462, 15.757576, 20, 13.913043, 14.754098, 20, 14.545455, 14.794521, 20, 14.318182, 14, 20, 16.091954, 15.409836, 20, 12.857143, 14, 20, 13.846154, 14.545455, 20, 17.101449, 16.721311, 20, 17.142857, 16.986301, 20, 16.136364, 15.75, 20, 16.551724, 16.065574, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$conditionalPowerAchieved[2, ], c(0.058967382, 0.048523877, 0.17154294, 0.22180985, 0.2182802, 0.37414282), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[3, ], c(0.077820194, 0.14430526, 0.21266388, 0.28752608, 0.40185892, 0.5016109), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))) expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05) expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05) expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05) expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05) expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05) expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05) expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05) expect_equal(xCodeBased$numberOfActiveArms, 
x$numberOfActiveArms, tolerance = 1e-05)
expect_equal(xCodeBased$expectedNumberOfSubjects, x$expectedNumberOfSubjects, tolerance = 1e-05)
expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05)
expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05)
expect_type(names(x), "character")
df <- as.data.frame(x)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

# Runs the same seeded multi-arm simulation under two designs (an inverse normal
# design spending all alpha at the final look, and a conditional Dunnett design
# with interim at half the information), then checks that the last row of the
# effect matrix ("drift") reproduces the linear shape and that the two designs'
# rejection probabilities agree within a simulation tolerance of 0.06.
test_that("'getSimulationMultiArmMeans': plot drift - comparison of raw values", {
    .skipTestIfDisabled()

    # User-defined alpha spending c(0, 0.025): no alpha spent at the interim
    designPureConditionalDunnett <- getDesignInverseNormal(typeOfDesign = "asUser", userAlphaSpending = c(0, 0.025))
    designCombinationDunnett <- getDesignConditionalDunnett(informationAtInterim = 0.5, secondStageConditioning = TRUE)
    # Identical simulation settings and seed for both designs
    resultsPureConditionalDunnett <- getSimulationMultiArmMeans(designPureConditionalDunnett,
        activeArms = 3, muMaxVector = seq(0, 1, 0.2), typeOfShape = "linear",
        plannedSubjects = cumsum(rep(20, 2)), intersectionTest = "Dunnett",
        adaptations = TRUE, typeOfSelection = "best", effectMeasure = "effectEstimate",
        threshold = -Inf, maxNumberOfIterations = 100, allocationRatioPlanned = 1, seed = 123
    )
    resultsCombinationDunnett <- getSimulationMultiArmMeans(designCombinationDunnett,
        activeArms = 3, muMaxVector = seq(0, 1, 0.2), typeOfShape = "linear",
        plannedSubjects = cumsum(rep(20, 2)), intersectionTest = "Dunnett",
        adaptations = TRUE, typeOfSelection = "best", effectMeasure = "effectEstimate",
        threshold = -Inf, maxNumberOfIterations = 100, allocationRatioPlanned = 1, seed = 123
    )
    # Drift = effect sizes in the last row of the effect matrix
    drift <- resultsPureConditionalDunnett$effectMatrix[nrow(resultsPureConditionalDunnett$effectMatrix), ]

    ## Comparison of the results of numeric object 'drift' with expected results
    expect_equal(drift, c(0, 0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07)
    expect_equal(resultsPureConditionalDunnett$rejectAtLeastOne,
        resultsCombinationDunnett$rejectAtLeastOne, tolerance = 0.06)
})

# Regression test comparing the multi-arm simulation restricted to a single
# active arm against expected operating characteristics; later in the test the
# same scenario is re-run with the base (two-group) means simulation.
test_that("'getSimulationMultiArmMeans': comparison of base and multi-arm", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections}
    # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmMeans}
    # @refFS[Formula]{fs:simulationMultiArmDoseResponse}
    # @refFS[Formula]{fs:simulationMultiArmMeansGenerate}
    # @refFS[Formula]{fs:simulationMultiArmMeansTestStatistics}
    # @refFS[Formula]{fs:simulationMultiArmSelections}
    # @refFS[Formula]{fs:multiarmRejectionRule}
    design <- getDesignInverseNormal(typeOfDesign = "WT", deltaWT = 0.15, futilityBounds = c(-0.5, 0), informationRates = c(0.4, 0.8, 1))
    x <- getSimulationMultiArmMeans(
        design = design, activeArms = 1,
        plannedSubjects = c(20, 40, 60), stDev = 1.5, muMaxVector = seq(0, 1, 0.2),
        conditionalPower = 0.80,
        minNumberOfSubjectsPerStage = c(NA, 20, 20),
        maxNumberOfSubjectsPerStage = c(NA, 80, 80),
        # thetaH1 = 0.5,
        maxNumberOfIterations = 100, allocationRatioPlanned = 2, seed = 1234
    )

    ## Comparison of the results of SimulationResultsMultiArmMeans object 'x' with expected results
    expect_equal(x$iterations[1, ], c(100, 100, 100, 100, 100, 100))
    expect_equal(x$iterations[2, ], c(81, 88, 89, 88, 93, 79))
    expect_equal(x$iterations[3, ], c(53, 70, 64, 51, 37, 12))
    expect_equal(x$rejectAtLeastOne, c(0.01, 0.11, 0.39, 0.73, 0.93, 0.98), tolerance = 1e-07)
    expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0.01, 0, 0.05, 0.06, 0.01, 0.22, 0.16, 0.02, 0.37, 0.34, 0.06, 0.56, 0.31, 0.2, 0.67, 0.11), tolerance = 1e-07)
    expect_equal(x$futilityStop, c(0.47, 0.25, 0.13, 0.1, 0.01, 0.01), tolerance = 1e-07)
    expect_equal(x$futilityPerStage[1, ], c(0.19, 0.12, 0.1, 0.1, 0.01, 0.01), tolerance = 1e-07)
    expect_equal(x$futilityPerStage[2, ], c(0.28, 0.13, 0.03, 0, 0, 0), tolerance = 1e-07)
expect_equal(x$earlyStop[1, ], c(0.19, 0.12, 0.11, 0.12, 0.07, 0.21), tolerance = 1e-07) expect_equal(x$earlyStop[2, ], c(0.28, 0.18, 0.25, 0.37, 0.56, 0.67), tolerance = 1e-07) expect_equal(x$successPerStage[1, ], c(0, 0, 0.01, 0.02, 0.06, 0.2), tolerance = 1e-07) expect_equal(x$successPerStage[2, ], c(0, 0.05, 0.22, 0.37, 0.56, 0.67), tolerance = 1e-07) expect_equal(x$successPerStage[3, ], c(0.01, 0.06, 0.16, 0.34, 0.31, 0.11), tolerance = 1e-07) expect_equal(unlist(as.list(x$selectedArms)), c(1, 0.81, 0.53, 1, 0.88, 0.7, 1, 0.89, 0.64, 1, 0.88, 0.51, 1, 0.93, 0.37, 1, 0.79, 0.12, 1, 0.81, 0.53, 1, 0.88, 0.7, 1, 0.89, 0.64, 1, 0.88, 0.51, 1, 0.93, 0.37, 1, 0.79, 0.12), tolerance = 1e-07) expect_equal(x$numberOfActiveArms[1, ], c(1, 1, 1, 1, 1, 1)) expect_equal(x$numberOfActiveArms[2, ], c(1, 1, 1, 1, 1, 1)) expect_equal(x$numberOfActiveArms[3, ], c(1, 1, 1, 1, 1, 1)) expect_equal(x$expectedNumberOfSubjects, c(182.97526, 204.64426, 195.25807, 156.41809, 139.22312, 94.296637), tolerance = 1e-07) expect_equal(unlist(as.list(x$sampleSizes)), c(20, 74.777896, 78.138507, 20, 71.766138, 76.107578, 20, 69.720212, 75.189157, 20, 60.637889, 60.622327, 20, 55.732819, 56.713222, 20, 47.895918, 41.888746, 10, 37.388948, 39.069254, 10, 35.883069, 38.053789, 10, 34.860106, 37.594578, 10, 30.318944, 30.311164, 10, 27.86641, 28.356611, 10, 23.947959, 20.944373), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$conditionalPowerAchieved[2, ], c(0.22017652, 0.27054625, 0.3536952, 0.48224278, 0.56831776, 0.65933958), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[3, ], c(0.12006552, 0.18276066, 0.26908136, 0.50518351, 0.66786884, 0.67359844), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) 
xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))) expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05) expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05) expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05) expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05) expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05) expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05) expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05) expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms, tolerance = 1e-05) expect_equal(xCodeBased$expectedNumberOfSubjects, x$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } allocationRatioPlanned <- 2 factor <- 1 + 1 / allocationRatioPlanned y <- getSimulationMeans(design, plannedSubjects = round(factor * c(20, 40, 60)), normalApproximation = TRUE, stDev = 1.5, conditionalPower = 0.80, minNumberOfSubjectsPerStage = round(factor * c(NA, 20, 20)), maxNumberOfSubjectsPerStage = round(factor * c(NA, 80, 80)), alternative = seq(0, 1, 0.2), # thetaH1 = 0.5, maxNumberOfIterations = 100, allocationRatioPlanned = allocationRatioPlanned, seed = 5678 ) comp1 <- y$overallReject - x$rejectAtLeastOne ## Comparison of the results of numeric object 'comp1' with expected results expect_equal(comp1, c(0.03, 0.07, -0.04, 0.01, 0.02, -0.02), tolerance = 1e-07) comp2 <- y$rejectPerStage - 
x$rejectedArmsPerStage[, , 1] ## Comparison of the results of matrixarray object 'comp2' with expected results expect_equal(comp2[1, ], c(0, 0.02, 0.01, -0.01, 0.04, -0.09), tolerance = 1e-07) expect_equal(comp2[2, ], c(0.03, 0, -0.07, 0.06, 0.02, 0.03), tolerance = 1e-07) expect_equal(comp2[3, ], c(0, 0.05, 0.02, -0.04, -0.04, 0.04), tolerance = 1e-07) comp3 <- y$futilityPerStage - x$futilityPerStage ## Comparison of the results of matrixarray object 'comp3' with expected results expect_equal(comp3[1, ], c(0.17, 0, 0.04, -0.04, 0, 0.02), tolerance = 1e-07) expect_equal(comp3[2, ], c(-0.05, 0.01, 0, 0, 0, 0), tolerance = 1e-07) comp4 <- round(y$sampleSizes - (x$sampleSizes[, , 1] + x$sampleSizes[, , 2]), 1) ## Comparison of the results of matrixarray object 'comp4' with expected results expect_equal(comp4[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(comp4[2, ], c(-2.8, -1.3, -0.3, -0.1, -1.4, 10.8), tolerance = 1e-07) expect_equal(comp4[3, ], c(1.7, -3.3, -3.4, 13.2, -9.7, -6.7), tolerance = 1e-07) comp5 <- round(y$expectedNumberOfSubjects - x$expectedNumberOfSubjects, 1) ## Comparison of the results of numeric object 'comp5' with expected results expect_equal(comp5, c(-37.8, -8.9, -5.5, 10.1, -12.7, 15.8), tolerance = 1e-07) comp6 <- x$earlyStop - y$earlyStop ## Comparison of the results of matrixarray object 'comp6' with expected results expect_equal(comp6[1, ], c(-0.43, -0.22, -0.58, -0.5, -0.27, -0.48), tolerance = 1e-07) expect_equal(comp6[2, ], c(-0.05, -0.32, -0.59, 0.04, 0.06, -0.17), tolerance = 1e-07) }) test_that("'getSimulationMultiArmMeans': comparison of base and multi-arm, Fisher design", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns} # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips} # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections} # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmMeans} # @refFS[Formula]{fs:simulationMultiArmDoseResponse} # 
@refFS[Formula]{fs:simulationMultiArmMeansGenerate} # @refFS[Formula]{fs:simulationMultiArmMeansTestStatistics} # @refFS[Formula]{fs:simulationMultiArmSelections} # @refFS[Formula]{fs:multiarmRejectionRule} design <- getDesignFisher(alpha0Vec = c(0.3, 0.4), informationRates = c(0.3, 0.6, 1)) x <- getSimulationMultiArmMeans( design = design, activeArms = 1, plannedSubjects = c(20, 40, 60), stDev = 1.5, muMaxVector = seq(0, 1, 0.2), conditionalPower = 0.80, minNumberOfSubjectsPerStage = c(NA, 20, 20), maxNumberOfSubjectsPerStage = c(NA, 80, 80), # thetaH1 = 0.5, maxNumberOfIterations = 100, allocationRatioPlanned = 2, seed = 1234 ) ## Comparison of the results of SimulationResultsMultiArmMeans object 'x' with expected results expect_equal(x$iterations[1, ], c(100, 100, 100, 100, 100, 100)) expect_equal(x$iterations[2, ], c(28, 41, 50, 54, 56, 51)) expect_equal(x$iterations[3, ], c(7, 24, 27, 21, 24, 7)) expect_equal(x$rejectAtLeastOne, c(0.03, 0.08, 0.28, 0.61, 0.75, 0.89), tolerance = 1e-07) expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0.02, 0, 0.01, 0.05, 0.01, 0.02, 0.05, 0.16, 0.07, 0.17, 0.3, 0.14, 0.24, 0.31, 0.2, 0.39, 0.44, 0.06), tolerance = 1e-07) expect_equal(x$futilityStop, c(0.91, 0.7, 0.52, 0.32, 0.21, 0.1), tolerance = 1e-07) expect_equal(x$futilityPerStage[1, ], c(0.7, 0.54, 0.45, 0.29, 0.2, 0.1), tolerance = 1e-07) expect_equal(x$futilityPerStage[2, ], c(0.21, 0.16, 0.07, 0.03, 0.01, 0), tolerance = 1e-07) expect_equal(x$earlyStop[1, ], c(0.72, 0.59, 0.5, 0.46, 0.44, 0.49), tolerance = 1e-07) expect_equal(x$earlyStop[2, ], c(0.21, 0.17, 0.23, 0.33, 0.32, 0.44), tolerance = 1e-07) expect_equal(x$successPerStage[1, ], c(0.02, 0.05, 0.05, 0.17, 0.24, 0.39), tolerance = 1e-07) expect_equal(x$successPerStage[2, ], c(0, 0.01, 0.16, 0.3, 0.31, 0.44), tolerance = 1e-07) expect_equal(x$successPerStage[3, ], c(0.01, 0.02, 0.07, 0.14, 0.2, 0.06), tolerance = 1e-07) expect_equal(unlist(as.list(x$selectedArms)), c(1, 0.28, 0.07, 1, 0.41, 0.24, 1, 0.5, 
0.27, 1, 0.54, 0.21, 1, 0.56, 0.24, 1, 0.51, 0.07, 1, 0.28, 0.07, 1, 0.41, 0.24, 1, 0.5, 0.27, 1, 0.54, 0.21, 1, 0.56, 0.24, 1, 0.51, 0.07), tolerance = 1e-07) expect_equal(x$numberOfActiveArms[1, ], c(1, 1, 1, 1, 1, 1)) expect_equal(x$numberOfActiveArms[2, ], c(1, 1, 1, 1, 1, 1)) expect_equal(x$numberOfActiveArms[3, ], c(1, 1, 1, 1, 1, 1)) expect_equal(x$expectedNumberOfSubjects, c(68.211396, 101.92536, 114.30453, 107.14861, 109.24288, 79.622055), tolerance = 1e-07) expect_equal(unlist(as.list(x$sampleSizes)), c(20, 70.979514, 80, 20, 71.410143, 77.800325, 20, 69.572428, 79.321509, 20, 66.884783, 72.926791, 20, 62.423876, 74.4634, 20, 55.785406, 66.154471, 10, 35.489757, 40, 10, 35.705072, 38.900163, 10, 34.786214, 39.660755, 10, 33.442392, 36.463396, 10, 31.211938, 37.2317, 10, 27.892703, 33.077236), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$conditionalPowerAchieved[2, ], c(0.53965216, 0.44870166, 0.54176291, 0.51257459, 0.62161545, 0.65580386), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[3, ], c(0.33271205, 0.28302479, 0.35942136, 0.59988705, 0.63386368, 0.5469144), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))) expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05) expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05) expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05) expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05) expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05) 
expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05) expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05) expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms, tolerance = 1e-05) expect_equal(xCodeBased$expectedNumberOfSubjects, x$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } allocationRatioPlanned <- 2 factor <- 1 + 1 / allocationRatioPlanned y <- getSimulationMeans(design, plannedSubjects = round(factor * c(20, 40, 60)), normalApproximation = TRUE, stDev = 1.5, conditionalPower = 0.80, minNumberOfSubjectsPerStage = round(factor * c(NA, 20, 20)), maxNumberOfSubjectsPerStage = round(factor * c(NA, 80, 80)), alternative = seq(0, 1, 0.2), # thetaH1 = 0.5, maxNumberOfIterations = 100, allocationRatioPlanned = allocationRatioPlanned, seed = 5678 ) comp1 <- y$overallReject - x$rejectAtLeastOne ## Comparison of the results of numeric object 'comp1' with expected results expect_equal(comp1, c(-0.01, 0.02, 0.05, -0.03, -0.04, -0.04), tolerance = 1e-07) comp2 <- y$rejectPerStage - x$rejectedArmsPerStage[, , 1] ## Comparison of the results of matrixarray object 'comp2' with expected results expect_equal(comp2[1, ], c(-0.01, -0.02, 0.05, -0.01, -0.05, -0.06), tolerance = 1e-07) expect_equal(comp2[2, ], c(0.01, 0.03, -0.07, -0.08, 0.04, 0.05), tolerance = 1e-07) expect_equal(comp2[3, ], c(-0.01, 0.01, 0.07, 0.06, -0.03, -0.03), tolerance = 1e-07) comp3 <- y$futilityPerStage - x$futilityPerStage ## Comparison of the results of matrixarray object 'comp3' with expected results expect_equal(comp3[1, ], c(0.08, 0.03, 0.01, 0.04, 0.02, 
0.04), tolerance = 1e-07) expect_equal(comp3[2, ], c(-0.1, 0.03, 0, 0, 0.03, 0.01), tolerance = 1e-07) comp4 <- round(y$sampleSizes - (x$sampleSizes[, , 1] + x$sampleSizes[, , 2]), 1) ## Comparison of the results of matrixarray object 'comp4' with expected results expect_equal(comp4[1, ], c(0, 0, 0, 0, 0, 0)) expect_equal(comp4[2, ], c(-3.6, -5.8, 8.4, 5.5, -3.5, 4.7), tolerance = 1e-07) expect_equal(comp4[3, ], c(0, -1.8, -3.2, 7.1, -0.8, -19), tolerance = 1e-07) comp5 <- round(y$expectedNumberOfSubjects - x$expectedNumberOfSubjects, 1) ## Comparison of the results of numeric object 'comp5' with expected results expect_equal(comp5, c(-5.8, -11.9, -2.3, 7.1, -3.9, -0.3), tolerance = 1e-07) comp6 <- x$earlyStop - y$earlyStop ## Comparison of the results of matrixarray object 'comp6' with expected results expect_equal(comp6[1, ], c(-0.19, -0.13, -0.3, -0.45, -0.28, -0.31), tolerance = 1e-07) expect_equal(comp6[2, ], c(-0.62, -0.57, -0.74, -0.5, -0.42, -0.53), tolerance = 1e-07) }) rpact/tests/testthat/test-f_design_group_sequential_beta_spending.R0000644000176200001440000013172114410770117025602 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_design_group_sequential_beta_spending.R ## | Creation date: 06 February 2023, 12:12:18 ## | File version: $Revision: 6902 $ ## | Last changed: $Date: 2023-03-29 10:01:19 +0200 (Mi, 29 Mrz 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Group Sequential Design Functionality with 
Beta Spending")

# Two-sided group sequential design combining Kim & DeMets-type alpha spending
# ("asKD", gammaA = 2.5) with Kim & DeMets-type beta spending ("bsKD",
# gammaB = 1.5) and non-binding futility bounds; checks the boundary values,
# the alpha/beta spent per stage, and the derived design characteristics.
test_that("'getDesignGroupSequential' with two-sided beta spending (1)", {
    suppressWarnings(x <- getDesignGroupSequential(
        informationRates = c(0.3, 0.4, 0.8, 1), alpha = 0.05,
        typeOfDesign = "asKD", gammaA = 2.5, beta = 0.1, sided = 2,
        typeBetaSpending = "bsKD", gammaB = 1.5, bindingFutility = FALSE
    ))

    ## Comparison of the results of TrialDesignGroupSequential object 'x' with expected results
    expect_equal(x$power, c(0.12305271, 0.24472466, 0.79581714, 0.89999927), tolerance = 1e-06)
    expect_equal(x$futilityBounds, c(NA_real_, 0.13460523, 1.5007674), tolerance = 1e-06)
    expect_equal(x$alphaSpent, c(0.0024647515, 0.0050596443, 0.028621671, 0.05), tolerance = 1e-06)
    expect_equal(x$betaSpent, c(0, 0.010609935, 0.065960997, 0.1), tolerance = 1e-06)
    expect_equal(x$criticalValues, c(3.0276355, 2.8984727, 2.2289649, 2.0639032), tolerance = 1e-06)
    expect_equal(x$stageLevels, c(0.0012323758, 0.0018749246, 0.01290812, 0.019513449), tolerance = 1e-06)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # Round-trip checks: printing/summarizing must not error, and a design
        # regenerated from its own R code must reproduce the same numbers
        invisible(capture.output(expect_error(print(x), NA)))
        expect_output(print(x)$show())
        invisible(capture.output(expect_error(summary(x), NA)))
        expect_output(summary(x)$show())
        suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))))
        expect_equal(xCodeBased$power, x$power, tolerance = 1e-05)
        expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05)
        expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05)
        expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05)
        expect_equal(xCodeBased$criticalValues, x$criticalValues, tolerance = 1e-05)
        expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05)
        expect_type(names(x), "character")
        df <- as.data.frame(x)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Derived characteristics (fixed sample size, shift, inflation factor,
    # information, stagewise power and ASN) of the design above
    y <- getDesignCharacteristics(x)

    ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results
    expect_equal(y$nFixed, 10.507423, tolerance = 1e-06)
    expect_equal(y$shift, 11.628636, tolerance = 1e-06)
    expect_equal(y$inflationFactor, 1.1067068, tolerance = 1e-06)
    expect_equal(y$information, c(3.4885908, 4.6514544, 9.3029088, 11.628636), tolerance = 1e-06)
    expect_equal(y$power, c(0.1230532, 0.24472551, 0.79581823, 0.9), tolerance = 1e-06)
    expect_equal(y$rejectionProbabilities, c(0.1230532, 0.12167232, 0.55109272, 0.10418177), tolerance = 1e-06)
    expect_equal(y$futilityProbabilities, c(0, 0.010609873, 0.055350639), tolerance = 1e-06)
    expect_equal(y$averageSampleNumber1, 0.78930804, tolerance = 1e-06)
    expect_equal(y$averageSampleNumber01, 0.89600313, tolerance = 1e-06)
    expect_equal(y$averageSampleNumber0, 0.85810726, tolerance = 1e-06)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(y), NA)))
        expect_output(print(y)$show())
        invisible(capture.output(expect_error(summary(y), NA)))
        expect_output(summary(y)$show())
        suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL))))
        expect_equal(yCodeBased$nFixed, y$nFixed, tolerance = 1e-05)
        expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05)
        expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05)
        expect_equal(yCodeBased$information, y$information, tolerance = 1e-05)
        expect_equal(yCodeBased$power, y$power, tolerance = 1e-05)
        expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05)
        expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05)
        expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05)
        expect_equal(yCodeBased$averageSampleNumber01, y$averageSampleNumber01, tolerance = 1e-05)
        expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05)
        expect_type(names(y), "character")
        df <- as.data.frame(y)
expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with two-sided beta spending (2)", { .skipTestIfDisabled() suppressWarnings(x <- getDesignGroupSequential( informationRates = c(0.4, 0.65, 0.8, 1), alpha = 0.05, typeOfDesign = "asKD", gammaA = 2.5, beta = 0.1, sided = 2, typeBetaSpending = "bsKD", gammaB = 1.5, bindingFutility = TRUE )) ## Comparison of the results of TrialDesignGroupSequential object 'x' with expected results expect_equal(x$power, c(0.25984202, 0.62910001, 0.78681992, 0.89999952), tolerance = 1e-06) expect_equal(x$futilityBounds, c(0.3085062, 0.97461473, 1.3896954), tolerance = 1e-06) expect_equal(x$alphaSpent, c(0.0050596443, 0.017031519, 0.02862167, 0.04999999), tolerance = 1e-06) expect_equal(x$betaSpent, c(0.025298221, 0.052404675, 0.071554176, 0.1), tolerance = 1e-06) expect_equal(x$criticalValues, c(2.8032117, 2.4453423, 2.2956849, 1.9878913), tolerance = 1e-06) expect_equal(x$stageLevels, c(0.0025298221, 0.0072357361, 0.010846951, 0.023411854), tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$power, x$power, tolerance = 1e-05) expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05) expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05) expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05) expect_equal(xCodeBased$criticalValues, x$criticalValues, tolerance = 1e-05) expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") 
expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } y <- getDesignCharacteristics(x) ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results expect_equal(y$nFixed, 10.507423, tolerance = 1e-06) expect_equal(y$shift, 11.657317, tolerance = 1e-06) expect_equal(y$inflationFactor, 1.1094363, tolerance = 1e-06) expect_equal(y$information, c(4.6629267, 7.5772558, 9.3258533, 11.657317), tolerance = 1e-06) expect_equal(y$power, c(0.25984263, 0.62910091, 0.78682068, 0.9), tolerance = 1e-06) expect_equal(y$rejectionProbabilities, c(0.25984263, 0.36925828, 0.15771977, 0.11317932), tolerance = 1e-06) expect_equal(y$futilityProbabilities, c(0.025298122, 0.027106314, 0.019149403), tolerance = 1e-06) expect_equal(y$averageSampleNumber1, 0.72647427, tolerance = 1e-06) expect_equal(y$averageSampleNumber01, 0.80995959, tolerance = 1e-06) expect_equal(y$averageSampleNumber0, 0.72430062, tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(y), NA))) expect_output(print(y)$show()) invisible(capture.output(expect_error(summary(y), NA))) expect_output(summary(y)$show()) suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL)))) expect_equal(yCodeBased$nFixed, y$nFixed, tolerance = 1e-05) expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05) expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05) expect_equal(yCodeBased$information, y$information, tolerance = 1e-05) expect_equal(yCodeBased$power, y$power, tolerance = 1e-05) expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber01, 
y$averageSampleNumber01, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05) expect_type(names(y), "character") df <- as.data.frame(y) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with two-sided beta spending (3)", { .skipTestIfDisabled() suppressWarnings(x <- getDesignGroupSequential( informationRates = c(0.15, 0.25, 0.8, 1), alpha = 0.025, typeOfDesign = "asOF", beta = 0.07, sided = 2, typeBetaSpending = "bsUser", userBetaSpending = c(0.15, 0.25, 0.8, 1) * 0.07, bindingFutility = FALSE )) ## Comparison of the results of TrialDesignGroupSequential object 'x' with expected results expect_equal(x$power, c(6.6585296e-07, 0.0017900571, 0.82193405, 0.93), tolerance = 1e-06) expect_equal(x$futilityBounds, c(NA_real_, NA_real_, 1.8509555), tolerance = 1e-06) expect_equal(x$alphaSpent, c(2.2511014e-10, 1.1742122e-06, 0.01045986, 0.025), tolerance = 1e-06) expect_equal(x$betaSpent, c(0, 0, 0.051333333, 0.07), tolerance = 1e-06) expect_equal(x$criticalValues, c(6.3431527, 4.8600403, 2.560259, 2.292451), tolerance = 1e-06) expect_equal(x$stageLevels, c(1.1255508e-10, 5.8680939e-07, 0.0052297087, 0.010939815), tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$power, x$power, tolerance = 1e-05) expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05) expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05) expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05) expect_equal(xCodeBased$criticalValues, 
x$criticalValues, tolerance = 1e-05) expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } y <- getDesignCharacteristics(x) ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results expect_equal(y$nFixed, 13.817529, tolerance = 1e-06) expect_equal(y$shift, 15.164247, tolerance = 1e-06) expect_equal(y$inflationFactor, 1.0974644, tolerance = 1e-06) expect_equal(y$information, c(2.274637, 3.7910617, 12.131397, 15.164247), tolerance = 1e-06) expect_equal(y$power, c(6.6585297e-07, 0.0017900572, 0.82193406, 0.93), tolerance = 1e-06) expect_equal(y$rejectionProbabilities, c(6.6585297e-07, 0.0017893913, 0.820144, 0.10806594), tolerance = 1e-06) expect_equal(y$futilityProbabilities, c(0, 0, 0.051333332), tolerance = 1e-06) expect_equal(y$averageSampleNumber1, 0.90470787, tolerance = 1e-06) expect_equal(y$averageSampleNumber01, 0.93283887, tolerance = 1e-06) expect_equal(y$averageSampleNumber0, 0.88976115, tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(y), NA))) expect_output(print(y)$show()) invisible(capture.output(expect_error(summary(y), NA))) expect_output(summary(y)$show()) suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL)))) expect_equal(yCodeBased$nFixed, y$nFixed, tolerance = 1e-05) expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05) expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05) expect_equal(yCodeBased$information, y$information, tolerance = 1e-05) expect_equal(yCodeBased$power, y$power, tolerance = 1e-05) expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05) 
expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber01, y$averageSampleNumber01, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05) expect_type(names(y), "character") df <- as.data.frame(y) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with two-sided beta spending (4)", { .skipTestIfDisabled() suppressWarnings(x <- getDesignGroupSequential( informationRates = c(0.35, 0.55, 0.8, 1), alpha = 0.035, beta = 0.065, sided = 2, typeOfDesign = "asKD", gammaA = 1.44, typeBetaSpending = "bsKD", gammaB = 1.35, bindingFutility = TRUE )) ## Comparison of the results of TrialDesignGroupSequential object 'x' with expected results expect_equal(x$power, c(0.36066758, 0.64302509, 0.86790998, 0.93499965), tolerance = 1e-06) expect_equal(x$futilityBounds, c(0.26890478, 0.80339676, 1.6440939), tolerance = 1e-06) expect_equal(x$alphaSpent, c(0.0077183777, 0.014797567, 0.02538152, 0.035), tolerance = 1e-06) expect_equal(x$betaSpent, c(0.015754521, 0.029000333, 0.048093329, 0.065), tolerance = 1e-06) expect_equal(x$criticalValues, c(2.6641472, 2.5778904, 2.3970592, 2.2417317), tolerance = 1e-06) expect_equal(x$stageLevels, c(0.0038591889, 0.0049702762, 0.0082636274, 0.01248936), tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$power, x$power, tolerance = 1e-05) 
expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05) expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05) expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05) expect_equal(xCodeBased$criticalValues, x$criticalValues, tolerance = 1e-05) expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } y <- getDesignCharacteristics(x) ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results expect_equal(y$nFixed, 13.122219, tolerance = 1e-06) expect_equal(y$shift, 15.212676, tolerance = 1e-06) expect_equal(y$inflationFactor, 1.1593067, tolerance = 1e-06) expect_equal(y$information, c(5.3244366, 8.3669718, 12.170141, 15.212676), tolerance = 1e-06) expect_equal(y$power, c(0.36066826, 0.64302593, 0.86791055, 0.935), tolerance = 1e-06) expect_equal(y$rejectionProbabilities, c(0.36066826, 0.28235767, 0.22488463, 0.067089448), tolerance = 1e-06) expect_equal(y$futilityProbabilities, c(0.015754457, 0.013245738, 0.019092883), tolerance = 1e-06) expect_equal(y$averageSampleNumber1, 0.66487165, tolerance = 1e-06) expect_equal(y$averageSampleNumber01, 0.81339132, tolerance = 1e-06) expect_equal(y$averageSampleNumber0, 0.7082656, tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(y), NA))) expect_output(print(y)$show()) invisible(capture.output(expect_error(summary(y), NA))) expect_output(summary(y)$show()) suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL)))) expect_equal(yCodeBased$nFixed, y$nFixed, tolerance = 1e-05) expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05) expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05) 
expect_equal(yCodeBased$information, y$information, tolerance = 1e-05) expect_equal(yCodeBased$power, y$power, tolerance = 1e-05) expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber01, y$averageSampleNumber01, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05) expect_type(names(y), "character") df <- as.data.frame(y) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with two-sided beta spending (5)", { .skipTestIfDisabled() suppressWarnings(x <- getDesignGroupSequential( informationRates = c(0.35, 0.4, 0.8, 1), alpha = 0.025, typeOfDesign = "asOF", beta = 0.07, sided = 2, typeBetaSpending = "bsKD", gammaB = 1, bindingFutility = TRUE )) ## Comparison of the results of TrialDesignGroupSequential object 'x' with expected results expect_equal(x$power, c(0.040854392, 0.095590668, 0.82438045, 0.93), tolerance = 1e-06) expect_equal(x$futilityBounds, c(0.40613204, 0.2966315, 1.7350381), tolerance = 1e-06) expect_equal(x$alphaSpent, c(4.8451862e-05, 0.00015681312, 0.010459859, 0.025), tolerance = 1e-06) expect_equal(x$betaSpent, c(0.0245, 0.028, 0.056, 0.07), tolerance = 1e-06) expect_equal(x$criticalValues, c(4.0629719, 3.8052638, 2.5524087, 2.1888976), tolerance = 1e-06) expect_equal(x$stageLevels, c(2.4225931e-05, 7.0826578e-05, 0.0053490474, 0.014302143), tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) 
suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$power, x$power, tolerance = 1e-05) expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05) expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05) expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05) expect_equal(xCodeBased$criticalValues, x$criticalValues, tolerance = 1e-05) expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } y <- getDesignCharacteristics(x) ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results expect_equal(y$nFixed, 13.817529, tolerance = 1e-06) expect_equal(y$shift, 15.406346, tolerance = 1e-06) expect_equal(y$inflationFactor, 1.1149856, tolerance = 1e-06) expect_equal(y$information, c(5.3922213, 6.1625386, 12.325077, 15.406346), tolerance = 1e-06) expect_equal(y$power, c(0.040854392, 0.095590669, 0.82438046, 0.93), tolerance = 1e-06) expect_equal(y$rejectionProbabilities, c(0.040854392, 0.054736277, 0.72878979, 0.10561954), tolerance = 1e-06) expect_equal(y$futilityProbabilities, c(0.0245, 0.0034999999, 0.028), tolerance = 1e-06) expect_equal(y$averageSampleNumber1, 0.85989912, tolerance = 1e-06) expect_equal(y$averageSampleNumber01, 0.85050502, tolerance = 1e-06) expect_equal(y$averageSampleNumber0, 0.71709153, tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(y), NA))) expect_output(print(y)$show()) invisible(capture.output(expect_error(summary(y), NA))) expect_output(summary(y)$show()) suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL)))) expect_equal(yCodeBased$nFixed, y$nFixed, 
tolerance = 1e-05) expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05) expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05) expect_equal(yCodeBased$information, y$information, tolerance = 1e-05) expect_equal(yCodeBased$power, y$power, tolerance = 1e-05) expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber01, y$averageSampleNumber01, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05) expect_type(names(y), "character") df <- as.data.frame(y) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with two-sided beta spending (6)", { .skipTestIfDisabled() suppressWarnings(x <- getDesignGroupSequential( informationRates = c(0.35, 0.4, 0.8, 1), alpha = 0.025, typeOfDesign = "asOF", beta = 0.07, sided = 2, typeBetaSpending = "bsUser", userBetaSpending = c(0.15, 0.4, 0.8, 1) * 0.01, bindingFutility = TRUE )) ## Comparison of the results of TrialDesignGroupSequential object 'x' with expected results expect_equal(x$power, c(0.10679919, 0.21860668, 0.95487449, 0.99), tolerance = 1e-06) expect_equal(x$futilityBounds, c(NA_real_, 0.30649568, 1.7163392), tolerance = 1e-06) expect_equal(x$alphaSpent, c(4.8451862e-05, 0.00015681312, 0.010459859, 0.025), tolerance = 1e-06) expect_equal(x$betaSpent, c(0, 0.0029411764, 0.0076470588, 0.01), tolerance = 1e-06) expect_equal(x$criticalValues, c(4.0629719, 3.8052638, 2.559217, 2.2179478), tolerance = 1e-06) expect_equal(x$stageLevels, c(2.4225931e-05, 7.0826578e-05, 0.0052454112, 0.013279197), tolerance = 1e-06) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$power, x$power, tolerance = 1e-05) expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05) expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05) expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05) expect_equal(xCodeBased$criticalValues, x$criticalValues, tolerance = 1e-05) expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } y <- getDesignCharacteristics(x) ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results expect_equal(y$nFixed, 13.817529, tolerance = 1e-06) expect_equal(y$shift, 14.763145, tolerance = 1e-06) expect_equal(y$inflationFactor, 1.068436, tolerance = 1e-06) expect_equal(y$information, c(5.1671008, 5.9052581, 11.810516, 14.763145), tolerance = 1e-06) expect_equal(y$power, c(0.036739326, 0.087021046, 0.80817834, 0.93), tolerance = 1e-06) expect_equal(y$rejectionProbabilities, c(0.036739326, 0.05028172, 0.7211573, 0.12182166), tolerance = 1e-06) expect_equal(y$futilityProbabilities, c(0, 0.013748548, 0.0362213), tolerance = 1e-06) expect_equal(y$averageSampleNumber1, 0.84003165, tolerance = 1e-06) expect_equal(y$averageSampleNumber01, 0.86354683, tolerance = 1e-06) expect_equal(y$averageSampleNumber0, 0.76707979, tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(y), NA))) expect_output(print(y)$show()) 
invisible(capture.output(expect_error(summary(y), NA))) expect_output(summary(y)$show()) suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL)))) expect_equal(yCodeBased$nFixed, y$nFixed, tolerance = 1e-05) expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05) expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05) expect_equal(yCodeBased$information, y$information, tolerance = 1e-05) expect_equal(yCodeBased$power, y$power, tolerance = 1e-05) expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber01, y$averageSampleNumber01, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05) expect_type(names(y), "character") df <- as.data.frame(y) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with two-sided beta spending (7)", { .skipTestIfDisabled() suppressWarnings(x <- getDesignGroupSequential( informationRates = c(0.15, 0.4, 0.8, 1), alpha = 0.025, typeOfDesign = "asOF", beta = 0.01, sided = 2, typeBetaSpending = "bsKD", gammaB = 1, bindingFutility = FALSE )) ## Comparison of the results of TrialDesignGroupSequential object 'x' with expected results expect_equal(x$power, c(3.8817384e-06, 0.23459419, 0.95988861, 0.99), tolerance = 1e-06) expect_equal(x$futilityBounds, c(NA_real_, 0.33832477, 1.7778049), tolerance = 1e-06) expect_equal(x$alphaSpent, c(2.2511014e-10, 0.00015681311, 0.010459859, 0.02499999), tolerance = 1e-06) expect_equal(x$betaSpent, c(0, 0.0029411765, 0.0076470589, 0.0099999999), tolerance = 1e-06) 
expect_equal(x$criticalValues, c(6.3431527, 3.7800251, 2.5620799, 2.2927506), tolerance = 1e-06) expect_equal(x$stageLevels, c(1.1255508e-10, 7.8406284e-05, 0.0052023689, 0.010931184), tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$power, x$power, tolerance = 1e-05) expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05) expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05) expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05) expect_equal(xCodeBased$criticalValues, x$criticalValues, tolerance = 1e-05) expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } y <- getDesignCharacteristics(x) ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results expect_equal(y$nFixed, 20.864346, tolerance = 1e-06) expect_equal(y$shift, 23.351275, tolerance = 1e-06) expect_equal(y$inflationFactor, 1.1191952, tolerance = 1e-06) expect_equal(y$information, c(3.5026912, 9.3405098, 18.68102, 23.351275), tolerance = 1e-06) expect_equal(y$power, c(3.8817384e-06, 0.23459418, 0.9598886, 0.99), tolerance = 1e-06) expect_equal(y$rejectionProbabilities, c(3.8817384e-06, 0.2345903, 0.72529442, 0.030111395), tolerance = 1e-06) expect_equal(y$futilityProbabilities, c(0, 0.0029411765, 0.0047058824), tolerance = 1e-06) expect_equal(y$averageSampleNumber1, 0.79628246, tolerance = 1e-06) expect_equal(y$averageSampleNumber01, 0.91655265, tolerance = 1e-06) 
expect_equal(y$averageSampleNumber0, 0.79046909, tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(y), NA))) expect_output(print(y)$show()) invisible(capture.output(expect_error(summary(y), NA))) expect_output(summary(y)$show()) suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL)))) expect_equal(yCodeBased$nFixed, y$nFixed, tolerance = 1e-05) expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05) expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05) expect_equal(yCodeBased$information, y$information, tolerance = 1e-05) expect_equal(yCodeBased$power, y$power, tolerance = 1e-05) expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber01, y$averageSampleNumber01, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05) expect_type(names(y), "character") df <- as.data.frame(y) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with two-sided beta spending (8)", { .skipTestIfDisabled() suppressWarnings(x <- getDesignGroupSequential( informationRates = c(0.15, 0.4, 0.8, 1), alpha = 0.025, typeOfDesign = "asOF", beta = 0.01, sided = 2, typeBetaSpending = "bsUser", userBetaSpending = c(0.15, 0.4, 0.8, 1) * 0.01, bindingFutility = FALSE )) ## Comparison of the results of TrialDesignGroupSequential object 'x' with expected results expect_equal(x$power, c(3.8817384e-06, 0.23459419, 0.95988861, 0.99), tolerance = 1e-06) expect_equal(x$futilityBounds, c(NA_real_, 0.33832477, 
1.7778049), tolerance = 1e-06) expect_equal(x$alphaSpent, c(2.2511014e-10, 0.00015681311, 0.010459859, 0.02499999), tolerance = 1e-06) expect_equal(x$betaSpent, c(0, 0.0029411765, 0.0076470589, 0.0099999999), tolerance = 1e-06) expect_equal(x$criticalValues, c(6.3431527, 3.7800251, 2.5620799, 2.2927506), tolerance = 1e-06) expect_equal(x$stageLevels, c(1.1255508e-10, 7.8406284e-05, 0.0052023689, 0.010931184), tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$power, x$power, tolerance = 1e-05) expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05) expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05) expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05) expect_equal(xCodeBased$criticalValues, x$criticalValues, tolerance = 1e-05) expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } y <- getDesignCharacteristics(x) ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results expect_equal(y$nFixed, 20.864346, tolerance = 1e-06) expect_equal(y$shift, 23.351275, tolerance = 1e-06) expect_equal(y$inflationFactor, 1.1191952, tolerance = 1e-06) expect_equal(y$information, c(3.5026912, 9.3405098, 18.68102, 23.351275), tolerance = 1e-06) expect_equal(y$power, c(3.8817384e-06, 0.23459418, 0.9598886, 0.99), tolerance = 1e-06) expect_equal(y$rejectionProbabilities, c(3.8817384e-06, 0.2345903, 0.72529442, 0.030111395), tolerance = 1e-06) 
expect_equal(y$futilityProbabilities, c(0, 0.0029411765, 0.0047058824), tolerance = 1e-06) expect_equal(y$averageSampleNumber1, 0.79628246, tolerance = 1e-06) expect_equal(y$averageSampleNumber01, 0.91655265, tolerance = 1e-06) expect_equal(y$averageSampleNumber0, 0.79046909, tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(y), NA))) expect_output(print(y)$show()) invisible(capture.output(expect_error(summary(y), NA))) expect_output(summary(y)$show()) suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL)))) expect_equal(yCodeBased$nFixed, y$nFixed, tolerance = 1e-05) expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05) expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05) expect_equal(yCodeBased$information, y$information, tolerance = 1e-05) expect_equal(yCodeBased$power, y$power, tolerance = 1e-05) expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber01, y$averageSampleNumber01, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05) expect_type(names(y), "character") df <- as.data.frame(y) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with two-sided beta spending (9)", { .skipTestIfDisabled() suppressWarnings(x <- getDesignGroupSequential( informationRates = c(0.35, 0.55, 0.8, 1), alpha = 0.035, beta = 0.065, sided = 2, typeOfDesign = "asKD", gammaA = 1.44, typeBetaSpending = "bsKD", gammaB = 1.35, bindingFutility = FALSE )) ## Comparison of the results of 
TrialDesignGroupSequential object 'x' with expected results expect_equal(x$power, c(0.37576504, 0.66123596, 0.8775957, 0.93499972), tolerance = 1e-06) expect_equal(x$futilityBounds, c(0.29185889, 0.85901114, 1.708006), tolerance = 1e-06) expect_equal(x$alphaSpent, c(0.0077183777, 0.014797567, 0.02538152, 0.03499999), tolerance = 1e-06) expect_equal(x$betaSpent, c(0.015754521, 0.029000333, 0.048093329, 0.065), tolerance = 1e-06) expect_equal(x$criticalValues, c(2.6641472, 2.5781312, 2.4084661, 2.3291234), tolerance = 1e-06) expect_equal(x$stageLevels, c(0.0038591889, 0.0049668138, 0.0080098571, 0.0099262635), tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$power, x$power, tolerance = 1e-05) expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05) expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05) expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05) expect_equal(xCodeBased$criticalValues, x$criticalValues, tolerance = 1e-05) expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } y <- getDesignCharacteristics(x) ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results expect_equal(y$nFixed, 13.122219, tolerance = 1e-06) expect_equal(y$shift, 15.745369, tolerance = 1e-06) expect_equal(y$inflationFactor, 1.1999015, tolerance = 1e-06) expect_equal(y$information, c(5.5108793, 8.6599532, 12.596296, 15.745369), tolerance = 1e-06) 
expect_equal(y$power, c(0.3757656, 0.66123663, 0.87759614, 0.935), tolerance = 1e-06) expect_equal(y$rejectionProbabilities, c(0.3757656, 0.28547103, 0.21635951, 0.057403857), tolerance = 1e-06) expect_equal(y$futilityProbabilities, c(0.015754468, 0.013245752, 0.019092905), tolerance = 1e-06) expect_equal(y$averageSampleNumber1, 0.67674292, tolerance = 1e-06) expect_equal(y$averageSampleNumber01, 0.83002063, tolerance = 1e-06) expect_equal(y$averageSampleNumber0, 0.7178349, tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(y), NA))) expect_output(print(y)$show()) invisible(capture.output(expect_error(summary(y), NA))) expect_output(summary(y)$show()) suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL)))) expect_equal(yCodeBased$nFixed, y$nFixed, tolerance = 1e-05) expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05) expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05) expect_equal(yCodeBased$information, y$information, tolerance = 1e-05) expect_equal(yCodeBased$power, y$power, tolerance = 1e-05) expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber01, y$averageSampleNumber01, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05) expect_type(names(y), "character") df <- as.data.frame(y) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with two-sided beta spending (10)", { .skipTestIfDisabled() suppressWarnings(x <- getDesignGroupSequential( informationRates = 
c(0.15, 0.4, 0.8, 1), alpha = 0.025, typeOfDesign = "asOF", beta = 0.01, sided = 2, typeBetaSpending = "bsUser", userBetaSpending = c(0.15, 0.4, 0.8, 1) * 0.01, bindingFutility = FALSE )) ## Comparison of the results of TrialDesignGroupSequential object 'x' with expected results expect_equal(x$power, c(3.8817384e-06, 0.23459419, 0.95988861, 0.99), tolerance = 1e-06) expect_equal(x$futilityBounds, c(NA_real_, 0.33832477, 1.7778049), tolerance = 1e-06) expect_equal(x$alphaSpent, c(2.2511014e-10, 0.00015681311, 0.010459859, 0.02499999), tolerance = 1e-06) expect_equal(x$betaSpent, c(0, 0.0029411765, 0.0076470589, 0.0099999999), tolerance = 1e-06) expect_equal(x$criticalValues, c(6.3431527, 3.7800251, 2.5620799, 2.2927506), tolerance = 1e-06) expect_equal(x$stageLevels, c(1.1255508e-10, 7.8406284e-05, 0.0052023689, 0.010931184), tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$power, x$power, tolerance = 1e-05) expect_equal(xCodeBased$futilityBounds, x$futilityBounds, tolerance = 1e-05) expect_equal(xCodeBased$alphaSpent, x$alphaSpent, tolerance = 1e-05) expect_equal(xCodeBased$betaSpent, x$betaSpent, tolerance = 1e-05) expect_equal(xCodeBased$criticalValues, x$criticalValues, tolerance = 1e-05) expect_equal(xCodeBased$stageLevels, x$stageLevels, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } y <- getDesignCharacteristics(x) ## Comparison of the results of TrialDesignCharacteristics object 'y' with expected results expect_equal(y$nFixed, 20.864346, tolerance = 
1e-06) expect_equal(y$shift, 23.351275, tolerance = 1e-06) expect_equal(y$inflationFactor, 1.1191952, tolerance = 1e-06) expect_equal(y$information, c(3.5026912, 9.3405098, 18.68102, 23.351275), tolerance = 1e-06) expect_equal(y$power, c(3.8817384e-06, 0.23459418, 0.9598886, 0.99), tolerance = 1e-06) expect_equal(y$rejectionProbabilities, c(3.8817384e-06, 0.2345903, 0.72529442, 0.030111395), tolerance = 1e-06) expect_equal(y$futilityProbabilities, c(0, 0.0029411765, 0.0047058824), tolerance = 1e-06) expect_equal(y$averageSampleNumber1, 0.79628246, tolerance = 1e-06) expect_equal(y$averageSampleNumber01, 0.91655265, tolerance = 1e-06) expect_equal(y$averageSampleNumber0, 0.79046909, tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(y), NA))) expect_output(print(y)$show()) invisible(capture.output(expect_error(summary(y), NA))) expect_output(summary(y)$show()) suppressWarnings(yCodeBased <- eval(parse(text = getObjectRCode(y, stringWrapParagraphWidth = NULL)))) expect_equal(yCodeBased$nFixed, y$nFixed, tolerance = 1e-05) expect_equal(yCodeBased$shift, y$shift, tolerance = 1e-05) expect_equal(yCodeBased$inflationFactor, y$inflationFactor, tolerance = 1e-05) expect_equal(yCodeBased$information, y$information, tolerance = 1e-05) expect_equal(yCodeBased$power, y$power, tolerance = 1e-05) expect_equal(yCodeBased$rejectionProbabilities, y$rejectionProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$futilityProbabilities, y$futilityProbabilities, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber1, y$averageSampleNumber1, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber01, y$averageSampleNumber01, tolerance = 1e-05) expect_equal(yCodeBased$averageSampleNumber0, y$averageSampleNumber0, tolerance = 1e-05) expect_type(names(y), "character") df <- as.data.frame(y) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y) expect_true(is.matrix(mtx)) 
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) rpact/tests/testthat/test-f_simulation_base_survival.R0000644000176200001440000066436214370207346023132 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_simulation_base_survival.R ## | Creation date: 06 February 2023, 15:07:37 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Simulation Survival Function") test_that("'getSimulationSurvival': configuration 1", { # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationSurvival} # @refFS[Formula]{fs:simulationSurvivalTimeGenerate} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} # @refFS[Formula]{fs:simulationSurvivalLogRank} # @refFS[Formula]{fs:simulationSurvivalIncrements} # @refFS[Formula]{fs:simulationSurvivalHazardEstimate} simulationResults <- getSimulationSurvival( maxNumberOfSubjects = 200, plannedEvents = 50, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2), maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results expect_equal(simulationResults$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResults$median2, 37.275405, tolerance = 1e-07) 
expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResults$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResults$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[1, ], c(22.223223, 18.818775, 16.321595, 14.790808), tolerance = 1e-07) expect_equal(simulationResults$studyDuration, c(22.223223, 18.818775, 16.321595, 14.790808), tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResults$overallReject, c(0.01, 0.41, 0.81, 1), tolerance = 1e-07) expect_gte(object = simulationResults$overallReject[1], expected = 0) expect_gte(object = simulationResults$overallReject[2], expected = 0) expect_gte(object = simulationResults$overallReject[3], expected = 0) expect_gte(object = simulationResults$overallReject[4], expected = 0) expect_lte(object = simulationResults$overallReject[1], expected = 1) expect_lte(object = simulationResults$overallReject[2], expected = 1) expect_lte(object = simulationResults$overallReject[3], expected = 1) expect_lte(object = simulationResults$overallReject[4], expected = 1) expect_equal(simulationResults$rejectPerStage[1, ], c(0.01, 0.41, 0.81, 1), tolerance = 1e-07) expect_equal(simulationResults$earlyStop, c(0, 0, 0, 0)) expect_equal(simulationResults$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(simulationResults$expectedNumberOfEvents, c(50, 50, 50, 50)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResults), NA))) expect_output(print(simulationResults)$show()) invisible(capture.output(expect_error(summary(simulationResults), NA))) 
expect_output(summary(simulationResults)$show()) simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultsCodeBased$median1, simulationResults$median1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$median2, simulationResults$median2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$lambda2, simulationResults$lambda2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$hazardRatio, simulationResults$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05) expect_type(names(simulationResults), "character") df <- as.data.frame(simulationResults) expect_s3_class(df, "data.frame") 
expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResults) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': configuration 2", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationSurvival} # @refFS[Formula]{fs:simulationSurvivalTimeGenerate} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} # @refFS[Formula]{fs:simulationSurvivalLogRank} # @refFS[Formula]{fs:simulationSurvivalIncrements} # @refFS[Formula]{fs:simulationSurvivalHazardEstimate} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} design <- getDesignFisher(kMax = 3, alpha0Vec = c(0.5, 0.5)) simulationResults <- getSimulationSurvival( design = design, pi2 = 0.6, pi1 = seq(0.3, 0.45, 0.05), directionUpper = FALSE, maxNumberOfSubjects = 500, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 200), maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results expect_equal(simulationResults$median1, c(23.320299, 19.308487, 16.282985, 13.9131), tolerance = 1e-07) expect_equal(simulationResults$median2, 9.0776496, tolerance = 1e-07) expect_equal(simulationResults$accrualIntensity, c(23.809524, 47.619048, 47.619048), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.029722912, 0.035898576, 0.042568802, 0.04981975), tolerance = 1e-07) expect_equal(simulationResults$lambda2, 0.076357561, tolerance = 1e-07) expect_equal(simulationResults$hazardRatio, c(0.38925958, 0.47013781, 0.55749295, 0.6524534), tolerance = 1e-07) 
expect_equal(simulationResults$analysisTime[1, ], c(5.4183926, 5.2945044, 5.1495619, 5.0392001), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], c(10.130549, 10.39649, 10.458778, 9.7641943), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], c(13.506679, 14.455396, 18.382917, 18.866629), tolerance = 1e-07) expect_equal(simulationResults$studyDuration, c(8.500396, 9.4448778, 11.628285, 12.227203), tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0)) expect_equal(simulationResults$numberOfSubjects[1, ], c(186.51, 180.63, 173.73, 168.48), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], c(406.05, 420.67123, 424.60256, 393.44615), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[3, ], c(428.4, 466.33333, 480.96429, 488.78261), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20, 20)) expect_equal(simulationResults$eventsPerStage[2, ], c(64.483333, 73.054795, 78.884615, 72.015385), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], c(70.6, 80.555556, 134.14286, 156.02174), tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[1, ], c(20, 20, 20, 20)) expect_equal(simulationResults$overallEventsPerStage[2, ], c(84.483333, 93.054795, 98.884615, 92.015385), tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[3, ], c(155.08333, 173.61035, 233.02747, 248.03712), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResults$iterations[2, ], c(60, 73, 78, 65)) expect_equal(simulationResults$iterations[3, ], c(5, 9, 28, 46)) expect_equal(simulationResults$overallReject, c(1, 0.93, 0.96, 0.69), tolerance = 1e-07) expect_gte(object = simulationResults$overallReject[1], expected = 
0) expect_gte(object = simulationResults$overallReject[2], expected = 0) expect_gte(object = simulationResults$overallReject[3], expected = 0) expect_gte(object = simulationResults$overallReject[4], expected = 0) expect_lte(object = simulationResults$overallReject[1], expected = 1) expect_lte(object = simulationResults$overallReject[2], expected = 1) expect_lte(object = simulationResults$overallReject[3], expected = 1) expect_lte(object = simulationResults$overallReject[4], expected = 1) expect_equal(simulationResults$rejectPerStage[1, ], c(0.4, 0.21, 0.2, 0.13), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], c(0.55, 0.63, 0.5, 0.15), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], c(0.05, 0.09, 0.26, 0.41), tolerance = 1e-07) expect_equal(simulationResults$futilityStop, c(0, 0.07, 0.02, 0.26), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0.06, 0.02, 0.22), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0.01, 0, 0.04), tolerance = 1e-07) expect_equal(simulationResults$earlyStop, c(0.95, 0.91, 0.72, 0.54), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfSubjects, c(319.3515, 359.96969, 385.19188, 358.56277), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, c(62.22, 80.58, 119.09, 138.58), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.61612368, 0.57564124, 0.49458667, 0.52832804), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.78816558, 0.77803263, 0.64572713, 0.66129837), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResults), NA))) expect_output(print(simulationResults)$show()) invisible(capture.output(expect_error(summary(simulationResults), NA))) 
expect_output(summary(simulationResults)$show()) simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultsCodeBased$median1, simulationResults$median1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$median2, simulationResults$median2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$lambda2, simulationResults$lambda2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$hazardRatio, simulationResults$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05) 
expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simulationResults), "character") df <- as.data.frame(simulationResults) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResults) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': configuration 3", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationSurvival} # @refFS[Formula]{fs:simulationSurvivalTimeGenerate} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} # @refFS[Formula]{fs:simulationSurvivalLogRank} # @refFS[Formula]{fs:simulationSurvivalIncrements} # @refFS[Formula]{fs:simulationSurvivalHazardEstimate} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} design <- getDesignFisher(kMax = 3, alpha0Vec = c(0.5, 0.5)) simulationResults <- getSimulationSurvival( design = design, pi2 = 0.2, pi1 = seq(0.3, 0.45, 0.05), directionUpper = TRUE, maxNumberOfSubjects = 500, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 200), maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' 
with expected results expect_equal(simulationResults$median1, c(23.320299, 19.308487, 16.282985, 13.9131), tolerance = 1e-07) expect_equal(simulationResults$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResults$accrualIntensity, c(23.809524, 47.619048, 47.619048), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.029722912, 0.035898576, 0.042568802, 0.04981975), tolerance = 1e-07) expect_equal(simulationResults$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResults$hazardRatio, c(1.5984103, 1.9305192, 2.2892242, 2.6791588), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[1, ], c(7.2763799, 7.0838561, 6.7193502, 6.3616317), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], c(16.764021, 14.756285, 13.821816, 12.988284), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], c(38.977945, 24.200748, 26.934721, 11.875967), tolerance = 1e-07) expect_equal(simulationResults$studyDuration, c(22.098154, 13.978342, 11.899449, 9.7796143), tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0)) expect_equal(simulationResults$numberOfSubjects[1, ], c(275.04, 265.86, 248.46, 231.45), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], c(496.07246, 481.84722, 476, 463.84), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[3, ], c(500, 500, 500, 494)) expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20, 20)) expect_equal(simulationResults$eventsPerStage[2, ], c(86.507246, 74.541667, 74.677966, 74.48), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], c(155.24324, 97.666667, 124.28571, 37), tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[1, ], c(20, 20, 20, 20)) expect_equal(simulationResults$overallEventsPerStage[2, ], 
c(106.50725, 94.541667, 94.677966, 94.48), tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[3, ], c(261.75049, 192.20833, 218.96368, 131.48), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResults$iterations[2, ], c(69, 72, 59, 50)) expect_equal(simulationResults$iterations[3, ], c(37, 12, 7, 2)) expect_equal(simulationResults$overallReject, c(0.84, 0.92, 0.98, 0.99), tolerance = 1e-07) expect_gte(object = simulationResults$overallReject[1], expected = 0) expect_gte(object = simulationResults$overallReject[2], expected = 0) expect_gte(object = simulationResults$overallReject[3], expected = 0) expect_gte(object = simulationResults$overallReject[4], expected = 0) expect_lte(object = simulationResults$overallReject[1], expected = 1) expect_lte(object = simulationResults$overallReject[2], expected = 1) expect_lte(object = simulationResults$overallReject[3], expected = 1) expect_lte(object = simulationResults$overallReject[4], expected = 1) expect_equal(simulationResults$rejectPerStage[1, ], c(0.18, 0.22, 0.39, 0.49), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], c(0.31, 0.59, 0.52, 0.48), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], c(0.35, 0.11, 0.07, 0.02), tolerance = 1e-07) expect_equal(simulationResults$futilityStop, c(0.14, 0.07, 0.02, 0.01), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[1, ], c(0.13, 0.06, 0.02, 0.01), tolerance = 1e-07) expect_equal(simulationResults$futilityPerStage[2, ], c(0.01, 0.01, 0, 0), tolerance = 1e-07) expect_equal(simulationResults$earlyStop, c(0.63, 0.88, 0.93, 0.98), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfSubjects, c(429.00559, 423.54913, 384.3886, 348.2482), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, c(137.13, 85.39, 72.76, 57.98), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], 
c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.46273079, 0.58305775, 0.61313502, 0.59484117), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.66165116, 0.75066235, 0.71981679, 0.8), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResults), NA))) expect_output(print(simulationResults)$show()) invisible(capture.output(expect_error(summary(simulationResults), NA))) expect_output(summary(simulationResults)$show()) simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultsCodeBased$median1, simulationResults$median1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$median2, simulationResults$median2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$lambda2, simulationResults$lambda2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$hazardRatio, simulationResults$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance = 1e-05) 
expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simulationResults), "character") df <- as.data.frame(simulationResults) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResults) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': configuration 4", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis} # @refFS[Tab.]{fs:tab:output:getSimulationSurvival} # @refFS[Formula]{fs:simulationSurvivalTimeGenerate} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} # @refFS[Formula]{fs:simulationSurvivalLogRank} # @refFS[Formula]{fs:simulationSurvivalIncrements} # @refFS[Formula]{fs:simulationSurvivalHazardEstimate} # @refFS[Formula]{fs:testStatisticGroupSequential} design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25) piecewiseSurvivalTime <- list( "<6" = 0.025, "6 - <9" 
= 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007 ) simulationResults <- getSimulationSurvival( design = design, directionUpper = TRUE, maxNumberOfSubjects = 500, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 1.7, accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 200), maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results expect_equal(simulationResults$accrualIntensity, c(23.809524, 47.619048, 47.619048), tolerance = 1e-07) expect_equal(simulationResults$lambda1, c(0.0425, 0.068, 0.0255, 0.017, 0.0119), tolerance = 1e-07) expect_equal(simulationResults$analysisTime[1, ], 6.3619038, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[2, ], 12.345684, tolerance = 1e-07) expect_equal(simulationResults$analysisTime[3, ], 36.687962, tolerance = 1e-07) expect_equal(simulationResults$studyDuration, 19.26207, tolerance = 1e-07) expect_equal(simulationResults$eventsNotAchieved[1, ], 0) expect_equal(simulationResults$eventsNotAchieved[2, ], 0) expect_equal(simulationResults$eventsNotAchieved[3, ], 0) expect_equal(simulationResults$numberOfSubjects[1, ], 231.41, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], 448.23158, tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[3, ], 491.66667, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], 20) expect_equal(simulationResults$eventsPerStage[2, ], 71.694737, tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], 111.76667, tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[1, ], 20) expect_equal(simulationResults$overallEventsPerStage[2, ], 
91.694737, tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[3, ], 203.4614, tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], 100) expect_equal(simulationResults$iterations[2, ], 95) expect_equal(simulationResults$iterations[3, ], 30) expect_equal(simulationResults$overallReject, 0.99, tolerance = 1e-07) expect_gte(object = simulationResults$overallReject[1], expected = 0) expect_lte(object = simulationResults$overallReject[1], expected = 1) expect_equal(simulationResults$rejectPerStage[1, ], 0.05, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], 0.65, tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], 0.29, tolerance = 1e-07) expect_equal(simulationResults$futilityStop, 0) expect_equal(simulationResults$futilityPerStage[1, ], 0) expect_equal(simulationResults$futilityPerStage[2, ], 0) expect_equal(simulationResults$earlyStop, 0.7, tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfSubjects, 450.42103, tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, 121.64, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.49425129, tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.73157546, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResults), NA))) expect_output(print(simulationResults)$show()) invisible(capture.output(expect_error(summary(simulationResults), NA))) expect_output(summary(simulationResults)$show()) simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05) 
expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simulationResults), "character") df <- as.data.frame(simulationResults) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResults) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) 
# NOTE(review): the tests below are seed-pinned regression tests for
# getSimulationSurvival(): every numeric constant is the output observed with
# seed = 1234567890 and maxNumberOfIterations = 100. Do not adjust the
# constants unless the simulation engine is intentionally changed.

test_that("'getSimulationSurvival': configuration 5", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis}
    # @refFS[Tab.]{fs:tab:output:getSimulationSurvival}
    # @refFS[Formula]{fs:simulationSurvivalTimeGenerate}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    # @refFS[Formula]{fs:simulationSurvivalLogRank}
    # @refFS[Formula]{fs:simulationSurvivalIncrements}
    # @refFS[Formula]{fs:simulationSurvivalHazardEstimate}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # Three-stage Wang-Tsiatis design; event rates given via pi1/pi2, testing
    # downwards (directionUpper = FALSE) with conditional-power based
    # recalculation of the stage-wise number of events (10..40 per stage).
    design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25)
    simulationResults <- getSimulationSurvival(
        design = design,
        pi2 = 0.6, pi1 = seq(0.3, 0.45, 0.05), directionUpper = FALSE,
        maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12),
        accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0, dropoutRate2 = 0,
        dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10),
        maxNumberOfEventsPerStage = c(NA_real_, 40, 40),
        maxNumberOfIterations = 100, seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    expect_equal(simulationResults$median1, c(23.320299, 19.308487, 16.282985, 13.9131), tolerance = 1e-07)
    expect_equal(simulationResults$median2, 9.0776496, tolerance = 1e-07)
    expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.029722912, 0.035898576, 0.042568802, 0.04981975), tolerance = 1e-07)
    expect_equal(simulationResults$lambda2, 0.076357561, tolerance = 1e-07)
    expect_equal(simulationResults$hazardRatio, c(0.38925958, 0.47013781, 0.55749295, 0.6524534), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[1, ], c(8.1674426, 7.9228743, 7.6045868, 7.4881493), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], c(12.354338, 12.56529, 12.380125, 12.254955), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], c(16.473595, 17.9949, 17.847597, 17.390492), tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, c(12.562909, 13.818364, 15.044701, 16.144285), tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$numberOfSubjects[1, ], c(126.03, 121.42, 115.37, 113.16), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], c(187.50575, 190.98876, 193.16304, 192.33), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[3, ], c(199.11111, 200, 199.39655, 199.28571), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20, 20))
    expect_equal(simulationResults$eventsPerStage[2, ], c(28.54023, 31.561798, 35.130435, 35.79), tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], c(26.388889, 33.558824, 37.155172, 37.792208), tolerance = 1e-07)
    expect_equal(simulationResults$overallEventsPerStage[1, ], c(20, 20, 20, 20))
    expect_equal(simulationResults$overallEventsPerStage[2, ], c(48.54023, 51.561798, 55.130435, 55.79), tolerance = 1e-07)
    expect_equal(simulationResults$overallEventsPerStage[3, ], c(74.929119, 85.120621, 92.285607, 93.582208), tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(simulationResults$iterations[2, ], c(87, 89, 92, 100))
    expect_equal(simulationResults$iterations[3, ], c(18, 34, 58, 77))
    expect_equal(simulationResults$overallReject, c(0.99, 0.97, 0.68, 0.48), tolerance = 1e-07)
    # Probabilities must stay inside [0, 1] regardless of the exact values.
    expect_gte(object = simulationResults$overallReject[1], expected = 0)
    expect_gte(object = simulationResults$overallReject[2], expected = 0)
    expect_gte(object = simulationResults$overallReject[3], expected = 0)
    expect_gte(object = simulationResults$overallReject[4], expected = 0)
    expect_lte(object = simulationResults$overallReject[1], expected = 1)
    expect_lte(object = simulationResults$overallReject[2], expected = 1)
    expect_lte(object = simulationResults$overallReject[3], expected = 1)
    expect_lte(object = simulationResults$overallReject[4], expected = 1)
    expect_equal(simulationResults$rejectPerStage[1, ], c(0.13, 0.11, 0.08, 0), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[2, ], c(0.69, 0.55, 0.34, 0.23), tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], c(0.17, 0.31, 0.26, 0.25), tolerance = 1e-07)
    expect_equal(simulationResults$futilityStop, c(0, 0, 0, 0))
    expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0, 0, 0))
    expect_equal(simulationResults$earlyStop, c(0.82, 0.66, 0.42, 0.23), tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfSubjects, c(181.60287, 186.40002, 190.55503, 197.6859), tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, c(49.58, 59.5, 73.87, 84.89), tolerance = 1e-07)
    # No conditional power can be computed at the first stage, hence NA.
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.56161185, 0.47418383, 0.31608317, 0.29578133), tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.71394365, 0.57778506, 0.37448609, 0.32265113), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must succeed without error and produce output.
        invisible(capture.output(expect_error(print(simulationResults), NA)))
        expect_output(print(simulationResults)$show())
        invisible(capture.output(expect_error(summary(simulationResults), NA)))
        expect_output(summary(simulationResults)$show())
        ## Reproducibility: an object rebuilt from its own generated R code
        ## must match the original field by field.
        simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL)))
        expect_equal(simulationResultsCodeBased$median1, simulationResults$median1, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$median2, simulationResults$median2, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$lambda2, simulationResults$lambda2, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$hazardRatio, simulationResults$hazardRatio, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(simulationResults), "character")
        df <- as.data.frame(simulationResults)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simulationResults)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getSimulationSurvival': configuration 6", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis}
    # @refFS[Tab.]{fs:tab:output:getSimulationSurvival}
    # @refFS[Formula]{fs:simulationSurvivalTimeGenerate}
    # @refFS[Formula]{fs:pieceWiseExponentialRandomVariable}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    # @refFS[Formula]{fs:simulationSurvivalLogRank}
    # @refFS[Formula]{fs:simulationSurvivalIncrements}
    # @refFS[Formula]{fs:simulationSurvivalHazardEstimate}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # Piecewise-exponential survival given as a named list of hazards per
    # interval; warnings are expected (large maxNumberOfEventsPerStage can make
    # the required events unreachable), hence suppressWarnings() here.
    design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25)
    piecewiseSurvivalTime <- list(
        "0 - <6" = 0.025,
        "6 - <9" = 0.04,
        "9 - <15" = 0.015,
        "15 - <21" = 0.01,
        ">=21" = 0.007
    )
    suppressWarnings(simulationResults <- getSimulationSurvival(
        design = design, directionUpper = FALSE,
        maxNumberOfSubjects = 260, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12),
        piecewiseSurvivalTime = piecewiseSurvivalTime, hazardRatio = 0.8,
        accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0, dropoutRate2 = 0,
        dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10),
        maxNumberOfEventsPerStage = c(NA_real_, 400, 200),
        maxNumberOfIterations = 100, seed = 1234567890
    ))

    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    expect_equal(simulationResults$accrualIntensity, c(12.380952, 24.761905, 24.761905), tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.02, 0.032, 0.012, 0.008, 0.0056), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[1, ], 9.8001583, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], 134.1032, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], 189.74226, tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, 39.514056, tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[2, ], 0.62, tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[3, ], 0.18, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[1, ], 205.17, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], 258.63158, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[3, ], 258.76923, tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[1, ], 20)
    expect_equal(simulationResults$eventsPerStage[2, ], 105.21053, tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], 97.307692, tolerance = 1e-07)
    expect_equal(simulationResults$overallEventsPerStage[1, ], 20)
    expect_equal(simulationResults$overallEventsPerStage[2, ], 125.21053, tolerance = 1e-07)
    expect_equal(simulationResults$overallEventsPerStage[3, ], 222.51822, tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], 100)
    expect_equal(simulationResults$iterations[2, ], 38)
    expect_equal(simulationResults$iterations[3, ], 13)
    expect_equal(simulationResults$overallReject, 0.1, tolerance = 1e-07)
    expect_gte(object = simulationResults$overallReject[1], expected = 0)
    expect_lte(object = simulationResults$overallReject[1], expected = 1)
    expect_equal(simulationResults$rejectPerStage[1, ], 0)
    expect_equal(simulationResults$rejectPerStage[2, ], 0.07, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], 0.03, tolerance = 1e-07)
    expect_equal(simulationResults$futilityStop, 0)
    expect_equal(simulationResults$futilityPerStage[1, ], 0)
    expect_equal(simulationResults$futilityPerStage[2, ], 0)
    expect_equal(simulationResults$earlyStop, 0.07, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfSubjects, 258.7596, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, 215.70668, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.80033324, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.64354689, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(simulationResults), NA)))
        expect_output(print(simulationResults)$show())
        invisible(capture.output(expect_error(summary(simulationResults), NA)))
        expect_output(summary(simulationResults)$show())
        ## Reproducibility: an object rebuilt from its own generated R code
        ## must match the original field by field.
        suppressWarnings(simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL))))
        expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(simulationResults), "character")
        df <- as.data.frame(simulationResults)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simulationResults)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
    # With only 200 subjects the required events cannot always be reached and a
    # drop-out warning must be raised.
    # NOTE(review): the following warning expectation is issued twice with
    # identical arguments (see the next expect_warning call) — presumably an
    # accidental duplication; confirm before removing.
    expect_warning(
        getSimulationSurvival(
            design = design, directionUpper = FALSE, maxNumberOfSubjects = 200,
            plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1,
            accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = piecewiseSurvivalTime,
            hazardRatio = 0.8, accrualIntensity = c(0.1, 0.2, 0.2),
            dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12,
            conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10),
            maxNumberOfEventsPerStage = c(NA_real_, 400, 200),
            maxNumberOfIterations = 100, seed = 1234567890
        ),
        paste0(
            "Presumably due to drop-outs, required number of events were not achieved for at least one situation. ",
            "Increase the maximum number of subjects (200) to avoid this situation"
        ),
        fixed = TRUE
    )
    expect_warning(
        getSimulationSurvival(
            design = design, directionUpper = FALSE, maxNumberOfSubjects = 200,
            plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1,
            accrualTime = c(0, 3, 6, 12), piecewiseSurvivalTime = piecewiseSurvivalTime,
            hazardRatio = 0.8, accrualIntensity = c(0.1, 0.2, 0.2),
            dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12,
            conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10),
            maxNumberOfEventsPerStage = c(NA_real_, 400, 200),
            maxNumberOfIterations = 100, seed = 1234567890
        ),
        paste0(
            "Presumably due to drop-outs, required number of events were not achieved for at least one situation. ",
            "Increase the maximum number of subjects (200) to avoid this situation"
        ),
        fixed = TRUE
    )
    # A vector-valued hazardRatio combined with a piecewise definition is only
    # partially usable: a warning must state that only the first value is used.
    expect_warning(
        getSimulationSurvival(
            piecewiseSurvivalTime = list("<6" = 1.7, "6 - Inf" = 1.2),
            hazardRatio = c(0.65, 0.7, 0.8), plannedEvents = 98,
            maxNumberOfSubjects = 120, directionUpper = FALSE,
            maxNumberOfIterations = 1000, sided = 1, alpha = 0.1
        ),
        paste0(
            "Only the first 'hazardRatio' (0.65) was used for piecewise survival time definition ",
            "(use a loop over the function to simulate different hazard ratios)"
        ),
        fixed = TRUE
    )
})

test_that("'getSimulationSurvival': configuration 7", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis}
    # @refFS[Tab.]{fs:tab:output:getSimulationSurvival}
    # @refFS[Formula]{fs:simulationSurvivalTimeGenerate}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    # @refFS[Formula]{fs:pieceWiseExponentialRandomVariable}
    # @refFS[Formula]{fs:simulationSurvivalLogRank}
    # @refFS[Formula]{fs:simulationSurvivalIncrements}
    # @refFS[Formula]{fs:simulationSurvivalHazardEstimate}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # Degenerate piecewise definition with a single open interval ("0 - ?"),
    # i.e. effectively a constant hazard of 0.025 in the control arm.
    design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25)
    simulationResults <- getSimulationSurvival(
        design = design, directionUpper = FALSE,
        maxNumberOfSubjects = 260, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12),
        piecewiseSurvivalTime = list("0 - ?" = 0.025), hazardRatio = 0.8,
        accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0, dropoutRate2 = 0,
        dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10),
        maxNumberOfEventsPerStage = c(NA_real_, 100, 100),
        maxNumberOfIterations = 100, seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    expect_equal(simulationResults$median1, 34.657359, tolerance = 1e-07)
    expect_equal(simulationResults$median2, 27.725887, tolerance = 1e-07)
    expect_equal(simulationResults$accrualIntensity, c(12.380952, 24.761905, 24.761905), tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, 0.02, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[1, ], 10.071413, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], 31.014645, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], 78.484045, tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, 75.086051, tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[3, ], 0)
    expect_equal(simulationResults$numberOfSubjects[1, ], 211.81, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], 259.98, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[3, ], 260)
    expect_equal(simulationResults$eventsPerStage[1, ], 20)
    expect_equal(simulationResults$eventsPerStage[2, ], 87.53, tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], 93.376344, tolerance = 1e-07)
    expect_equal(simulationResults$overallEventsPerStage[1, ], 20)
    expect_equal(simulationResults$overallEventsPerStage[2, ], 107.53, tolerance = 1e-07)
    expect_equal(simulationResults$overallEventsPerStage[3, ], 200.90634, tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], 100)
    expect_equal(simulationResults$iterations[2, ], 100)
    expect_equal(simulationResults$iterations[3, ], 93)
    expect_equal(simulationResults$overallReject, 0.26, tolerance = 1e-07)
    expect_gte(object = simulationResults$overallReject[1], expected = 0)
    expect_lte(object = simulationResults$overallReject[1], expected = 1)
    expect_equal(simulationResults$rejectPerStage[1, ], 0)
    expect_equal(simulationResults$rejectPerStage[2, ], 0.07, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], 0.19, tolerance = 1e-07)
    expect_equal(simulationResults$futilityStop, 0)
    expect_equal(simulationResults$futilityPerStage[1, ], 0)
    expect_equal(simulationResults$futilityPerStage[2, ], 0)
    expect_equal(simulationResults$earlyStop, 0.07, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfSubjects, 259.9986, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, 194.37, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.26815489, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.24457773, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(simulationResults), NA)))
        expect_output(print(simulationResults)$show())
        invisible(capture.output(expect_error(summary(simulationResults), NA)))
        expect_output(summary(simulationResults)$show())
        ## Reproducibility: an object rebuilt from its own generated R code
        ## must match the original field by field.
        simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL)))
        expect_equal(simulationResultsCodeBased$median1, simulationResults$median1, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$median2, simulationResults$median2, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(simulationResults), "character")
        df <- as.data.frame(simulationResults)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simulationResults)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getSimulationSurvival': configuration 8", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis}
    # @refFS[Tab.]{fs:tab:output:getSimulationSurvival}
    # @refFS[Formula]{fs:simulationSurvivalTimeGenerate}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    # @refFS[Formula]{fs:pieceWiseExponentialRandomVariable}
    # @refFS[Formula]{fs:simulationSurvivalLogRank}
    # @refFS[Formula]{fs:simulationSurvivalIncrements}
    # @refFS[Formula]{fs:simulationSurvivalHazardEstimate}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # Piecewise survival given as numeric vector plus lambda2; non-zero
    # drop-out rates and no conditional-power recalculation (fixed 20 events
    # per stage).
    design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25)
    simulationResults <- getSimulationSurvival(
        design = design, directionUpper = FALSE,
        maxNumberOfSubjects = 200, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12),
        piecewiseSurvivalTime = c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = 0.8,
        accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0.04, dropoutRate2 = 0.08,
        dropoutTime = 12, maxNumberOfIterations = 100, seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    expect_equal(simulationResults$accrualIntensity, c(9.5238095, 19.047619, 19.047619), tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.008, 0.024), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[1, ], 14.155697, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], 19.508242, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], 25.008056, tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, 24.627971, tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[3, ], 0)
    expect_equal(simulationResults$numberOfSubjects[1, ], 199.73, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], 200)
    expect_equal(simulationResults$numberOfSubjects[3, ], 200)
    expect_equal(simulationResults$eventsPerStage[1, ], 20)
    expect_equal(simulationResults$eventsPerStage[2, ], 20)
    expect_equal(simulationResults$eventsPerStage[3, ], 20)
    expect_equal(simulationResults$overallEventsPerStage[1, ], 20)
    expect_equal(simulationResults$overallEventsPerStage[2, ], 40)
    expect_equal(simulationResults$overallEventsPerStage[3, ], 60)
    expect_equal(simulationResults$iterations[1, ], 100)
    expect_equal(simulationResults$iterations[2, ], 99)
    expect_equal(simulationResults$iterations[3, ], 95)
    expect_equal(simulationResults$overallReject, 0.11, tolerance = 1e-07)
    expect_gte(object = simulationResults$overallReject[1], expected = 0)
    expect_lte(object = simulationResults$overallReject[1], expected = 1)
    expect_equal(simulationResults$rejectPerStage[1, ], 0.01, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[2, ], 0.04, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], 0.06, tolerance = 1e-07)
    expect_equal(simulationResults$futilityStop, 0)
    expect_equal(simulationResults$futilityPerStage[1, ], 0)
    expect_equal(simulationResults$futilityPerStage[2, ], 0)
    expect_equal(simulationResults$earlyStop, 0.05, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfSubjects, 199.9973, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, 58.8, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.13387917, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.12806393, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(simulationResults), NA)))
        expect_output(print(simulationResults)$show())
        invisible(capture.output(expect_error(summary(simulationResults), NA)))
        expect_output(summary(simulationResults)$show())
        ## Reproducibility: an object rebuilt from its own generated R code
        ## must match the original field by field.
        simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL)))
        expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(simulationResults), "character")
        df <- as.data.frame(simulationResults)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simulationResults)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getSimulationSurvival': configuration 9;", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis}
    # @refFS[Tab.]{fs:tab:output:getSimulationSurvival}
    # @refFS[Formula]{fs:simulationSurvivalTimeGenerate}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    # @refFS[Formula]{fs:pieceWiseExponentialRandomVariable}
    # @refFS[Formula]{fs:simulationSurvivalLogRank}
    # @refFS[Formula]{fs:simulationSurvivalIncrements}
    # @refFS[Formula]{fs:simulationSurvivalHazardEstimate}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # As configuration 8 but with no drop-outs, hazardRatio = 0.75, a larger
    # subject pool (260) and conditional-power based event recalculation.
    design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25)
    simulationResults <- getSimulationSurvival(
        design = design, directionUpper = FALSE,
        maxNumberOfSubjects = 260, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12),
        piecewiseSurvivalTime = c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = c(0.75),
        accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0, dropoutRate2 = 0,
        dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10),
        maxNumberOfEventsPerStage = c(NA_real_, 100, 100),
        maxNumberOfIterations = 100, seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    expect_equal(simulationResults$accrualIntensity, c(12.380952, 24.761905, 24.761905), tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.0075, 0.0225), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[1, ], 12.905156, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], 31.363371, tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], 71.176717, tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, 65.836001, tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResults$eventsNotAchieved[3, ], 0)
    expect_equal(simulationResults$numberOfSubjects[1, ], 257.27, tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], 260)
    expect_equal(simulationResults$numberOfSubjects[3, ], 260)
    expect_equal(simulationResults$eventsPerStage[1, ], 20)
    expect_equal(simulationResults$eventsPerStage[2, ], 86.161616, tolerance = 1e-07)
    expect_equal(simulationResults$eventsPerStage[3, ], 89.574713, tolerance = 1e-07)
    expect_equal(simulationResults$overallEventsPerStage[1, ], 20)
    expect_equal(simulationResults$overallEventsPerStage[2, ], 106.16162, tolerance = 1e-07)
    expect_equal(simulationResults$overallEventsPerStage[3, ], 195.73633, tolerance = 1e-07)
    expect_equal(simulationResults$iterations[1, ], 100)
    expect_equal(simulationResults$iterations[2, ], 99)
    expect_equal(simulationResults$iterations[3, ], 87)
    expect_equal(simulationResults$overallReject, 0.47, tolerance = 1e-07)
    expect_gte(object = simulationResults$overallReject[1], expected = 0)
    expect_lte(object = simulationResults$overallReject[1], expected = 1)
    expect_equal(simulationResults$rejectPerStage[1, ], 0.01, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[2, ], 0.12, tolerance = 1e-07)
    expect_equal(simulationResults$rejectPerStage[3, ], 0.34, tolerance = 1e-07)
    expect_equal(simulationResults$futilityStop, 0)
    expect_equal(simulationResults$futilityPerStage[1, ], 0)
    expect_equal(simulationResults$futilityPerStage[2, ], 0)
    expect_equal(simulationResults$earlyStop, 0.13, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfSubjects, 259.9727, tolerance = 1e-07)
    expect_equal(simulationResults$expectedNumberOfEvents, 183.23, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResults$conditionalPowerAchieved[2, ], 0.28641702, tolerance = 1e-07)
    expect_equal(simulationResults$conditionalPowerAchieved[3, ], 0.33103011, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(simulationResults), NA)))
        expect_output(print(simulationResults)$show())
        invisible(capture.output(expect_error(summary(simulationResults), NA)))
        expect_output(summary(simulationResults)$show())
        ## Reproducibility: an object rebuilt from its own generated R code
        ## must match the original field by field.
        simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL)))
        expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(simulationResults), "character")
        df <- as.data.frame(simulationResults)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simulationResults)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getSimulationSurvival': configuration 10;", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis}
    # @refFS[Tab.]{fs:tab:output:getSimulationSurvival}
    # @refFS[Formula]{fs:simulationSurvivalTimeGenerate}
    # @refFS[Formula]{fs:pieceWiseExponentialRandomVariable}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    # @refFS[Formula]{fs:simulationSurvivalLogRank}
    # @refFS[Formula]{fs:simulationSurvivalIncrements}
    # @refFS[Formula]{fs:simulationSurvivalHazardEstimate}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # Constant lambda2 with a vector of hazard ratios (three scenarios).
    design <- getDesignGroupSequential(kMax = 3, typeOfDesign = "WT", deltaWT = 0.25)
    simulationResults <- getSimulationSurvival(
        design = design, directionUpper = FALSE,
        maxNumberOfSubjects = 260, plannedEvents = (1:design$kMax) * 20,
        allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12),
        lambda2 = 0.03, hazardRatio = c(0.75, 0.8, 0.9),
        accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0, dropoutRate2 = 0,
        dropoutTime = 12, conditionalPower = 0.8,
        minNumberOfEventsPerStage = c(NA_real_, 10, 10),
        maxNumberOfEventsPerStage = c(NA_real_, 100, 100),
        maxNumberOfIterations = 100, seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object 'simulationResults' with expected results
    expect_equal(simulationResults$pi1, c(0.23662051, 0.25023841, 0.27674976), tolerance = 1e-07)
    expect_equal(simulationResults$pi2, 0.30232367, tolerance = 1e-07)
    expect_equal(simulationResults$median1, c(30.806541, 28.881133, 25.672118), tolerance = 1e-07)
    expect_equal(simulationResults$median2, 23.104906, tolerance = 1e-07)
    expect_equal(simulationResults$accrualIntensity, c(12.380952, 24.761905, 24.761905), tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.0225, 0.024, 0.027), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[1, ], c(9.4112305, 9.2753297, 9.1968922), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], c(27.054738, 27.519552, 26.652741), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], c(67.286427, 67.154864, 68.163763), tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, c(61.651041, 62.169663, 64.720225), tolerance = 1e-07)
    expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0))
    expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0))
    expect_equal(simulationResults$numberOfSubjects[1, ], c(195.58, 192.19, 190.21), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[2, ], c(258.86, 259.77778, 259.64646), tolerance = 1e-07)
    expect_equal(simulationResults$numberOfSubjects[3, ], c(260, 260, 260))
expect_equal(simulationResults$eventsPerStage[1, ], c(20, 20, 20)) expect_equal(simulationResults$eventsPerStage[2, ], c(85.16, 89.353535, 92.363636), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], c(92, 90.181818, 98.623656), tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[1, ], c(20, 20, 20)) expect_equal(simulationResults$overallEventsPerStage[2, ], c(105.16, 109.35354, 112.36364), tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[3, ], c(197.16, 199.53535, 210.98729), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], c(100, 100, 100)) expect_equal(simulationResults$iterations[2, ], c(100, 99, 99)) expect_equal(simulationResults$iterations[3, ], c(86, 88, 93)) expect_equal(simulationResults$overallReject, c(0.46, 0.36, 0.13), tolerance = 1e-07) expect_gte(object = simulationResults$overallReject[1], expected = 0) expect_gte(object = simulationResults$overallReject[2], expected = 0) expect_gte(object = simulationResults$overallReject[3], expected = 0) expect_lte(object = simulationResults$overallReject[1], expected = 1) expect_lte(object = simulationResults$overallReject[2], expected = 1) expect_lte(object = simulationResults$overallReject[3], expected = 1) expect_equal(simulationResults$rejectPerStage[1, ], c(0, 0.01, 0.01), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], c(0.14, 0.11, 0.06), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], c(0.32, 0.24, 0.06), tolerance = 1e-07) expect_equal(simulationResults$futilityStop, c(0, 0, 0)) expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0, 0)) expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0, 0)) expect_equal(simulationResults$earlyStop, c(0.14, 0.12, 0.07), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfSubjects, c(259.8404, 259.29746, 259.28089), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, c(184.28, 187.82, 
203.16), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.30728214, 0.23928832, 0.1863817), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.33344952, 0.28614054, 0.14302818), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResults), NA))) expect_output(print(simulationResults)$show()) invisible(capture.output(expect_error(summary(simulationResults), NA))) expect_output(summary(simulationResults)$show()) simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultsCodeBased$pi1, simulationResults$pi1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$pi2, simulationResults$pi2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$median1, simulationResults$median1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$median2, simulationResults$median2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance 
        = 1e-05)
        # Round-trip check: an object rebuilt by evaluating the generated R code
        # (getObjectRCode) must reproduce the original simulation results.
        expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05)
        # Generic accessors must work on the result object: names(), as.data.frame(), as.matrix().
        expect_type(names(simulationResults), "character")
        df <- as.data.frame(simulationResults)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simulationResults)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getSimulationSurvival': configuration 11;", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis}
    # @refFS[Tab.]{fs:tab:output:getSimulationSurvival}
    # @refFS[Formula]{fs:simulationSurvivalTimeGenerate}
    # @refFS[Formula]{fs:simulationSurvivalLogRank}
    # @refFS[Formula]{fs:simulationSurvivalIncrements}
    # @refFS[Formula]{fs:testStatisticNormalCombinationTest}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    design <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1))

    # Custom 'calcEventsFunction' passed to getSimulationSurvival below.
    # Returns the CUMULATIVE number of events required up to 'stage':
    # - theta is floored just above 1 so log(theta) below cannot be 0 (avoids
    #   division by zero when the estimated effect is <= 1).
    # - stage 2: stage-wise events via the conditional-power inversion
    #   (conditionalCriticalValue + qnorm(conditionalPower))^2 / log(theta)^2,
    #   clamped to [minNumberOfEventsPerStage[stage], maxNumberOfEventsPerStage[stage]],
    #   then added to the previous cumulative count eventsOverStages[stage - 1].
    # - later stages: extrapolated as 2 * previous cumulative - first-stage cumulative.
    myCalcEventsFunction <- function(..., stage, conditionalPower, estimatedTheta,
            plannedEvents, eventsOverStages,
            minNumberOfEventsPerStage, maxNumberOfEventsPerStage,
            conditionalCriticalValue) {
        theta <- max(1 + 1e-12, estimatedTheta)
        if (stage == 2) {
            requiredStageEvents <- max(0, conditionalCriticalValue + qnorm(conditionalPower))^2 / log(theta)^2
            requiredStageEvents <- min(
                max(minNumberOfEventsPerStage[stage], requiredStageEvents),
                maxNumberOfEventsPerStage[stage]
            ) + eventsOverStages[stage - 1]
        } else {
            requiredStageEvents <- 2 * eventsOverStages[stage - 1] - eventsOverStages[1]
        }
        return(requiredStageEvents)
    }

    # Only 10 iterations: this configuration exercises the custom
    # calcEventsFunction, not the Monte Carlo precision.
    simulationResults <- getSimulationSurvival(
        design = design,
        hazardRatio = seq(1, 2.6, 0.5), pi2 = 0.3,
        conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA, 44, 4),
        maxNumberOfEventsPerStage = 4 * c(NA, 44, 4),
        maxNumberOfSubjects = 800, calcEventsFunction = myCalcEventsFunction,
        seed = 1234567890, maxNumberOfIterations = 10
    )

    ## Comparison of the results of SimulationResultsSurvival object
    ## 'simulationResults' with expected results
    expect_equal(simulationResults$pi1, c(0.3, 0.41433798, 0.51, 0.59003659), tolerance = 1e-07)
    expect_equal(simulationResults$median1, c(23.320299, 15.546866, 11.660149, 9.3281194), tolerance = 1e-07)
    expect_equal(simulationResults$median2, 23.320299, tolerance = 1e-07)
    expect_equal(simulationResults$accrualIntensity, 66.666667, tolerance = 1e-07)
    expect_equal(simulationResults$lambda1, c(0.029722912, 0.044584368, 0.059445824, 0.07430728), tolerance = 1e-07)
    expect_equal(simulationResults$lambda2, 0.029722912, tolerance = 1e-07)
    # NA_real_ entries below: for the larger hazard ratios no iteration reaches
    # stage 3 (see the iterations[3, ] expectation later in this test).
    expect_equal(simulationResults$analysisTime[1, ], c(7.9897023, 7.1475519, 6.494373, 6.2294549), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[2, ], c(17.213741, 10.852607, 9.0020285, 8.4492782), tolerance = 1e-07)
    expect_equal(simulationResults$analysisTime[3, ], c(28.255907, 20.342199, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(simulationResults$studyDuration, c(28.255907, 12.725375, 7.9469034, 7.3207992), tolerance = 1e-07)
expect_equal(simulationResults$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simulationResults$eventsNotAchieved[3, ], c(0, 0, 0, 0)) expect_equal(simulationResults$numberOfSubjects[1, ], c(532.2, 476.1, 432.4, 414.9), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[2, ], c(788.2, 681.33333, 599.66667, 562.8), tolerance = 1e-07) expect_equal(simulationResults$numberOfSubjects[3, ], c(800, 778.66667, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[1, ], c(58, 58, 58, 58)) expect_equal(simulationResults$eventsPerStage[2, ], c(162.8, 74.888889, 46, 44), tolerance = 1e-07) expect_equal(simulationResults$eventsPerStage[3, ], c(162.8, 132, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[1, ], c(58, 58, 58, 58)) expect_equal(simulationResults$overallEventsPerStage[2, ], c(220.8, 132.88889, 104, 102), tolerance = 1e-07) expect_equal(simulationResults$overallEventsPerStage[3, ], c(383.6, 264.88889, 104, 102), tolerance = 1e-07) expect_equal(simulationResults$iterations[1, ], c(10, 10, 10, 10)) expect_equal(simulationResults$iterations[2, ], c(10, 9, 6, 5)) expect_equal(simulationResults$iterations[3, ], c(10, 3, 0, 0)) expect_equal(simulationResults$overallReject, c(0.1, 1, 1, 1), tolerance = 1e-07) expect_gte(object = simulationResults$overallReject[1], expected = 0) expect_gte(object = simulationResults$overallReject[2], expected = 0) expect_gte(object = simulationResults$overallReject[3], expected = 0) expect_gte(object = simulationResults$overallReject[4], expected = 0) expect_lte(object = simulationResults$overallReject[1], expected = 1) expect_lte(object = simulationResults$overallReject[2], expected = 1) expect_lte(object = simulationResults$overallReject[3], expected = 1) expect_lte(object = simulationResults$overallReject[4], expected = 1) expect_equal(simulationResults$rejectPerStage[1, ], 
c(0, 0.1, 0.4, 0.5), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[2, ], c(0, 0.6, 0.6, 0.5), tolerance = 1e-07) expect_equal(simulationResults$rejectPerStage[3, ], c(0.1, 0.3, 0, 0), tolerance = 1e-07) expect_equal(simulationResults$futilityStop, c(0, 0, 0, 0)) expect_equal(simulationResults$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simulationResults$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simulationResults$earlyStop, c(0, 0.7, 1, 1), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfSubjects, c(800, 690.01, 532.76, 488.85), tolerance = 1e-07) expect_equal(simulationResults$expectedNumberOfEvents, c(383.6, 165, 85.6, 80), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simulationResults$conditionalPowerAchieved[2, ], c(0.044313374, 0.60865148, 0.72866566, 0.82381582), tolerance = 1e-07) expect_equal(simulationResults$conditionalPowerAchieved[3, ], c(0.013892246, 0.84869906, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResults), NA))) expect_output(print(simulationResults)$show()) invisible(capture.output(expect_error(summary(simulationResults), NA))) expect_output(summary(simulationResults)$show()) simulationResultsCodeBased <- eval(parse(text = getObjectRCode(simulationResults, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultsCodeBased$pi1, simulationResults$pi1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$median1, simulationResults$median1, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$median2, simulationResults$median2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$accrualIntensity, simulationResults$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$lambda1, simulationResults$lambda1, tolerance = 1e-05) 
expect_equal(simulationResultsCodeBased$lambda2, simulationResults$lambda2, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$analysisTime, simulationResults$analysisTime, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$studyDuration, simulationResults$studyDuration, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsNotAchieved, simulationResults$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$numberOfSubjects, simulationResults$numberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$eventsPerStage, simulationResults$eventsPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallEventsPerStage, simulationResults$overallEventsPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$iterations, simulationResults$iterations, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$overallReject, simulationResults$overallReject, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$rejectPerStage, simulationResults$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$futilityStop, simulationResults$futilityStop, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$futilityPerStage, simulationResults$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$earlyStop, simulationResults$earlyStop, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfSubjects, simulationResults$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$expectedNumberOfEvents, simulationResults$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simulationResultsCodeBased$conditionalPowerAchieved, simulationResults$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simulationResults), "character") df <- as.data.frame(simulationResults) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResults) 
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getSimulationSurvival': test accrual time and intensity definition", {
    .skipTestIfDisabled()

    # Absolute accrual intensities: with accrualTime c(0, 6, 12) and
    # intensities c(22, 33), the derived maximum number of subjects is
    # 6 * 22 + 6 * 33 = 330 (asserted below).
    maxNumberOfSubjects <- getSimulationSurvival(
        plannedEvents = 100, accrualTime = c(0, 6, 12),
        accrualIntensity = c(22, 33), maxNumberOfIterations = 100
    )$maxNumberOfSubjects
    expect_equal(maxNumberOfSubjects, 330)

    # Relative accrual intensities: given maxNumberOfSubjects = 330, the
    # relative values c(0.2, 0.3) are rescaled to the absolute c(22, 33).
    accrualIntensity <- getSimulationSurvival(
        plannedEvents = 100, accrualTime = c(0, 6, 12),
        accrualIntensity = c(0.2, 0.3), maxNumberOfSubjects = 330,
        maxNumberOfIterations = 100, seed = 1234567890
    )$accrualIntensity
    expect_equal(accrualIntensity, c(22, 33))
})

test_that("'getSimulationSurvival': test expected warnings and errors", {
    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis}
    # @refFS[Tab.]{fs:tab:output:getSimulationSurvival}
    # @refFS[Formula]{fs:simulationSurvivalTimeGenerate}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    # @refFS[Formula]{fs:simulationSurvivalLogRank}
    # @refFS[Formula]{fs:simulationSurvivalIncrements}
    # @refFS[Formula]{fs:simulationSurvivalHazardEstimate}
    # @refFS[Formula]{fs:testStatisticNormalCombinationTest}
    .skipTestIfDisabled()

    dIN <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1))

    # All expected messages below are matched literally (fixed = TRUE), so they
    # must stay byte-identical to the messages emitted by getSimulationSurvival.

    # min/maxNumberOfEventsPerStage are only meaningful together with
    # 'conditionalPower'; without it they trigger a warning and are ignored.
    expect_warning(
        getSimulationSurvival(
            design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
            plannedEvents = c(58, 102, 146),
            minNumberOfEventsPerStage = c(NA_real_, 44, 44),
            maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "'minNumberOfEventsPerStage' (NA, 44, 44) will be ignored because 'conditionalPower' is not defined",
        fixed = TRUE
    )
    expect_warning(
        getSimulationSurvival(
            design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
            plannedEvents = c(58, 102, 146),
            maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
            maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "'maxNumberOfEventsPerStage' (NA, 176, 176) will be ignored because 'conditionalPower' is not defined",
        fixed = TRUE
    )
    # Conversely, 'conditionalPower' requires both bounds to be defined.
    expect_error(
        getSimulationSurvival(
            design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
            conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
            maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Missing argument: 'minNumberOfEventsPerStage' must be defined because 'conditionalPower' is defined",
        fixed = TRUE
    )
    expect_error(
        getSimulationSurvival(
            design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
            conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
            minNumberOfEventsPerStage = c(NA_real_, 44, 44),
            maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Missing argument: 'maxNumberOfEventsPerStage' must be defined because 'conditionalPower' is defined",
        fixed = TRUE
    )
    # 'conditionalPower' must lie in the open interval (0; 1).
    expect_error(
        getSimulationSurvival(
            design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
            conditionalPower = -0.1, plannedEvents = c(58, 102, 146),
            minNumberOfEventsPerStage = c(NA_real_, 44, 44),
            maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
            maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Argument out of bounds: 'conditionalPower' (-0.1) is out of bounds (0; 1)",
        fixed = TRUE
    )
    expect_error(
        getSimulationSurvival(
            design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
            conditionalPower = 1.1, plannedEvents = c(58, 102, 146),
            minNumberOfEventsPerStage = c(NA_real_, 44, 44),
            maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
            maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Argument out of bounds: 'conditionalPower' (1.1) is out of bounds (0; 1)",
        fixed = TRUE
    )
    # 'plannedEvents' must be positive and strictly increasing over the stages.
    expect_error(
        getSimulationSurvival(
            plannedEvents = -100, accrualTime = c(0, 6, 12),
            accrualIntensity = c(22, 33), maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Argument out of bounds: 'plannedEvents' (-100) must be >= 1",
        fixed = TRUE
    )
    expect_error(
        getSimulationSurvival(
            design = dIN, plannedEvents = c(100, 100, 150), accrualTime = c(0, 6, 12),
            accrualIntensity = c(22, 33), maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Illegal argument: 'plannedEvents' (100, 100, 150) must be strictly increasing: x_1 < .. < x_3",
        fixed = TRUE
    )
    # Per-stage event bounds: each minimum must be >= 1 and each maximum must
    # not be smaller than the corresponding minimum.
    expect_error(
        getSimulationSurvival(
            design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
            conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
            minNumberOfEventsPerStage = c(NA_real_, 44, -44),
            maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
            maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Argument out of bounds: each value of 'minNumberOfEventsPerStage' (58, 44, -44) must be >= 1",
        fixed = TRUE
    )
    expect_error(
        getSimulationSurvival(
            design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3,
            conditionalPower = 0.8, plannedEvents = c(58, 102, 146),
            minNumberOfEventsPerStage = c(NA_real_, 44, 44),
            maxNumberOfEventsPerStage = 4 * c(NA_real_, 10, 44),
            maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Illegal argument: 'maxNumberOfEventsPerStage' (58, 40, 176) must be not smaller than minNumberOfEventsPerStage' (58, 44, 44)",
        fixed = TRUE
    )
    # Without accrual information, 'maxNumberOfSubjects' cannot be derived.
    expect_error(getSimulationSurvival(plannedEvents = 100, maxNumberOfIterations = 100, seed = 1234567890),
        "Illegal argument: 'maxNumberOfSubjects' must be defined",
        fixed = TRUE
    )
    # Scalar argument bounds: thetaH1 > 0; conditionalPower a single value in (0; 1).
    expect_error(
        getSimulationSurvival(
            plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20,
            thetaH1 = 0, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Argument out of bounds: 'thetaH1' (0) must be > 0",
        fixed = TRUE
    )
    expect_error(
        getSimulationSurvival(
            plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20,
            conditionalPower = 0, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Argument out of bounds: 'conditionalPower' (0) is out of bounds (0; 1)",
        fixed = TRUE
    )
    expect_error(
        getSimulationSurvival(
            plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20,
            conditionalPower = 1, maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Argument out of bounds: 'conditionalPower' (1) is out of bounds (0; 1)",
        fixed = TRUE
    )
    expect_error(
        getSimulationSurvival(
            plannedEvents = 100, accrualTime = c(0, 12), accrualIntensity = 20,
            conditionalPower = c(0.5, 0.8), maxNumberOfIterations = 100, seed = 1234567890
        ),
        "Illegal argument: 'conditionalPower' c(0.5, 0.8) must be a single numeric value",
        fixed = TRUE
    )
})

test_plan_section("Testing the Simulation of Survival Data for Different Parameter Variants")

test_that("'getSimulationSurvival': Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingTestingOneHypothesis}
    # @refFS[Tab.]{fs:tab:output:getSimulationSurvival}
    # @refFS[Formula]{fs:simulationSurvivalTimeGenerate}
    # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
    # @refFS[Formula]{fs:simulationSurvivalLogRank}
    # @refFS[Formula]{fs:simulationSurvivalIncrements}
    # @refFS[Formula]{fs:simulationSurvivalHazardEstimate}
    # Minimal call: everything except plannedEvents, maxNumberOfSubjects,
    # iterations and seed is left at its default.
    simulationResult <- getSimulationSurvival(
        plannedEvents = 40, maxNumberOfSubjects = 200,
        maxNumberOfIterations = 100, seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object
    ## 'simulationResult' with expected results
    expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07)
    expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07)
    expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[1, ], c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07)
expect_equal(simulationResult$studyDuration, c(17.941133, 15.499503, 13.535749, 12.34), tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$overallReject, c(0.01, 0.3, 0.68, 0.95), tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], c(0.01, 0.3, 0.68, 0.95), tolerance = 1e-07) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 199.71, 196.74), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResult), NA))) expect_output(print(simulationResult)$show()) invisible(capture.output(expect_error(summary(simulationResult), NA))) expect_output(summary(simulationResult)$show()) simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda2, simulationResult$lambda2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05) expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05) 
expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05) expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05) expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05) expect_type(names(simulationResult), "character") df <- as.data.frame(simulationResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': Determine necessary accrual time if 200 subjects and 30 subjects per time unit can be recruited", { .skipTestIfDisabled() simulationResult <- getSimulationSurvival( plannedEvents = 40, accrualTime = 0, accrualIntensity = 30, maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$analysisTime[1, ], 
# NOTE(review): expected values throughout these tests are seeded regression
# anchors (seed = 1234567890, maxNumberOfIterations = 100) — they pin exact
# simulated output of getSimulationSurvival(), not analytically derived values.
c(15.255121, 12.685136, 10.656532, 9.4294312), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(15.255121, 12.685136, 10.656532, 9.4294312), tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$overallReject, c(0.02, 0.28, 0.77, 0.96), tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], c(0.02, 0.28, 0.77, 0.96), tolerance = 1e-07) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResult), NA))) expect_output(print(simulationResult)$show()) invisible(capture.output(expect_error(summary(simulationResult), NA))) expect_output(summary(simulationResult)$show()) simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda2, simulationResult$lambda2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05) expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 
1e-05) expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05) expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05) expect_type(names(simulationResult), "character") df <- as.data.frame(simulationResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': Determine necessary accrual time if 200 subjects and if the first 6 time units 20 subjects per time unit can be recruited, then 30 subjects per time unit", { .skipTestIfDisabled() simulationResult <- getSimulationSurvival( plannedEvents = 40, accrualTime = c(0, 6), accrualIntensity = c(20, 30), maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$analysisTime[1, ], c(16.683961, 14.141068, 
12.109744, 10.96314), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$overallReject, c(0.01, 0.28, 0.72, 0.96), tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], c(0.01, 0.28, 0.72, 0.96), tolerance = 1e-07) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResult), NA))) expect_output(print(simulationResult)$show()) invisible(capture.output(expect_error(summary(simulationResult), NA))) expect_output(summary(simulationResult)$show()) simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda2, simulationResult$lambda2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05) expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05) 
# Round-trip check: the object rebuilt via eval(parse(getObjectRCode(...))) must
# reproduce every result field of the original simulation object.
expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05) expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05) expect_type(names(simulationResult), "character") df <- as.data.frame(simulationResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': Determine maximum number of Subjects if the first 6 time units 20 subjects per time unit can be recruited, and after 10 time units 30 subjects per time unit", { .skipTestIfDisabled() simulationResult <- getSimulationSurvival( plannedEvents = 40, accrualTime = c(0, 6, 10), accrualIntensity = c(20, 30), maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$maxNumberOfSubjects, 240) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$analysisTime[1, ], 
c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$overallReject, c(0.04, 0.33, 0.75, 0.95), tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], c(0.04, 0.33, 0.75, 0.95), tolerance = 1e-07) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(240, 240, 239.63, 237.56), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResult), NA))) expect_output(print(simulationResult)$show()) invisible(capture.output(expect_error(summary(simulationResult), NA))) expect_output(summary(simulationResult)$show()) simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$maxNumberOfSubjects, simulationResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda2, simulationResult$lambda2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05) expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsNotAchieved, 
simulationResult$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05) expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05) expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05) expect_type(names(simulationResult), "character") df <- as.data.frame(simulationResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': Specify accrual time as a list", { .skipTestIfDisabled() at <- list("0 - <6" = 20, "6 - Inf" = 30) simulationResult <- getSimulationSurvival( plannedEvents = 40, accrualTime = at, maxNumberOfSubjects = 200, maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$analysisTime[1, ], 
# NOTE(review): the anchors below intentionally match the vector-based
# accrualTime test above — the list form is an equivalent specification, so the
# two tests must stay in sync.
c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07) expect_equal(simulationResult$studyDuration, c(16.683961, 14.141068, 12.109744, 10.96314), tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$overallReject, c(0.01, 0.28, 0.72, 0.96), tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], c(0.01, 0.28, 0.72, 0.96), tolerance = 1e-07) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(200, 200, 200, 200)) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResult), NA))) expect_output(print(simulationResult)$show()) invisible(capture.output(expect_error(summary(simulationResult), NA))) expect_output(summary(simulationResult)$show()) simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda2, simulationResult$lambda2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05) expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05) 
expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05) expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05) expect_type(names(simulationResult), "character") df <- as.data.frame(simulationResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': Specify accrual time as a list, if maximum number of subjects need to be calculated", { .skipTestIfDisabled() at <- list("0 - <6" = 20, "6 - <=10" = 30) simulationResult <- getSimulationSurvival( plannedEvents = 40, accrualTime = at, maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results expect_equal(simulationResult$median1, c(37.275405, 23.320299, 16.282985, 12), tolerance = 1e-07) expect_equal(simulationResult$median2, 37.275405, tolerance = 1e-07) expect_equal(simulationResult$maxNumberOfSubjects, 240) expect_equal(simulationResult$lambda1, c(0.018595296, 0.029722912, 0.042568802, 0.057762265), tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, c(1, 1.5984103, 2.2892242, 3.1062837), tolerance = 1e-07) expect_equal(simulationResult$analysisTime[1, ], c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07) 
expect_equal(simulationResult$studyDuration, c(15.210978, 13.172199, 11.59631, 10.698373), tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], c(0, 0, 0, 0)) expect_equal(simulationResult$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simulationResult$overallReject, c(0.04, 0.33, 0.75, 0.95), tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], c(0.04, 0.33, 0.75, 0.95), tolerance = 1e-07) expect_equal(simulationResult$earlyStop, c(0, 0, 0, 0)) expect_equal(simulationResult$expectedNumberOfSubjects, c(240, 240, 239.63, 237.56), tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, c(40, 40, 40, 40)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResult), NA))) expect_output(print(simulationResult)$show()) invisible(capture.output(expect_error(summary(simulationResult), NA))) expect_output(summary(simulationResult)$show()) simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$maxNumberOfSubjects, simulationResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda2, simulationResult$lambda2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05) expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05) 
# Test below: two-stage group sequential design (O'Brien & Fleming), effect
# size given via event rates pi1/pi2 at eventTime = 24; directionUpper = FALSE
# because the alternative is hazard ratio < 1.
expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05) expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05) expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05) expect_type(names(simulationResult), "character") df <- as.data.frame(simulationResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': Specify effect size for a two-stage group design with O'Brien & Fleming boundaries Effect size is based on event rates at specified event time, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { .skipTestIfDisabled() simulationResult <- getSimulationSurvival( design = getDesignGroupSequential(kMax = 2), pi1 = 0.2, pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results expect_equal(simulationResult$median1, 74.550809, tolerance = 1e-07) expect_equal(simulationResult$median2, 46.640597, tolerance = 1e-07) expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.009297648, tolerance = 1e-07) 
expect_equal(simulationResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, 0.62562161, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[1, ], 14.769473, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 24.499634, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 24.198958, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 199.47, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 20) expect_equal(simulationResult$overallEventsPerStage[1, ], 20) expect_equal(simulationResult$overallEventsPerStage[2, ], 40) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 97) expect_equal(simulationResult$overallReject, 0.27, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.03, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.24, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$earlyStop, 0.03, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfSubjects, 199.9841, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, 39.4, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.29516222, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResult), NA))) expect_output(print(simulationResult)$show()) invisible(capture.output(expect_error(summary(simulationResult), NA))) expect_output(summary(simulationResult)$show()) simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, 
stringWrapParagraphWidth = NULL))) expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda2, simulationResult$lambda2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05) expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultCodeBased$numberOfSubjects, simulationResult$numberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsPerStage, simulationResult$eventsPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$overallEventsPerStage, simulationResult$overallEventsPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05) expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05) expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfEvents, 
simulationResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simulationResultCodeBased$conditionalPowerAchieved, simulationResult$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simulationResult), "character") df <- as.data.frame(simulationResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': As above, but with a three-stage O'Brien and Fleming design with specified information rates, note that planned events consists of integer values", { .skipTestIfDisabled() d3 <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1)) simulationResult <- getSimulationSurvival( design = d3, pi1 = 0.2, pi2 = 0.3, eventTime = 24, plannedEvents = round(d3$informationRates * 40), maxNumberOfSubjects = 200, directionUpper = FALSE, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results expect_equal(simulationResult$median1, 74.550809, tolerance = 1e-07) expect_equal(simulationResult$median2, 46.640597, tolerance = 1e-07) expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.009297648, tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(simulationResult$hazardRatio, 0.62562161, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[1, ], 13.073331, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 18.748105, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[3, ], 24.810251, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 23.877826, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$eventsNotAchieved[3, ], 0) 
expect_equal(simulationResult$numberOfSubjects[1, ], 195.313, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$numberOfSubjects[3, ], 200) expect_equal(simulationResult$eventsPerStage[1, ], 16) expect_equal(simulationResult$eventsPerStage[2, ], 12) expect_equal(simulationResult$eventsPerStage[3, ], 12) expect_equal(simulationResult$overallEventsPerStage[1, ], 16) expect_equal(simulationResult$overallEventsPerStage[2, ], 28) expect_equal(simulationResult$overallEventsPerStage[3, ], 40) expect_equal(simulationResult$iterations[1, ], 1000) expect_equal(simulationResult$iterations[2, ], 985) expect_equal(simulationResult$iterations[3, ], 861) expect_equal(simulationResult$overallReject, 0.322, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.015, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.124, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[3, ], 0.183, tolerance = 1e-07) expect_equal(simulationResult$futilityStop, 0) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$futilityPerStage[2, ], 0) expect_equal(simulationResult$earlyStop, 0.139, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfSubjects, 199.92969, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, 38.152, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.19637573, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[3, ], 0.23542216, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResult), NA))) expect_output(print(simulationResult)$show()) invisible(capture.output(expect_error(summary(simulationResult), NA))) expect_output(summary(simulationResult)$show()) simulationResultCodeBased <- eval(parse(text = 
getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda2, simulationResult$lambda2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05) expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05) expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultCodeBased$numberOfSubjects, simulationResult$numberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsPerStage, simulationResult$eventsPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$overallEventsPerStage, simulationResult$overallEventsPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05) expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05) expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityStop, simulationResult$futilityStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, 
# (continued round-trip comparison; the next test derives pi1 = 0.16333997 from
# pi2 = 0.3 and hazardRatio = 0.5 at eventTime = 24, with directionUpper =
# FALSE because the alternative is hazard ratio < 1.)
simulationResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simulationResultCodeBased$conditionalPowerAchieved, simulationResult$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simulationResult), "character") df <- as.data.frame(simulationResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': Effect size is based on event rate at specified event time for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { .skipTestIfDisabled() simulationResult <- getSimulationSurvival( design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results expect_equal(simulationResult$pi1, 0.16333997, tolerance = 1e-07) expect_equal(simulationResult$median1, 93.281194, tolerance = 1e-07) expect_equal(simulationResult$median2, 46.640597, tolerance = 1e-07) expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.007430728, tolerance = 1e-07) expect_equal(simulationResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[1, ], 15.596955, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 26.310745, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 25.440402, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) 
expect_equal(simulationResult$numberOfSubjects[1, ], 199.69, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 20) expect_equal(simulationResult$overallEventsPerStage[1, ], 20) expect_equal(simulationResult$overallEventsPerStage[2, ], 40) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 92) expect_equal(simulationResult$overallReject, 0.52, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.08, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.44, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$earlyStop, 0.08, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfSubjects, 199.9752, tolerance = 1e-07) expect_equal(simulationResult$expectedNumberOfEvents, 38.4, tolerance = 1e-07) expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.43087375, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simulationResult), NA))) expect_output(print(simulationResult)$show()) invisible(capture.output(expect_error(summary(simulationResult), NA))) expect_output(summary(simulationResult)$show()) simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL))) expect_equal(simulationResultCodeBased$pi1, simulationResult$pi1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05) expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, 
tolerance = 1e-05) expect_equal(simulationResultCodeBased$lambda2, simulationResult$lambda2, tolerance = 1e-05) expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05) expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05) expect_equal(simulationResultCodeBased$numberOfSubjects, simulationResult$numberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$eventsPerStage, simulationResult$eventsPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$overallEventsPerStage, simulationResult$overallEventsPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05) expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05) expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05) expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simulationResultCodeBased$conditionalPowerAchieved, simulationResult$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simulationResult), "character") df <- as.data.frame(simulationResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simulationResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationSurvival': Effect size is 
based on hazard rate for the reference group and hazard ratio, directionUpper = FALSE needs to be specified because it should be shown that hazard ratio < 1", { .skipTestIfDisabled() simulationResult <- getSimulationSurvival( design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02, plannedEvents = c(20, 40), maxNumberOfSubjects = 200, directionUpper = FALSE, maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results expect_equal(simulationResult$median1, 69.314718, tolerance = 1e-07) expect_equal(simulationResult$median2, 34.657359, tolerance = 1e-07) expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07) expect_equal(simulationResult$lambda1, 0.01, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[1, ], 13.132525, tolerance = 1e-07) expect_equal(simulationResult$analysisTime[2, ], 21.186744, tolerance = 1e-07) expect_equal(simulationResult$studyDuration, 20.690944, tolerance = 1e-07) expect_equal(simulationResult$eventsNotAchieved[1, ], 0) expect_equal(simulationResult$eventsNotAchieved[2, ], 0) expect_equal(simulationResult$numberOfSubjects[1, ], 195.5, tolerance = 1e-07) expect_equal(simulationResult$numberOfSubjects[2, ], 200) expect_equal(simulationResult$eventsPerStage[1, ], 20) expect_equal(simulationResult$eventsPerStage[2, ], 20) expect_equal(simulationResult$overallEventsPerStage[1, ], 20) expect_equal(simulationResult$overallEventsPerStage[2, ], 40) expect_equal(simulationResult$iterations[1, ], 100) expect_equal(simulationResult$iterations[2, ], 94) expect_equal(simulationResult$overallReject, 0.49, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[1, ], 0.06, tolerance = 1e-07) expect_equal(simulationResult$rejectPerStage[2, ], 0.43, tolerance = 1e-07) expect_equal(simulationResult$futilityPerStage[1, ], 0) expect_equal(simulationResult$earlyStop, 0.06, tolerance = 1e-07) 
# Pinned regression values (seed 1234567890) for the directionUpper = FALSE run.
expect_equal(simulationResult$expectedNumberOfSubjects, 199.73, tolerance = 1e-07)
expect_equal(simulationResult$expectedNumberOfEvents, 38.8, tolerance = 1e-07)
expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.48014443, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print() and summary() must run without error and produce output.
    invisible(capture.output(expect_error(print(simulationResult), NA)))
    expect_output(print(simulationResult)$show())
    invisible(capture.output(expect_error(summary(simulationResult), NA)))
    expect_output(summary(simulationResult)$show())
    # Round trip: rebuild the result object from its generated R code and
    # compare every reported field against the original object.
    simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL)))
    expect_equal(simulationResultCodeBased$median1, simulationResult$median1, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$median2, simulationResult$median2, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$numberOfSubjects, simulationResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$eventsPerStage, simulationResult$eventsPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$overallEventsPerStage, simulationResult$overallEventsPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$conditionalPowerAchieved, simulationResult$conditionalPowerAchieved, tolerance = 1e-05)
# Smoke checks for the generic conversions of the result object.
expect_type(names(simulationResult), "character")
df <- as.data.frame(simulationResult)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(simulationResult)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

test_that("'getSimulationSurvival': Specification of piecewise exponential survival time and hazard ratios, note that in getSimulationSurvival only one hazard ratio is used in the case that the survival time is piecewise exponential", {
    .skipTestIfDisabled()

    # Piecewise exponential control hazard plus a single hazard ratio; seed fixed
    # so all expectations below are deterministic regression values.
    simulationResult <- getSimulationSurvival(
        design = getDesignGroupSequential(kMax = 2),
        piecewiseSurvivalTime = c(0, 5, 10),
        lambda2 = c(0.01, 0.02, 0.04),
        hazardRatio = 1.5,
        plannedEvents = c(20, 40),
        maxNumberOfSubjects = 200,
        maxNumberOfIterations = 100,
        seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07)
    expect_equal(simulationResult$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResult$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07)
    expect_equal(simulationResult$numberOfSubjects[2, ], 200)
    expect_equal(simulationResult$eventsPerStage[1, ], 20)
    expect_equal(simulationResult$eventsPerStage[2, ], 20)
    expect_equal(simulationResult$overallEventsPerStage[1, ], 20)
    expect_equal(simulationResult$overallEventsPerStage[2, ], 40)
    expect_equal(simulationResult$iterations[1, ], 100)
    expect_equal(simulationResult$iterations[2, ], 96)
    expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07)
    expect_equal(simulationResult$futilityPerStage[1, ], 0)
    expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07)
    expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07)
    expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print() and summary() must run without error and produce output.
        invisible(capture.output(expect_error(print(simulationResult), NA)))
        expect_output(print(simulationResult)$show())
        invisible(capture.output(expect_error(summary(simulationResult), NA)))
        expect_output(summary(simulationResult)$show())
        # Round trip: rebuild the object from generated R code and compare fields.
        simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL)))
        expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$numberOfSubjects, simulationResult$numberOfSubjects, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$eventsPerStage, simulationResult$eventsPerStage, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$overallEventsPerStage, simulationResult$overallEventsPerStage, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05)
expect_equal(simulationResultCodeBased$conditionalPowerAchieved, simulationResult$conditionalPowerAchieved, tolerance = 1e-05)
# Smoke checks for the generic conversions of the result object.
expect_type(names(simulationResult), "character")
df <- as.data.frame(simulationResult)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(simulationResult)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# Same scenario, but the piecewise survival time is given as a named list;
# the expected results below are identical to the vector-based call above.
pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04)
simulationResult <- getSimulationSurvival(
    design = getDesignGroupSequential(kMax = 2),
    piecewiseSurvivalTime = pws,
    hazardRatio = c(1.5),
    plannedEvents = c(20, 40),
    maxNumberOfSubjects = 200,
    maxNumberOfIterations = 100,
    seed = 1234567890
)

## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07)
expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07)
expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07)
expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07)
expect_equal(simulationResult$eventsNotAchieved[1, ], 0)
expect_equal(simulationResult$eventsNotAchieved[2, ], 0)
expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07)
expect_equal(simulationResult$numberOfSubjects[2, ], 200)
expect_equal(simulationResult$eventsPerStage[1, ], 20)
expect_equal(simulationResult$eventsPerStage[2, ], 20)
expect_equal(simulationResult$overallEventsPerStage[1, ], 20)
expect_equal(simulationResult$overallEventsPerStage[2, ], 40)
expect_equal(simulationResult$iterations[1, ], 100)
expect_equal(simulationResult$iterations[2, ], 96)
expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07)
expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07)
expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07)
expect_equal(simulationResult$futilityPerStage[1, ], 0)
expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07)
expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07)
expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07)
expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print() and summary() must run without error and produce output.
    invisible(capture.output(expect_error(print(simulationResult), NA)))
    expect_output(print(simulationResult)$show())
    invisible(capture.output(expect_error(summary(simulationResult), NA)))
    expect_output(summary(simulationResult)$show())
    # Round trip: rebuild the object from generated R code and compare fields.
    simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL)))
    expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$numberOfSubjects, simulationResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$eventsPerStage, simulationResult$eventsPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$overallEventsPerStage, simulationResult$overallEventsPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$conditionalPowerAchieved, simulationResult$conditionalPowerAchieved, tolerance = 1e-05)
# Smoke checks for the generic conversions of the result object.
expect_type(names(simulationResult), "character")
df <- as.data.frame(simulationResult)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(simulationResult)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

test_that("'getSimulationSurvival': Specification of piecewise exponential survival time for both treatment arms", {
    .skipTestIfDisabled()

    # Both arms get explicit piecewise hazards (lambda1 = 1.5 * lambda2); seed
    # fixed so all expectations below are deterministic regression values.
    simulationResult <- getSimulationSurvival(
        design = getDesignGroupSequential(kMax = 2),
        piecewiseSurvivalTime = c(0, 5, 10),
        lambda2 = c(0.01, 0.02, 0.04),
        lambda1 = c(0.015, 0.03, 0.06),
        plannedEvents = c(20, 40),
        maxNumberOfSubjects = 200,
        maxNumberOfIterations = 100,
        seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$hazardRatio, 1.5, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07)
    expect_equal(simulationResult$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResult$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07)
    expect_equal(simulationResult$numberOfSubjects[2, ], 200)
    expect_equal(simulationResult$eventsPerStage[1, ], 20)
    expect_equal(simulationResult$eventsPerStage[2, ], 20)
    expect_equal(simulationResult$overallEventsPerStage[1, ], 20)
    expect_equal(simulationResult$overallEventsPerStage[2, ], 40)
    expect_equal(simulationResult$iterations[1, ], 100)
    expect_equal(simulationResult$iterations[2, ], 96)
    expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07)
    expect_equal(simulationResult$futilityPerStage[1, ], 0)
    expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07)
    expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07)
    expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07)
    expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
    expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print() and summary() must run without error and produce output.
        invisible(capture.output(expect_error(print(simulationResult), NA)))
        expect_output(print(simulationResult)$show())
        invisible(capture.output(expect_error(summary(simulationResult), NA)))
        expect_output(summary(simulationResult)$show())
        # Round trip: rebuild the object from generated R code and compare fields.
        simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL)))
        expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$numberOfSubjects, simulationResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$eventsPerStage, simulationResult$eventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$overallEventsPerStage, simulationResult$overallEventsPerStage, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simulationResultCodeBased$conditionalPowerAchieved, simulationResult$conditionalPowerAchieved, tolerance = 1e-05)
        # Smoke checks for the generic conversions of the result object.
        expect_type(names(simulationResult), "character")
        df <- as.data.frame(simulationResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simulationResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
    # Same scenario specified via a named list of piecewise hazards plus a
    # hazard ratio; expected results are identical to the call above.
    pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04)
    simulationResult <- getSimulationSurvival(
        design = getDesignGroupSequential(kMax = 2),
        piecewiseSurvivalTime = pws,
        hazardRatio = 1.5,
        plannedEvents = c(20, 40),
        maxNumberOfSubjects = 200,
        maxNumberOfIterations = 100,
        seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
    expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
    expect_equal(simulationResult$lambda1, c(0.015, 0.03, 0.06), tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[1, ], 12.106711, tolerance = 1e-07)
    expect_equal(simulationResult$analysisTime[2, ], 16.150578, tolerance = 1e-07)
    expect_equal(simulationResult$studyDuration, 16.020702, tolerance = 1e-07)
    expect_equal(simulationResult$eventsNotAchieved[1, ], 0)
    expect_equal(simulationResult$eventsNotAchieved[2, ], 0)
    expect_equal(simulationResult$numberOfSubjects[1, ], 193.51, tolerance = 1e-07)
    expect_equal(simulationResult$numberOfSubjects[2, ], 200)
expect_equal(simulationResult$eventsPerStage[1, ], 20)
expect_equal(simulationResult$eventsPerStage[2, ], 20)
expect_equal(simulationResult$overallEventsPerStage[1, ], 20)
expect_equal(simulationResult$overallEventsPerStage[2, ], 40)
expect_equal(simulationResult$iterations[1, ], 100)
expect_equal(simulationResult$iterations[2, ], 96)
expect_equal(simulationResult$overallReject, 0.32, tolerance = 1e-07)
expect_equal(simulationResult$rejectPerStage[1, ], 0.04, tolerance = 1e-07)
expect_equal(simulationResult$rejectPerStage[2, ], 0.28, tolerance = 1e-07)
expect_equal(simulationResult$futilityPerStage[1, ], 0)
expect_equal(simulationResult$earlyStop, 0.04, tolerance = 1e-07)
expect_equal(simulationResult$expectedNumberOfSubjects, 199.7404, tolerance = 1e-07)
expect_equal(simulationResult$expectedNumberOfEvents, 39.2, tolerance = 1e-07)
expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.33404702, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print() and summary() must run without error and produce output.
    invisible(capture.output(expect_error(print(simulationResult), NA)))
    expect_output(print(simulationResult)$show())
    invisible(capture.output(expect_error(summary(simulationResult), NA)))
    expect_output(summary(simulationResult)$show())
    # Round trip: rebuild the object from generated R code and compare fields.
    simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL)))
    expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$lambda1, simulationResult$lambda1, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$numberOfSubjects, simulationResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$eventsPerStage, simulationResult$eventsPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$overallEventsPerStage, simulationResult$overallEventsPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$conditionalPowerAchieved, simulationResult$conditionalPowerAchieved, tolerance = 1e-05)
    # Smoke checks for the generic conversions of the result object.
    expect_type(names(simulationResult), "character")
    df <- as.data.frame(simulationResult)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(simulationResult)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# Third variant: lambda1 differs from lambda2 only in the last interval,
# yielding a piecewise hazard ratio of c(1, 1, 1.5).
simulationResult <- getSimulationSurvival(
    design = getDesignGroupSequential(kMax = 2),
    piecewiseSurvivalTime = c(0, 5, 10),
    lambda2 = c(0.01, 0.02, 0.04),
    lambda1 = c(0.01, 0.02, 0.06),
    plannedEvents = c(20, 40),
    maxNumberOfSubjects = 200,
    maxNumberOfIterations = 100,
    seed = 1234567890
)

## Comparison of the results of SimulationResultsSurvival object 'simulationResult' with expected results
expect_equal(simulationResult$accrualIntensity, 16.666667, tolerance = 1e-07)
expect_equal(simulationResult$hazardRatio, c(1, 1, 1.5), tolerance = 1e-07)
expect_equal(simulationResult$analysisTime[1, ], 12.973056, tolerance = 1e-07)
expect_equal(simulationResult$analysisTime[2, ], 17.030809, tolerance = 1e-07)
expect_equal(simulationResult$studyDuration, 17.030809, tolerance = 1e-07)
expect_equal(simulationResult$eventsNotAchieved[1, ], 0)
expect_equal(simulationResult$eventsNotAchieved[2, ], 0)
expect_equal(simulationResult$numberOfSubjects[1, ], 197.81, tolerance = 1e-07)
expect_equal(simulationResult$numberOfSubjects[2, ], 200)
expect_equal(simulationResult$eventsPerStage[1, ], 20)
expect_equal(simulationResult$eventsPerStage[2, ], 20)
expect_equal(simulationResult$overallEventsPerStage[1, ], 20)
expect_equal(simulationResult$overallEventsPerStage[2, ], 40)
expect_equal(simulationResult$iterations[1, ], 100)
expect_equal(simulationResult$iterations[2, ], 100)
expect_equal(simulationResult$overallReject, 0.06, tolerance = 1e-07)
expect_equal(simulationResult$rejectPerStage[1, ], 0)
expect_equal(simulationResult$rejectPerStage[2, ], 0.06, tolerance = 1e-07)
expect_equal(simulationResult$futilityPerStage[1, ], 0)
expect_equal(simulationResult$earlyStop, 0)
expect_equal(simulationResult$expectedNumberOfSubjects, 200)
expect_equal(simulationResult$expectedNumberOfEvents, 40)
expect_equal(simulationResult$conditionalPowerAchieved[1, ], NA_real_)
expect_equal(simulationResult$conditionalPowerAchieved[2, ], 0.1789388, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print() and summary() must run without error and produce output.
    invisible(capture.output(expect_error(print(simulationResult), NA)))
    expect_output(print(simulationResult)$show())
    invisible(capture.output(expect_error(summary(simulationResult), NA)))
    expect_output(summary(simulationResult)$show())
    # Round trip: rebuild the object from generated R code and compare fields.
    simulationResultCodeBased <- eval(parse(text = getObjectRCode(simulationResult, stringWrapParagraphWidth = NULL)))
    expect_equal(simulationResultCodeBased$accrualIntensity, simulationResult$accrualIntensity, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$hazardRatio, simulationResult$hazardRatio, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$analysisTime, simulationResult$analysisTime, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$studyDuration, simulationResult$studyDuration, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$eventsNotAchieved, simulationResult$eventsNotAchieved, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$numberOfSubjects, simulationResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$eventsPerStage, simulationResult$eventsPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$overallEventsPerStage, simulationResult$overallEventsPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$iterations, simulationResult$iterations, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$overallReject, simulationResult$overallReject, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$rejectPerStage, simulationResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$futilityPerStage, simulationResult$futilityPerStage, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$earlyStop, simulationResult$earlyStop, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$expectedNumberOfSubjects, simulationResult$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$expectedNumberOfEvents, simulationResult$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(simulationResultCodeBased$conditionalPowerAchieved, simulationResult$conditionalPowerAchieved, tolerance = 1e-05)
    # Smoke checks for the generic conversions of the result object.
    expect_type(names(simulationResult), "character")
    df <- as.data.frame(simulationResult)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(simulationResult)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

test_that("'getSimulationSurvival': Perform recalculation of number of events based on conditional power", {
    .skipTestIfDisabled()

    # Perform recalculation of number of events based on conditional power for a
    # three-stage design with inverse normal combination test, where the conditional power
    # is calculated under the specified effect size thetaH1 = 1.3 and up to a four-fold
    # increase in originally planned sample size (number of events) is allowed
    # Note that the first value in \code{minNumberOfEventsPerStage} and
    # \code{maxNumberOfEventsPerStage} is arbitrary, i.e., it has no effect.
    dIN <- getDesignInverseNormal(informationRates = c(0.4, 0.7, 1))
    resultsWithSSR1 <- getSimulationSurvival(
        design = dIN,
        hazardRatio = seq(1, 1.6, 0.1),
        pi2 = 0.3,
        conditionalPower = 0.8,
        thetaH1 = 1.3,
        plannedEvents = c(58, 102, 146),
        minNumberOfEventsPerStage = c(NA_real_, 44, 44),
        maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
        maxNumberOfSubjects = 800,
        maxNumberOfIterations = 100,
        seed = 1234567890
    )

    ## Comparison of the results of SimulationResultsSurvival object 'resultsWithSSR1' with expected results
    expect_equal(resultsWithSSR1$pi1, c(0.3, 0.32452723, 0.34819506, 0.37103359, 0.39307188, 0.41433798, 0.43485894), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$median1, c(23.320299, 21.200271, 19.433582, 17.938691, 16.657356, 15.546866, 14.575187), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$median2, 23.320299, tolerance = 1e-07)
    expect_equal(resultsWithSSR1$accrualIntensity, 66.666667, tolerance = 1e-07)
    expect_equal(resultsWithSSR1$lambda1, c(0.029722912, 0.032695203, 0.035667494, 0.038639786, 0.041612077, 0.044584368, 0.047556659), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$lambda2, 0.029722912, tolerance = 1e-07)
    expect_equal(resultsWithSSR1$analysisTime[1, ], c(7.9761501, 7.8239889, 7.5191849, 7.4832292, 7.3291066, 7.1091953, 6.9737455), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$analysisTime[2, ], c(17.76189, 17.229038, 16.567328, 16.175906, 15.668575, 15.328143, 14.604753), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$analysisTime[3, ], c(30.192276, 28.615009, 26.463502, 25.657109, 23.821118, 23.34898, 22.534023), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$studyDuration, c(29.683899, 28.160756, 25.20615, 22.190278, 19.319577, 18.030286, 14.789904), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$eventsNotAchieved[1, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(resultsWithSSR1$eventsNotAchieved[2, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(resultsWithSSR1$eventsNotAchieved[3, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(resultsWithSSR1$numberOfSubjects[1, ], c(531.25, 521.07, 500.8, 498.42, 488.13, 473.47, 464.37), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$numberOfSubjects[2, ], c(800, 800, 799.45, 798.66327, 796.55208, 797.06061, 793.47826), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$numberOfSubjects[3, ], c(800, 800, 800, 800, 800, 800, 800))
    expect_equal(resultsWithSSR1$eventsPerStage[1, ], c(58, 58, 58, 58, 58, 58, 58))
    expect_equal(resultsWithSSR1$eventsPerStage[2, ], c(175.65, 173.27, 171.84, 171.43878, 170.57292, 169.67677, 161.44565), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$eventsPerStage[3, ], c(175.28125, 169.51042, 154.10227, 148.89552, 137.3, 142.17143, 133.72727), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$overallEventsPerStage[1, ], c(58, 58, 58, 58, 58, 58, 58))
    expect_equal(resultsWithSSR1$overallEventsPerStage[2, ], c(233.65, 231.27, 229.84, 229.43878, 228.57292, 227.67677, 219.44565), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$overallEventsPerStage[3, ], c(408.93125, 400.78042, 383.94227, 378.3343, 365.87292, 369.8482, 353.17292), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100))
    expect_equal(resultsWithSSR1$iterations[2, ], c(100, 100, 100, 98, 96, 99, 92))
    expect_equal(resultsWithSSR1$iterations[3, ], c(96, 96, 88, 67, 50, 35, 11))
    expect_equal(resultsWithSSR1$overallReject, c(0.04, 0.16, 0.38, 0.75, 0.91, 0.95, 1), tolerance = 1e-07)
    expect_equal(resultsWithSSR1$rejectPerStage[1, ], c(0, 0, 0, 0.02, 0.04, 0.01, 0.08), tolerance = 1e-07)
expect_equal(resultsWithSSR1$rejectPerStage[2, ], c(0.04, 0.04, 0.12, 0.31, 0.46, 0.64, 0.81), tolerance = 1e-07)
expect_equal(resultsWithSSR1$rejectPerStage[3, ], c(0, 0.12, 0.26, 0.42, 0.41, 0.3, 0.11), tolerance = 1e-07)
expect_equal(resultsWithSSR1$futilityStop, c(0, 0, 0, 0, 0, 0, 0))
expect_equal(resultsWithSSR1$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0))
expect_equal(resultsWithSSR1$futilityPerStage[2, ], c(0, 0, 0, 0, 0, 0, 0))
expect_equal(resultsWithSSR1$earlyStop, c(0.04, 0.04, 0.12, 0.33, 0.5, 0.65, 0.89), tolerance = 1e-07)
expect_equal(resultsWithSSR1$expectedNumberOfSubjects, c(800, 800, 799.934, 793.55401, 785.93916, 794.85349, 767.86699), tolerance = 1e-07)
expect_equal(resultsWithSSR1$expectedNumberOfEvents, c(401.92, 394, 365.45, 325.77, 290.4, 275.74, 221.24), tolerance = 1e-07)
expect_equal(resultsWithSSR1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(resultsWithSSR1$conditionalPowerAchieved[2, ], c(0.12165751, 0.15502837, 0.23497758, 0.29890789, 0.33886493, 0.41286728, 0.49916888), tolerance = 1e-07)
expect_equal(resultsWithSSR1$conditionalPowerAchieved[3, ], c(0.14749827, 0.23857933, 0.44868993, 0.59763371, 0.65378645, 0.66059558, 0.69812096), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print() and summary() must run without error and produce output.
    invisible(capture.output(expect_error(print(resultsWithSSR1), NA)))
    expect_output(print(resultsWithSSR1)$show())
    invisible(capture.output(expect_error(summary(resultsWithSSR1), NA)))
    expect_output(summary(resultsWithSSR1)$show())
    # Round trip: rebuild the object from generated R code and compare fields.
    resultsWithSSR1CodeBased <- eval(parse(text = getObjectRCode(resultsWithSSR1, stringWrapParagraphWidth = NULL)))
    expect_equal(resultsWithSSR1CodeBased$pi1, resultsWithSSR1$pi1, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$median1, resultsWithSSR1$median1, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$median2, resultsWithSSR1$median2, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$accrualIntensity, resultsWithSSR1$accrualIntensity, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$lambda1, resultsWithSSR1$lambda1, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$lambda2, resultsWithSSR1$lambda2, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$analysisTime, resultsWithSSR1$analysisTime, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$studyDuration, resultsWithSSR1$studyDuration, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$eventsNotAchieved, resultsWithSSR1$eventsNotAchieved, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$numberOfSubjects, resultsWithSSR1$numberOfSubjects, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$eventsPerStage, resultsWithSSR1$eventsPerStage, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$overallEventsPerStage, resultsWithSSR1$overallEventsPerStage, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$iterations, resultsWithSSR1$iterations, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$overallReject, resultsWithSSR1$overallReject, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$rejectPerStage, resultsWithSSR1$rejectPerStage, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$futilityStop, resultsWithSSR1$futilityStop, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$futilityPerStage, resultsWithSSR1$futilityPerStage, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$earlyStop, resultsWithSSR1$earlyStop, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$expectedNumberOfSubjects, resultsWithSSR1$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$expectedNumberOfEvents, resultsWithSSR1$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(resultsWithSSR1CodeBased$conditionalPowerAchieved, resultsWithSSR1$conditionalPowerAchieved, tolerance = 1e-05)
    # Smoke checks for the generic conversions of the result object.
    expect_type(names(resultsWithSSR1), "character")
    df <- as.data.frame(resultsWithSSR1)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(resultsWithSSR1)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# If thetaH1 is unspecified, the observed hazard ratio estimate
# (calculated from the log-rank statistic) is used for performing the
# recalculation of the number of events
resultsWithSSR2 <- getSimulationSurvival(
    design = dIN,
    hazardRatio = seq(1, 1.6, 0.1),
    pi2 = 0.3,
    conditionalPower = 0.8,
    plannedEvents = c(58, 102, 146),
    minNumberOfEventsPerStage = c(NA_real_, 44, 44),
    maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44),
    maxNumberOfSubjects = 800,
    maxNumberOfIterations = 100,
    seed = 1234567890
)

## Comparison of the results of SimulationResultsSurvival object 'resultsWithSSR2' with expected results
expect_equal(resultsWithSSR2$pi1, c(0.3, 0.32452723, 0.34819506, 0.37103359, 0.39307188, 0.41433798, 0.43485894), tolerance = 1e-07)
expect_equal(resultsWithSSR2$median1, c(23.320299, 21.200271, 19.433582, 17.938691, 16.657356, 15.546866, 14.575187), tolerance = 1e-07)
expect_equal(resultsWithSSR2$median2, 23.320299, tolerance = 1e-07)
expect_equal(resultsWithSSR2$accrualIntensity, 66.666667, tolerance = 1e-07)
expect_equal(resultsWithSSR2$lambda1, c(0.029722912, 0.032695203, 0.035667494, 0.038639786, 0.041612077, 0.044584368, 0.047556659), tolerance = 1e-07)
expect_equal(resultsWithSSR2$lambda2, 0.029722912, tolerance = 1e-07)
expect_equal(resultsWithSSR2$analysisTime[1, ], c(7.9761501, 7.8239889, 7.5191849, 7.4832292, 7.3291066, 7.1091953, 6.9737455), tolerance = 1e-07)
expect_equal(resultsWithSSR2$analysisTime[2, ], c(17.532866, 16.792737, 15.753436, 15.242772, 14.414526, 13.395253, 12.536642), tolerance = 1e-07)
expect_equal(resultsWithSSR2$analysisTime[3, ], c(29.782185, 28.27297, 25.249508, 24.235039, 21.407797, 20.846814, 17.625231), tolerance = 1e-07)
expect_equal(resultsWithSSR2$studyDuration, c(29.663096, 27.530562, 24.305604, 21.136576, 18.176787, 16.398878, 13.170673), tolerance = 1e-07)
expect_equal(resultsWithSSR2$eventsNotAchieved[1, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$eventsNotAchieved[2, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$eventsNotAchieved[3, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$numberOfSubjects[1, ], c(531.25, 521.07, 500.8, 498.42, 488.13, 473.47, 464.37), tolerance = 1e-07) expect_equal(resultsWithSSR2$numberOfSubjects[2, ], c(798.3, 792.67, 784.71, 785.72449, 774.40625, 754.47475, 731), tolerance = 1e-07) expect_equal(resultsWithSSR2$numberOfSubjects[3, ], c(800, 800, 800, 800, 799.08333, 797.51111, 794.95238), tolerance = 1e-07) expect_equal(resultsWithSSR2$eventsPerStage[1, ], c(58, 58, 58, 58, 58, 58, 58)) expect_equal(resultsWithSSR2$eventsPerStage[2, ], c(171.71, 164.76, 155.91, 152.63265, 143.21875, 127.82828, 113.84783), tolerance = 1e-07) expect_equal(resultsWithSSR2$eventsPerStage[3, ], c(173.88889, 169.45263, 147.34783, 139.60563, 120.25, 125.66667, 100.2381), tolerance = 1e-07) expect_equal(resultsWithSSR2$overallEventsPerStage[1, ], c(58, 58, 58, 58, 58, 58, 58)) expect_equal(resultsWithSSR2$overallEventsPerStage[2, ], c(229.71, 222.76, 213.91, 210.63265, 201.21875, 185.82828, 171.84783), tolerance = 1e-07) expect_equal(resultsWithSSR2$overallEventsPerStage[3, ], c(403.59889, 392.21263, 361.25783, 350.23829, 321.46875, 311.49495, 272.08592), tolerance = 1e-07) expect_equal(resultsWithSSR2$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100)) expect_equal(resultsWithSSR2$iterations[2, ], c(100, 100, 100, 98, 96, 99, 92)) expect_equal(resultsWithSSR2$iterations[3, ], c(99, 95, 92, 71, 60, 45, 21)) expect_equal(resultsWithSSR2$overallReject, c(0.04, 0.16, 0.37, 0.68, 0.88, 0.92, 0.98), tolerance = 1e-07) expect_equal(resultsWithSSR2$rejectPerStage[1, ], c(0, 0, 0, 0.02, 0.04, 0.01, 0.08), tolerance = 1e-07) expect_equal(resultsWithSSR2$rejectPerStage[2, ], c(0.01, 0.05, 0.08, 0.27, 0.36, 0.54, 0.71), tolerance = 1e-07) expect_equal(resultsWithSSR2$rejectPerStage[3, ], 
c(0.03, 0.11, 0.29, 0.39, 0.48, 0.37, 0.19), tolerance = 1e-07) expect_equal(resultsWithSSR2$futilityStop, c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$futilityPerStage[2, ], c(0, 0, 0, 0, 0, 0, 0)) expect_equal(resultsWithSSR2$earlyStop, c(0.01, 0.05, 0.08, 0.29, 0.4, 0.55, 0.79), tolerance = 1e-07) expect_equal(resultsWithSSR2$expectedNumberOfSubjects, c(799.983, 799.6335, 798.7768, 790.11401, 777.76145, 771.03106, 723.0996), tolerance = 1e-07) expect_equal(resultsWithSSR2$expectedNumberOfEvents, c(401.86, 383.74, 349.47, 306.7, 267.64, 241.1, 183.79), tolerance = 1e-07) expect_equal(resultsWithSSR2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(resultsWithSSR2$conditionalPowerAchieved[2, ], c(0.13442705, 0.17515425, 0.27216274, 0.37121019, 0.42163288, 0.51345413, 0.62679958), tolerance = 1e-07) expect_equal(resultsWithSSR2$conditionalPowerAchieved[3, ], c(0.088787205, 0.13342075, 0.37806621, 0.51790868, 0.64116584, 0.64220287, 0.73456911), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(resultsWithSSR2), NA))) expect_output(print(resultsWithSSR2)$show()) invisible(capture.output(expect_error(summary(resultsWithSSR2), NA))) expect_output(summary(resultsWithSSR2)$show()) resultsWithSSR2CodeBased <- eval(parse(text = getObjectRCode(resultsWithSSR2, stringWrapParagraphWidth = NULL))) expect_equal(resultsWithSSR2CodeBased$pi1, resultsWithSSR2$pi1, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$median1, resultsWithSSR2$median1, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$median2, resultsWithSSR2$median2, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$accrualIntensity, resultsWithSSR2$accrualIntensity, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$lambda1, resultsWithSSR2$lambda1, tolerance = 1e-05) 
expect_equal(resultsWithSSR2CodeBased$lambda2, resultsWithSSR2$lambda2, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$analysisTime, resultsWithSSR2$analysisTime, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$studyDuration, resultsWithSSR2$studyDuration, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$eventsNotAchieved, resultsWithSSR2$eventsNotAchieved, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$numberOfSubjects, resultsWithSSR2$numberOfSubjects, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$eventsPerStage, resultsWithSSR2$eventsPerStage, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$overallEventsPerStage, resultsWithSSR2$overallEventsPerStage, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$iterations, resultsWithSSR2$iterations, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$overallReject, resultsWithSSR2$overallReject, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$rejectPerStage, resultsWithSSR2$rejectPerStage, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$futilityStop, resultsWithSSR2$futilityStop, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$futilityPerStage, resultsWithSSR2$futilityPerStage, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$earlyStop, resultsWithSSR2$earlyStop, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$expectedNumberOfSubjects, resultsWithSSR2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$expectedNumberOfEvents, resultsWithSSR2$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(resultsWithSSR2CodeBased$conditionalPowerAchieved, resultsWithSSR2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(resultsWithSSR2), "character") df <- as.data.frame(resultsWithSSR2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(resultsWithSSR2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # Compare 
it with design without event size recalculation resultsWithoutSSR <- getSimulationSurvival( design = dIN, hazardRatio = seq(1, 1.6, 0.1), pi2 = 0.3, plannedEvents = c(58, 102, 145), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of numeric object 'resultsWithoutSSR$overallReject' with expected results expect_equal(resultsWithoutSSR$overallReject, c(0.06, 0.09, 0.26, 0.36, 0.5, 0.62, 0.8), tolerance = 1e-07) ## Comparison of the results of numeric object 'resultsWithSSR1$overallReject' with expected results expect_equal(resultsWithSSR1$overallReject, c(0.04, 0.16, 0.38, 0.75, 0.91, 0.95, 1), tolerance = 1e-07) ## Comparison of the results of numeric object 'resultsWithSSR2$overallReject' with expected results expect_equal(resultsWithSSR2$overallReject, c(0.04, 0.16, 0.37, 0.68, 0.88, 0.92, 0.98), tolerance = 1e-07) }) test_that("'getSimulationSurvival': Confirm that event size racalcuation increases the Type I error rate, i.e., you have to use the combination test", { .skipTestIfDisabled() dGS <- getDesignGroupSequential(informationRates = c(0.4, 0.7, 1)) resultsWithSSRGS <- getSimulationSurvival( design = dGS, hazardRatio = seq(1), pi2 = 0.3, conditionalPower = 0.8, plannedEvents = c(58, 102, 145), minNumberOfEventsPerStage = c(NA_real_, 44, 44), maxNumberOfEventsPerStage = 4 * c(NA_real_, 44, 44), maxNumberOfSubjects = 800, maxNumberOfIterations = 100, seed = 1234567890 ) ## Comparison of the results of numeric object 'resultsWithSSRGS$overallReject' with expected results expect_equal(resultsWithSSRGS$overallReject, 0.05, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Confirm that different inputs of lambda, median, and pi with the identical meaning result in the same output", { .skipTestIfDisabled() x1 <- getSimulationSurvival( lambda2 = 0.4, hazardRatio = c(0.65, 0.7), plannedEvents = 98, maxNumberOfSubjects = 120, directionUpper = FALSE, maxNumberOfIterations = 1000, sided = 1, alpha = 0.1, 
seed = 123 ) x2 <- getSimulationSurvival( lambda2 = x1$.piecewiseSurvivalTime$lambda2, lambda1 = x1$.piecewiseSurvivalTime$lambda1, plannedEvents = 98, maxNumberOfSubjects = 120, directionUpper = FALSE, maxNumberOfIterations = 1000, sided = 1, alpha = 0.1, seed = 123 ) x3 <- getSimulationSurvival( piecewiseSurvivalTime = x2$.piecewiseSurvivalTime, plannedEvents = 98, maxNumberOfSubjects = 120, directionUpper = FALSE, maxNumberOfIterations = 1000, sided = 1, alpha = 0.1, seed = 123 ) x4 <- getSimulationSurvival( pi2 = getPiByLambda(x1$.piecewiseSurvivalTime$lambda2, 12L), hazardRatio = c(0.65, 0.7), plannedEvents = 98, maxNumberOfSubjects = 120, directionUpper = FALSE, maxNumberOfIterations = 1000, sided = 1, alpha = 0.1, seed = 123 ) x5 <- getSimulationSurvival( lambda2 = 0.4, lambda1 = x4$lambda1, plannedEvents = 98, maxNumberOfSubjects = 120, directionUpper = FALSE, maxNumberOfIterations = 1000, sided = 1, alpha = 0.1, seed = 123 ) x6 <- getSimulationSurvival( median2 = x5$median2, hazardRatio = c(0.65, 0.7), plannedEvents = 98, maxNumberOfSubjects = 120, directionUpper = FALSE, maxNumberOfIterations = 1000, sided = 1, alpha = 0.1, seed = 123 ) x7 <- getSimulationSurvival( median2 = x5$median2, median1 = x5$median1, plannedEvents = 98, maxNumberOfSubjects = 120, directionUpper = FALSE, maxNumberOfIterations = 1000, sided = 1, alpha = 0.1, seed = 123 ) ## Pairwise comparison of the results of x1 with the results of x2, x3, x4, x5, x6, and x7 expect_equal(x2$maxNumberOfIterations, x1$maxNumberOfIterations) expect_equal(x2$seed, x1$seed) expect_equal(x2$allocationRatioPlanned, x1$allocationRatioPlanned) expect_equal(x2$conditionalPower, x1$conditionalPower) expect_equal(x2$iterations[1, ], x1$iterations[1, ]) expect_equal(x2$futilityStop, x1$futilityStop) expect_equal(x2$directionUpper, x1$directionUpper) expect_equal(x2$plannedEvents, x1$plannedEvents) expect_equal(x2$minNumberOfEventsPerStage, x1$minNumberOfEventsPerStage) 
expect_equal(x2$maxNumberOfEventsPerStage, x1$maxNumberOfEventsPerStage) expect_equal(x2$thetaH1, x1$thetaH1) expect_equal(x2$expectedNumberOfEvents, x1$expectedNumberOfEvents) expect_equal(x2$pi1, x1$pi1, tolerance = 1e-07) expect_equal(x2$pi2, x1$pi2, tolerance = 1e-07) expect_equal(x2$median1, x1$median1, tolerance = 1e-07) expect_equal(x2$median2, x1$median2, tolerance = 1e-07) expect_equal(x2$maxNumberOfSubjects, x1$maxNumberOfSubjects) expect_equal(x2$accrualTime, x1$accrualTime) expect_equal(x2$accrualIntensity, x1$accrualIntensity) expect_equal(x2$dropoutRate1, x1$dropoutRate1) expect_equal(x2$dropoutRate2, x1$dropoutRate2) expect_equal(x2$dropoutTime, x1$dropoutTime) expect_equal(x2$eventTime, x1$eventTime) expect_equal(x2$thetaH0, x1$thetaH0) expect_equal(x2$allocation1, x1$allocation1) expect_equal(x2$allocation2, x1$allocation2) expect_equal(x2$kappa, x1$kappa) expect_equal(x2$piecewiseSurvivalTime, x1$piecewiseSurvivalTime) expect_equal(x2$lambda1, x1$lambda1, tolerance = 1e-07) expect_equal(x2$lambda2, x1$lambda2, tolerance = 1e-07) expect_equal(x2$earlyStop, x1$earlyStop) expect_equal(x2$hazardRatio, x1$hazardRatio, tolerance = 1e-07) expect_equal(x2$analysisTime[1, ], x1$analysisTime[1, ], tolerance = 1e-07) expect_equal(x2$studyDuration, x1$studyDuration, tolerance = 1e-07) expect_equal(x2$eventsNotAchieved[1, ], x1$eventsNotAchieved[1, ]) expect_equal(x2$numberOfSubjects[1, ], x1$numberOfSubjects[1, ], tolerance = 1e-07) expect_equal(x2$numberOfSubjects1[1, ], x1$numberOfSubjects1[1, ], tolerance = 1e-07) expect_equal(x2$numberOfSubjects2[1, ], x1$numberOfSubjects2[1, ], tolerance = 1e-07) expect_equal(x2$eventsPerStage[1, ], x1$eventsPerStage[1, ]) expect_equal(x2$overallEventsPerStage[1, ], x1$overallEventsPerStage[1, ]) expect_equal(x2$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-07) expect_equal(x2$rejectPerStage[1, ], x1$rejectPerStage[1, ], tolerance = 1e-07) expect_equal(x2$overallReject, x1$overallReject, tolerance 
= 1e-07) expect_equal(x3$maxNumberOfIterations, x1$maxNumberOfIterations) expect_equal(x3$seed, x1$seed) expect_equal(x3$allocationRatioPlanned, x1$allocationRatioPlanned) expect_equal(x3$conditionalPower, x1$conditionalPower) expect_equal(x3$iterations[1, ], x1$iterations[1, ]) expect_equal(x3$futilityStop, x1$futilityStop) expect_equal(x3$directionUpper, x1$directionUpper) expect_equal(x3$plannedEvents, x1$plannedEvents) expect_equal(x3$minNumberOfEventsPerStage, x1$minNumberOfEventsPerStage) expect_equal(x3$maxNumberOfEventsPerStage, x1$maxNumberOfEventsPerStage) expect_equal(x3$thetaH1, x1$thetaH1) expect_equal(x3$expectedNumberOfEvents, x1$expectedNumberOfEvents) expect_equal(x3$pi1, x1$pi1, tolerance = 1e-07) expect_equal(x3$pi2, x1$pi2, tolerance = 1e-07) expect_equal(x3$median1, x1$median1, tolerance = 1e-07) expect_equal(x3$median2, x1$median2, tolerance = 1e-07) expect_equal(x3$maxNumberOfSubjects, x1$maxNumberOfSubjects) expect_equal(x3$accrualTime, x1$accrualTime) expect_equal(x3$accrualIntensity, x1$accrualIntensity) expect_equal(x3$dropoutRate1, x1$dropoutRate1) expect_equal(x3$dropoutRate2, x1$dropoutRate2) expect_equal(x3$dropoutTime, x1$dropoutTime) expect_equal(x3$eventTime, x1$eventTime) expect_equal(x3$thetaH0, x1$thetaH0) expect_equal(x3$allocation1, x1$allocation1) expect_equal(x3$allocation2, x1$allocation2) expect_equal(x3$kappa, x1$kappa) expect_equal(x3$piecewiseSurvivalTime, x1$piecewiseSurvivalTime) expect_equal(x3$lambda1, x1$lambda1, tolerance = 1e-07) expect_equal(x3$lambda2, x1$lambda2, tolerance = 1e-07) expect_equal(x3$earlyStop, x1$earlyStop) expect_equal(x3$hazardRatio, x1$hazardRatio, tolerance = 1e-07) expect_equal(x3$analysisTime[1, ], x1$analysisTime[1, ], tolerance = 1e-07) expect_equal(x3$studyDuration, x1$studyDuration, tolerance = 1e-07) expect_equal(x3$eventsNotAchieved[1, ], x1$eventsNotAchieved[1, ]) expect_equal(x3$numberOfSubjects[1, ], x1$numberOfSubjects[1, ], tolerance = 1e-07) expect_equal(x3$numberOfSubjects1[1, 
], x1$numberOfSubjects1[1, ], tolerance = 1e-07) expect_equal(x3$numberOfSubjects2[1, ], x1$numberOfSubjects2[1, ], tolerance = 1e-07) expect_equal(x3$eventsPerStage[1, ], x1$eventsPerStage[1, ]) expect_equal(x3$overallEventsPerStage[1, ], x1$overallEventsPerStage[1, ]) expect_equal(x3$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-07) expect_equal(x3$rejectPerStage[1, ], x1$rejectPerStage[1, ], tolerance = 1e-07) expect_equal(x3$overallReject, x1$overallReject, tolerance = 1e-07) expect_equal(x4$maxNumberOfIterations, x1$maxNumberOfIterations) expect_equal(x4$seed, x1$seed) expect_equal(x4$allocationRatioPlanned, x1$allocationRatioPlanned) expect_equal(x4$conditionalPower, x1$conditionalPower) expect_equal(x4$iterations[1, ], x1$iterations[1, ]) expect_equal(x4$futilityStop, x1$futilityStop) expect_equal(x4$directionUpper, x1$directionUpper) expect_equal(x4$plannedEvents, x1$plannedEvents) expect_equal(x4$minNumberOfEventsPerStage, x1$minNumberOfEventsPerStage) expect_equal(x4$maxNumberOfEventsPerStage, x1$maxNumberOfEventsPerStage) expect_equal(x4$thetaH1, x1$thetaH1) expect_equal(x4$expectedNumberOfEvents, x1$expectedNumberOfEvents) expect_equal(x4$pi1, x1$pi1, tolerance = 1e-07) expect_equal(x4$pi2, x1$pi2, tolerance = 1e-07) expect_equal(x4$median1, x1$median1, tolerance = 1e-07) expect_equal(x4$median2, x1$median2, tolerance = 1e-07) expect_equal(x4$maxNumberOfSubjects, x1$maxNumberOfSubjects) expect_equal(x4$accrualTime, x1$accrualTime) expect_equal(x4$accrualIntensity, x1$accrualIntensity) expect_equal(x4$dropoutRate1, x1$dropoutRate1) expect_equal(x4$dropoutRate2, x1$dropoutRate2) expect_equal(x4$dropoutTime, x1$dropoutTime) expect_equal(x4$thetaH0, x1$thetaH0) expect_equal(x4$allocation1, x1$allocation1) expect_equal(x4$allocation2, x1$allocation2) expect_equal(x4$kappa, x1$kappa) expect_equal(x4$piecewiseSurvivalTime, x1$piecewiseSurvivalTime) expect_equal(x4$lambda1, x1$lambda1, tolerance = 1e-07) expect_equal(x4$lambda2, 
x1$lambda2, tolerance = 1e-07) expect_equal(x4$earlyStop, x1$earlyStop) expect_equal(x4$hazardRatio, x1$hazardRatio, tolerance = 1e-07) expect_equal(x4$analysisTime[1, ], x1$analysisTime[1, ], tolerance = 1e-07) expect_equal(x4$studyDuration, x1$studyDuration, tolerance = 1e-07) expect_equal(x4$eventsNotAchieved[1, ], x1$eventsNotAchieved[1, ]) expect_equal(x4$numberOfSubjects[1, ], x1$numberOfSubjects[1, ], tolerance = 1e-07) expect_equal(x4$numberOfSubjects1[1, ], x1$numberOfSubjects1[1, ], tolerance = 1e-07) expect_equal(x4$numberOfSubjects2[1, ], x1$numberOfSubjects2[1, ], tolerance = 1e-07) expect_equal(x4$eventsPerStage[1, ], x1$eventsPerStage[1, ]) expect_equal(x4$overallEventsPerStage[1, ], x1$overallEventsPerStage[1, ]) expect_equal(x4$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-07) expect_equal(x4$rejectPerStage[1, ], x1$rejectPerStage[1, ], tolerance = 1e-07) expect_equal(x4$overallReject, x1$overallReject, tolerance = 1e-07) expect_equal(x5$maxNumberOfIterations, x1$maxNumberOfIterations) expect_equal(x5$seed, x1$seed) expect_equal(x5$allocationRatioPlanned, x1$allocationRatioPlanned) expect_equal(x5$conditionalPower, x1$conditionalPower) expect_equal(x5$iterations[1, ], x1$iterations[1, ]) expect_equal(x5$futilityStop, x1$futilityStop) expect_equal(x5$directionUpper, x1$directionUpper) expect_equal(x5$plannedEvents, x1$plannedEvents) expect_equal(x5$minNumberOfEventsPerStage, x1$minNumberOfEventsPerStage) expect_equal(x5$maxNumberOfEventsPerStage, x1$maxNumberOfEventsPerStage) expect_equal(x5$thetaH1, x1$thetaH1) expect_equal(x5$expectedNumberOfEvents, x1$expectedNumberOfEvents) expect_equal(x5$pi1, x1$pi1, tolerance = 1e-07) expect_equal(x5$pi2, x1$pi2, tolerance = 1e-07) expect_equal(x5$median1, x1$median1, tolerance = 1e-07) expect_equal(x5$median2, x1$median2, tolerance = 1e-07) expect_equal(x5$maxNumberOfSubjects, x1$maxNumberOfSubjects) expect_equal(x5$accrualTime, x1$accrualTime) expect_equal(x5$accrualIntensity, 
x1$accrualIntensity) expect_equal(x5$dropoutRate1, x1$dropoutRate1) expect_equal(x5$dropoutRate2, x1$dropoutRate2) expect_equal(x5$dropoutTime, x1$dropoutTime) expect_equal(x5$eventTime, x1$eventTime) expect_equal(x5$thetaH0, x1$thetaH0) expect_equal(x5$allocation1, x1$allocation1) expect_equal(x5$allocation2, x1$allocation2) expect_equal(x5$kappa, x1$kappa) expect_equal(x5$piecewiseSurvivalTime, x1$piecewiseSurvivalTime) expect_equal(x5$lambda1, x1$lambda1, tolerance = 1e-07) expect_equal(x5$lambda2, x1$lambda2, tolerance = 1e-07) expect_equal(x5$earlyStop, x1$earlyStop) expect_equal(x5$hazardRatio, x1$hazardRatio, tolerance = 1e-07) expect_equal(x5$analysisTime[1, ], x1$analysisTime[1, ], tolerance = 1e-07) expect_equal(x5$studyDuration, x1$studyDuration, tolerance = 1e-07) expect_equal(x5$eventsNotAchieved[1, ], x1$eventsNotAchieved[1, ]) expect_equal(x5$numberOfSubjects[1, ], x1$numberOfSubjects[1, ], tolerance = 1e-07) expect_equal(x5$numberOfSubjects1[1, ], x1$numberOfSubjects1[1, ], tolerance = 1e-07) expect_equal(x5$numberOfSubjects2[1, ], x1$numberOfSubjects2[1, ], tolerance = 1e-07) expect_equal(x5$eventsPerStage[1, ], x1$eventsPerStage[1, ]) expect_equal(x5$overallEventsPerStage[1, ], x1$overallEventsPerStage[1, ]) expect_equal(x5$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-07) expect_equal(x5$rejectPerStage[1, ], x1$rejectPerStage[1, ], tolerance = 1e-07) expect_equal(x5$overallReject, x1$overallReject, tolerance = 1e-07) expect_equal(x6$maxNumberOfIterations, x1$maxNumberOfIterations) expect_equal(x6$seed, x1$seed) expect_equal(x6$allocationRatioPlanned, x1$allocationRatioPlanned) expect_equal(x6$conditionalPower, x1$conditionalPower) expect_equal(x6$iterations[1, ], x1$iterations[1, ]) expect_equal(x6$futilityStop, x1$futilityStop) expect_equal(x6$directionUpper, x1$directionUpper) expect_equal(x6$plannedEvents, x1$plannedEvents) expect_equal(x6$minNumberOfEventsPerStage, x1$minNumberOfEventsPerStage) 
expect_equal(x6$maxNumberOfEventsPerStage, x1$maxNumberOfEventsPerStage) expect_equal(x6$thetaH1, x1$thetaH1) expect_equal(x6$expectedNumberOfEvents, x1$expectedNumberOfEvents) expect_equal(x6$pi1, x1$pi1, tolerance = 1e-07) expect_equal(x6$pi2, x1$pi2, tolerance = 1e-07) expect_equal(x6$median1, x1$median1, tolerance = 1e-07) expect_equal(x6$median2, x1$median2, tolerance = 1e-07) expect_equal(x6$maxNumberOfSubjects, x1$maxNumberOfSubjects) expect_equal(x6$accrualTime, x1$accrualTime) expect_equal(x6$accrualIntensity, x1$accrualIntensity) expect_equal(x6$dropoutRate1, x1$dropoutRate1) expect_equal(x6$dropoutRate2, x1$dropoutRate2) expect_equal(x6$dropoutTime, x1$dropoutTime) expect_equal(x6$eventTime, x1$eventTime) expect_equal(x6$thetaH0, x1$thetaH0) expect_equal(x6$allocation1, x1$allocation1) expect_equal(x6$allocation2, x1$allocation2) expect_equal(x6$kappa, x1$kappa) expect_equal(x6$piecewiseSurvivalTime, x1$piecewiseSurvivalTime) expect_equal(x6$lambda1, x1$lambda1, tolerance = 1e-07) expect_equal(x6$lambda2, x1$lambda2, tolerance = 1e-07) expect_equal(x6$earlyStop, x1$earlyStop) expect_equal(x6$hazardRatio, x1$hazardRatio, tolerance = 1e-07) expect_equal(x6$analysisTime[1, ], x1$analysisTime[1, ], tolerance = 1e-07) expect_equal(x6$studyDuration, x1$studyDuration, tolerance = 1e-07) expect_equal(x6$eventsNotAchieved[1, ], x1$eventsNotAchieved[1, ]) expect_equal(x6$numberOfSubjects[1, ], x1$numberOfSubjects[1, ], tolerance = 1e-07) expect_equal(x6$numberOfSubjects1[1, ], x1$numberOfSubjects1[1, ], tolerance = 1e-07) expect_equal(x6$numberOfSubjects2[1, ], x1$numberOfSubjects2[1, ], tolerance = 1e-07) expect_equal(x6$eventsPerStage[1, ], x1$eventsPerStage[1, ]) expect_equal(x6$overallEventsPerStage[1, ], x1$overallEventsPerStage[1, ]) expect_equal(x6$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-07) expect_equal(x6$rejectPerStage[1, ], x1$rejectPerStage[1, ], tolerance = 1e-07) expect_equal(x6$overallReject, x1$overallReject, tolerance 
= 1e-07) expect_equal(x7$maxNumberOfIterations, x1$maxNumberOfIterations) expect_equal(x7$seed, x1$seed) expect_equal(x7$allocationRatioPlanned, x1$allocationRatioPlanned) expect_equal(x7$conditionalPower, x1$conditionalPower) expect_equal(x7$iterations[1, ], x1$iterations[1, ]) expect_equal(x7$futilityStop, x1$futilityStop) expect_equal(x7$directionUpper, x1$directionUpper) expect_equal(x7$plannedEvents, x1$plannedEvents) expect_equal(x7$minNumberOfEventsPerStage, x1$minNumberOfEventsPerStage) expect_equal(x7$maxNumberOfEventsPerStage, x1$maxNumberOfEventsPerStage) expect_equal(x7$thetaH1, x1$thetaH1) expect_equal(x7$expectedNumberOfEvents, x1$expectedNumberOfEvents) expect_equal(x7$pi1, x1$pi1, tolerance = 1e-07) expect_equal(x7$pi2, x1$pi2, tolerance = 1e-07) expect_equal(x7$median1, x1$median1, tolerance = 1e-07) expect_equal(x7$median2, x1$median2, tolerance = 1e-07) expect_equal(x7$maxNumberOfSubjects, x1$maxNumberOfSubjects) expect_equal(x7$accrualTime, x1$accrualTime) expect_equal(x7$accrualIntensity, x1$accrualIntensity) expect_equal(x7$dropoutRate1, x1$dropoutRate1) expect_equal(x7$dropoutRate2, x1$dropoutRate2) expect_equal(x7$dropoutTime, x1$dropoutTime) expect_equal(x7$eventTime, x1$eventTime) expect_equal(x7$thetaH0, x1$thetaH0) expect_equal(x7$allocation1, x1$allocation1) expect_equal(x7$allocation2, x1$allocation2) expect_equal(x7$kappa, x1$kappa) expect_equal(x7$piecewiseSurvivalTime, x1$piecewiseSurvivalTime) expect_equal(x7$lambda1, x1$lambda1, tolerance = 1e-07) expect_equal(x7$lambda2, x1$lambda2, tolerance = 1e-07) expect_equal(x7$earlyStop, x1$earlyStop) expect_equal(x7$hazardRatio, x1$hazardRatio, tolerance = 1e-07) expect_equal(x7$analysisTime[1, ], x1$analysisTime[1, ], tolerance = 1e-07) expect_equal(x7$studyDuration, x1$studyDuration, tolerance = 1e-07) expect_equal(x7$eventsNotAchieved[1, ], x1$eventsNotAchieved[1, ]) expect_equal(x7$numberOfSubjects[1, ], x1$numberOfSubjects[1, ], tolerance = 1e-07) expect_equal(x7$numberOfSubjects1[1, 
], x1$numberOfSubjects1[1, ], tolerance = 1e-07) expect_equal(x7$numberOfSubjects2[1, ], x1$numberOfSubjects2[1, ], tolerance = 1e-07) expect_equal(x7$eventsPerStage[1, ], x1$eventsPerStage[1, ]) expect_equal(x7$overallEventsPerStage[1, ], x1$overallEventsPerStage[1, ]) expect_equal(x7$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-07) expect_equal(x7$rejectPerStage[1, ], x1$rejectPerStage[1, ], tolerance = 1e-07) expect_equal(x7$overallReject, x1$overallReject, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Confirm that different definitions of delayed response with the identical meaning result in the same output", { .skipTestIfDisabled() x1 <- getSimulationSurvival( piecewiseSurvivalTime = c(0, 6), lambda2 = c(1.7, 1.2), hazardRatio = c(0.65, 0.7), plannedEvents = 98, maxNumberOfSubjects = 120, directionUpper = FALSE, maxNumberOfIterations = 1000, sided = 1, alpha = 0.1, seed = 123 ) x2 <- getSimulationSurvival( piecewiseSurvivalTime = list("<6" = 1.7, "6 - Inf" = 1.2), hazardRatio = c(0.65, 0.7), plannedEvents = 98, maxNumberOfSubjects = 120, directionUpper = FALSE, maxNumberOfIterations = 1000, sided = 1, alpha = 0.1, seed = 123 ) ## Pairwise comparison of the results of x1 with the results of x2 expect_equal(x2$maxNumberOfIterations, x1$maxNumberOfIterations) expect_equal(x2$seed, x1$seed) expect_equal(x2$allocationRatioPlanned, x1$allocationRatioPlanned) expect_equal(x2$conditionalPower, x1$conditionalPower) expect_equal(x2$iterations[1, ], x1$iterations[1, ]) expect_equal(x2$futilityStop, x1$futilityStop) expect_equal(x2$directionUpper, x1$directionUpper) expect_equal(x2$plannedEvents, x1$plannedEvents) expect_equal(x2$minNumberOfEventsPerStage, x1$minNumberOfEventsPerStage) expect_equal(x2$maxNumberOfEventsPerStage, x1$maxNumberOfEventsPerStage) expect_equal(x2$thetaH1, x1$thetaH1) expect_equal(x2$expectedNumberOfEvents, x1$expectedNumberOfEvents) expect_equal(x2$pi1, x1$pi1) expect_equal(x2$pi2, x1$pi2) 
expect_equal(x2$median1, x1$median1) expect_equal(x2$median2, x1$median2) expect_equal(x2$maxNumberOfSubjects, x1$maxNumberOfSubjects) expect_equal(x2$accrualTime, x1$accrualTime) expect_equal(x2$accrualIntensity, x1$accrualIntensity) expect_equal(x2$dropoutRate1, x1$dropoutRate1) expect_equal(x2$dropoutRate2, x1$dropoutRate2) expect_equal(x2$dropoutTime, x1$dropoutTime) expect_equal(x2$eventTime, x1$eventTime) expect_equal(x2$thetaH0, x1$thetaH0) expect_equal(x2$allocation1, x1$allocation1) expect_equal(x2$allocation2, x1$allocation2) expect_equal(x2$kappa, x1$kappa) expect_equal(x2$piecewiseSurvivalTime, x1$piecewiseSurvivalTime) expect_equal(x2$lambda1, x1$lambda1, tolerance = 1e-07) expect_equal(x2$lambda2, x1$lambda2, tolerance = 1e-07) expect_equal(x2$earlyStop, x1$earlyStop) expect_equal(x2$hazardRatio, x1$hazardRatio, tolerance = 1e-07) expect_equal(x2$analysisTime[1, ], x1$analysisTime[1, ], tolerance = 1e-07) expect_equal(x2$studyDuration, x1$studyDuration, tolerance = 1e-07) expect_equal(x2$eventsNotAchieved[1, ], x1$eventsNotAchieved[1, ]) expect_equal(x2$numberOfSubjects[1, ], x1$numberOfSubjects[1, ], tolerance = 1e-07) expect_equal(x2$numberOfSubjects1[1, ], x1$numberOfSubjects1[1, ], tolerance = 1e-07) expect_equal(x2$numberOfSubjects2[1, ], x1$numberOfSubjects2[1, ], tolerance = 1e-07) expect_equal(x2$eventsPerStage[1, ], x1$eventsPerStage[1, ]) expect_equal(x2$overallEventsPerStage[1, ], x1$overallEventsPerStage[1, ]) expect_equal(x2$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-07) expect_equal(x2$rejectPerStage[1, ], x1$rejectPerStage[1, ], tolerance = 1e-07) expect_equal(x2$overallReject, x1$overallReject, tolerance = 1e-07) }) test_that("'getSimulationSurvival': Confirm that the function works correctly with a user defined 'calcEventsFunction'", { design <- getDesignInverseNormal(futilityBounds = c(0.5, 0.5)) myCalcEventsFunction <- function(..., stage, conditionalPower, thetaH0, estimatedTheta, plannedEvents, 
eventsOverStages, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, allocationRatioPlanned, conditionalCriticalValue) { theta <- max(1 + 1e-12, estimatedTheta) requiredStageEvents <- max(0, conditionalCriticalValue + qnorm(conditionalPower))^2 * (1 + allocationRatioPlanned)^2 / (allocationRatioPlanned) / log(theta / thetaH0)^2 requiredStageEvents <- min( max(minNumberOfEventsPerStage[stage], requiredStageEvents), maxNumberOfEventsPerStage[stage] ) + eventsOverStages[stage - 1] return(requiredStageEvents) } simResults <- getSimulationSurvival( design = design, directionUpper = FALSE, maxNumberOfSubjects = 260, plannedEvents = (1:design$kMax) * 20, allocation1 = 1, allocation2 = 1, accrualTime = c(0, 3, 6, 12), lambda2 = 0.03, hazardRatio = c(0.75, 0.8, 0.9), accrualIntensity = c(0.1, 0.2, 0.2), dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA_real_, 10, 10), maxNumberOfEventsPerStage = c(NA_real_, 100, 100), maxNumberOfIterations = 10, seed = 1234567890, calcEventsFunction = myCalcEventsFunction ) ## Comparison of the results of SimulationResultsSurvival object 'simResults' with expected results expect_equal(simResults$pi1, c(0.23662051, 0.25023841, 0.27674976), tolerance = 1e-07) expect_equal(simResults$pi2, 0.30232367, tolerance = 1e-07) expect_equal(simResults$median1, c(30.806541, 28.881133, 25.672118), tolerance = 1e-07) expect_equal(simResults$median2, 23.104906, tolerance = 1e-07) expect_equal(simResults$accrualIntensity, c(12.380952, 24.761905, 24.761905), tolerance = 1e-07) expect_equal(simResults$lambda1, c(0.0225, 0.024, 0.027), tolerance = 1e-07) expect_equal(simResults$analysisTime[1, ], c(9.5194269, 8.9064508, 9.2223494), tolerance = 1e-07) expect_equal(simResults$analysisTime[2, ], c(27.784104, 21.407573, 24.009249), tolerance = 1e-07) expect_equal(simResults$analysisTime[3, ], c(55.740006, 35.109064, 70.374189), tolerance = 1e-07) expect_equal(simResults$studyDuration, c(34.589904, 
24.072277, 24.081522), tolerance = 1e-07) expect_equal(simResults$eventsNotAchieved[1, ], c(0, 0, 0)) expect_equal(simResults$eventsNotAchieved[2, ], c(0, 0, 0)) expect_equal(simResults$eventsNotAchieved[3, ], c(0, 0, 0)) expect_equal(simResults$numberOfSubjects[1, ], c(198.3, 183, 190.8), tolerance = 1e-07) expect_equal(simResults$numberOfSubjects[2, ], c(260, 256, 260)) expect_equal(simResults$numberOfSubjects[3, ], c(260, 260, 260)) expect_equal(simResults$eventsPerStage[1, ], c(20, 20, 20)) expect_equal(simResults$eventsPerStage[2, ], c(85.333333, 64.833333, 77), tolerance = 1e-07) expect_equal(simResults$eventsPerStage[3, ], c(69.2, 57.8, 100), tolerance = 1e-07) expect_equal(simResults$overallEventsPerStage[1, ], c(20, 20, 20)) expect_equal(simResults$overallEventsPerStage[2, ], c(105.33333, 84.833333, 97), tolerance = 1e-07) expect_equal(simResults$overallEventsPerStage[3, ], c(174.53333, 142.63333, 197), tolerance = 1e-07) expect_equal(simResults$iterations[1, ], c(10, 10, 10)) expect_equal(simResults$iterations[2, ], c(6, 6, 4)) expect_equal(simResults$iterations[3, ], c(5, 5, 2)) expect_equal(simResults$overallReject, c(0.2, 0.2, 0.1), tolerance = 1e-07) expect_equal(simResults$rejectPerStage[1, ], c(0, 0, 0)) expect_equal(simResults$rejectPerStage[2, ], c(0.1, 0, 0.1), tolerance = 1e-07) expect_equal(simResults$rejectPerStage[3, ], c(0.1, 0.2, 0), tolerance = 1e-07) expect_equal(simResults$futilityStop, c(0.4, 0.5, 0.7), tolerance = 1e-07) expect_equal(simResults$futilityPerStage[1, ], c(0.4, 0.4, 0.6), tolerance = 1e-07) expect_equal(simResults$futilityPerStage[2, ], c(0, 0.1, 0.1), tolerance = 1e-07) expect_equal(simResults$earlyStop, c(0.5, 0.5, 0.8), tolerance = 1e-07) expect_equal(simResults$expectedNumberOfSubjects, c(235.32, 228.8, 218.48), tolerance = 1e-07) expect_equal(simResults$expectedNumberOfEvents, c(105.8, 87.8, 70.8), tolerance = 1e-07) expect_equal(simResults$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) 
expect_equal(simResults$conditionalPowerAchieved[2, ], c(0.27873409, 0.6677915, 0.61838719), tolerance = 1e-07) expect_equal(simResults$conditionalPowerAchieved[3, ], c(0.71007174, 0.61706552, 0.10609026), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResults), NA))) expect_output(print(simResults)$show()) invisible(capture.output(expect_error(summary(simResults), NA))) expect_output(summary(simResults)$show()) simResultsCodeBased <- eval(parse(text = getObjectRCode(simResults, stringWrapParagraphWidth = NULL))) expect_equal(simResultsCodeBased$pi1, simResults$pi1, tolerance = 1e-05) expect_equal(simResultsCodeBased$pi2, simResults$pi2, tolerance = 1e-05) expect_equal(simResultsCodeBased$median1, simResults$median1, tolerance = 1e-05) expect_equal(simResultsCodeBased$median2, simResults$median2, tolerance = 1e-05) expect_equal(simResultsCodeBased$accrualIntensity, simResults$accrualIntensity, tolerance = 1e-05) expect_equal(simResultsCodeBased$lambda1, simResults$lambda1, tolerance = 1e-05) expect_equal(simResultsCodeBased$analysisTime, simResults$analysisTime, tolerance = 1e-05) expect_equal(simResultsCodeBased$studyDuration, simResults$studyDuration, tolerance = 1e-05) expect_equal(simResultsCodeBased$eventsNotAchieved, simResults$eventsNotAchieved, tolerance = 1e-05) expect_equal(simResultsCodeBased$numberOfSubjects, simResults$numberOfSubjects, tolerance = 1e-05) expect_equal(simResultsCodeBased$eventsPerStage, simResults$eventsPerStage, tolerance = 1e-05) expect_equal(simResultsCodeBased$overallEventsPerStage, simResults$overallEventsPerStage, tolerance = 1e-05) expect_equal(simResultsCodeBased$iterations, simResults$iterations, tolerance = 1e-05) expect_equal(simResultsCodeBased$overallReject, simResults$overallReject, tolerance = 1e-05) expect_equal(simResultsCodeBased$rejectPerStage, simResults$rejectPerStage, tolerance = 1e-05) expect_equal(simResultsCodeBased$futilityStop, 
simResults$futilityStop, tolerance = 1e-05) expect_equal(simResultsCodeBased$futilityPerStage, simResults$futilityPerStage, tolerance = 1e-05) expect_equal(simResultsCodeBased$earlyStop, simResults$earlyStop, tolerance = 1e-05) expect_equal(simResultsCodeBased$expectedNumberOfSubjects, simResults$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResultsCodeBased$expectedNumberOfEvents, simResults$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simResultsCodeBased$conditionalPowerAchieved, simResults$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResults), "character") df <- as.data.frame(simResults) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResults) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) rpact/tests/testthat/test-f_analysis_base_means.R0000644000176200001440000042466014370207346022014 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_analysis_base_means.R ## | Creation date: 06 February 2023, 12:04:19 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Analysis Means Functionality for One Treatment") test_that("'getAnalysisResults' for two-stage group sequential design and a dataset of one mean per stage (bindingFutility = FALSE)", { .skipTestIfDisabled() dataExample <- getDataset( n = 120, means 
= 0.45, stDevs = 1.3
    )

    # Two-stage Wang-Tsiatis design (deltaWT = 0.4), one-sided alpha = 0.025,
    # with a NON-binding futility bound at z = 0.
    design <- getDesignGroupSequential(
        kMax = 2, alpha = 0.025, futilityBounds = 0,
        bindingFutility = FALSE, typeOfDesign = "WT", deltaWT = 0.4
    )

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential}
    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCIOneMean}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerOneMeanEffect}

    # Interim analysis after stage 1 with 130 subjects planned for stage 2.
    result <- getAnalysisResults(
        design = design, dataInput = dataExample,
        nPlanned = 130, thetaH1 = 0.22, assumedStDev = 1, thetaH0 = 0.25
    )

    ## Comparison of the results of AnalysisResultsGroupSequential object 'result' with expected results
    expect_equal(result$testActions, c("continue", NA_character_))
    expect_equal(result$conditionalRejectionProbabilities, c(0.094509305, NA_real_), tolerance = 1e-07)
    expect_equal(result$conditionalPower, c(NA_real_, 0.048907456), tolerance = 1e-07)
    expect_equal(result$repeatedConfidenceIntervalLowerBounds, c(0.17801039, NA_real_), tolerance = 1e-07)
    expect_equal(result$repeatedConfidenceIntervalUpperBounds, c(0.7219894, NA_real_), tolerance = 1e-07)
    expect_equal(result$repeatedPValues, c(0.085336561, NA_real_), tolerance = 1e-07)
    # Trial has not stopped at stage 1, hence no final stage / final inference yet.
    expect_equal(result$finalStage, NA_integer_)
    expect_equal(result$finalPValues, c(NA_real_, NA_real_))
    expect_equal(result$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_))
    expect_equal(result$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_))
    expect_equal(result$medianUnbiasedEstimates, c(NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must not raise and must produce output.
        invisible(capture.output(expect_error(print(result), NA)))
        expect_output(print(result)$show())
        invisible(capture.output(expect_error(summary(result), NA)))
        expect_output(summary(result)$show())
        # Round trip: regenerate the object from its own R code representation
        # and verify all result fields agree with the original.
        resultCodeBased <- eval(parse(text = getObjectRCode(result, stringWrapParagraphWidth = NULL)))
        expect_equal(resultCodeBased$testActions, result$testActions, tolerance = 1e-05)
        expect_equal(resultCodeBased$conditionalRejectionProbabilities, result$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(resultCodeBased$conditionalPower, result$conditionalPower, tolerance = 1e-05)
        expect_equal(resultCodeBased$repeatedConfidenceIntervalLowerBounds, result$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$repeatedConfidenceIntervalUpperBounds, result$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$repeatedPValues, result$repeatedPValues, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalStage, result$finalStage, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalPValues, result$finalPValues, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalConfidenceIntervalLowerBounds, result$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalConfidenceIntervalUpperBounds, result$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$medianUnbiasedEstimates, result$medianUnbiasedEstimates, tolerance = 1e-05)
        # Generic accessors: names(), as.data.frame(), as.matrix() must yield
        # non-empty, correctly typed objects.
        expect_type(names(result), "character")
        df <- as.data.frame(result)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getAnalysisResults' for three-stage group sequential design and a dataset of one mean per stage (bindingFutility = FALSE)", {
    .skipTestIfDisabled()

    # Two of three stages observed; means and SDs on a scale of factor 100
    # (same standardized effect as the two-stage test above).
    dataExample <- getDataset(
        n = c(120, 130),
        means = c(0.45, 0.41) * 100,
        stDevs = c(1.3, 1.4) * 100
    )

    design <- getDesignGroupSequential(
        kMax = 3, alpha = 0.025, futilityBounds = rep(0.5244, 2),
        bindingFutility = FALSE,
typeOfDesign = "WT", deltaWT = 0.4
    )

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential}
    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCIOneMean}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerOneMeanEffect}

    # Interim analysis after stage 2; hypothesis parameters scaled by 100
    # to match the dataset scale.
    result <- getAnalysisResults(
        design = design, dataInput = dataExample,
        nPlanned = 130, thetaH1 = 22, assumedStDev = 100, thetaH0 = 25
    )

    ## Comparison of the results of AnalysisResultsGroupSequential object 'result' with expected results
    expect_equal(result$testActions, c("continue", "continue", NA_character_))
    expect_equal(result$conditionalRejectionProbabilities, c(0.10127313, 0.20204948, NA_real_), tolerance = 1e-07)
    expect_equal(result$conditionalPower, c(NA_real_, NA_real_, 0.11972239), tolerance = 1e-07)
    expect_equal(result$repeatedConfidenceIntervalLowerBounds, c(15.620913, 23.359338, NA_real_), tolerance = 1e-07)
    expect_equal(result$repeatedConfidenceIntervalUpperBounds, c(74.379087, 62.480662, NA_real_), tolerance = 1e-07)
    expect_equal(result$repeatedPValues, c(0.11501103, 0.039167372, NA_real_), tolerance = 1e-07)
    # Trial continues after stage 2, so no final-stage inference is available.
    expect_equal(result$finalStage, NA_integer_)
    expect_equal(result$finalPValues, c(NA_real_, NA_real_, NA_real_))
    expect_equal(result$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(result$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(result$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must not raise and must produce output.
        invisible(capture.output(expect_error(print(result), NA)))
        expect_output(print(result)$show())
        invisible(capture.output(expect_error(summary(result), NA)))
        expect_output(summary(result)$show())
        # Round trip via getObjectRCode(): rebuilt object must match field by field.
        resultCodeBased <- eval(parse(text = getObjectRCode(result, stringWrapParagraphWidth = NULL)))
        expect_equal(resultCodeBased$testActions, result$testActions, tolerance = 1e-05)
        expect_equal(resultCodeBased$conditionalRejectionProbabilities, result$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(resultCodeBased$conditionalPower, result$conditionalPower, tolerance = 1e-05)
        expect_equal(resultCodeBased$repeatedConfidenceIntervalLowerBounds, result$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$repeatedConfidenceIntervalUpperBounds, result$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$repeatedPValues, result$repeatedPValues, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalStage, result$finalStage, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalPValues, result$finalPValues, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalConfidenceIntervalLowerBounds, result$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalConfidenceIntervalUpperBounds, result$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$medianUnbiasedEstimates, result$medianUnbiasedEstimates, tolerance = 1e-05)
        # Generic accessors must yield non-empty, correctly typed objects.
        expect_type(names(result), "character")
        df <- as.data.frame(result)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getAnalysisResults' for group sequential design and a dataset of one mean per stage (bindingFutility = TRUE)", {
    .skipTestIfDisabled()

    # Three of four stages observed; means and SDs on a scale of factor 100.
    dataExample0 <- getDataset(
        n = c(120, 130, 130),
        means = c(0.45, 0.41, 0.45) * 100,
        stDevs = c(1.3, 1.4, 1.2) * 100
    )

    # Four-stage Wang-Tsiatis design with BINDING futility bounds.
    design1 <- getDesignGroupSequential(
        kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3),
        bindingFutility = TRUE,
typeOfDesign = "WT", deltaWT = 0.4
    )

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential}
    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCIOneMean}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerOneMeanEffect}

    # Interim analysis after stage 3; the efficacy boundary is crossed there.
    result1 <- getAnalysisResults(
        design = design1, dataInput = dataExample0,
        nPlanned = 130, thetaH1 = 22, assumedStDev = 100, thetaH0 = 25
    )

    ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results
    expect_equal(result1$testActions, c("continue", "continue", "reject and stop", NA_character_))
    expect_equal(result1$conditionalRejectionProbabilities, c(0.11438278, 0.24787613, 0.68016764, NA_real_), tolerance = 1e-07)
    expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.55017955), tolerance = 1e-07)
    expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(14.924587, 22.902668, 28.667333, NA_real_), tolerance = 1e-07)
    expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(75.075413, 62.937332, 58.595825, NA_real_), tolerance = 1e-07)
    expect_equal(result1$repeatedPValues, c(0.10271056, 0.041641198, 0.0060463294, NA_real_), tolerance = 1e-07)
    # The trial stops at stage 3, so final inference is available there.
    expect_equal(result1$finalStage, 3)
    expect_equal(result1$finalPValues, c(NA_real_, NA_real_, 0.014723218, NA_real_), tolerance = 1e-07)
    expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 26.836053, NA_real_), tolerance = 1e-07)
    expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 56.851998, NA_real_), tolerance = 1e-07)
    expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 42.083093,
        NA_real_), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must not raise and must produce output.
        invisible(capture.output(expect_error(print(result1), NA)))
        expect_output(print(result1)$show())
        invisible(capture.output(expect_error(summary(result1), NA)))
        expect_output(summary(result1)$show())
        # Round trip via getObjectRCode(): rebuilt object must match field by field.
        result1CodeBased <- eval(parse(text = getObjectRCode(result1, stringWrapParagraphWidth = NULL)))
        expect_equal(result1CodeBased$testActions, result1$testActions, tolerance = 1e-05)
        expect_equal(result1CodeBased$conditionalRejectionProbabilities, result1$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result1CodeBased$conditionalPower, result1$conditionalPower, tolerance = 1e-05)
        expect_equal(result1CodeBased$repeatedConfidenceIntervalLowerBounds, result1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$repeatedConfidenceIntervalUpperBounds, result1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$repeatedPValues, result1$repeatedPValues, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalStage, result1$finalStage, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalPValues, result1$finalPValues, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalConfidenceIntervalLowerBounds, result1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalConfidenceIntervalUpperBounds, result1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$medianUnbiasedEstimates, result1$medianUnbiasedEstimates, tolerance = 1e-05)
        # Generic accessors must yield non-empty, correctly typed objects.
        expect_type(names(result1), "character")
        df <- as.data.frame(result1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getStageResults' for group sequential design and a dataset of one mean per stage (bindingFutility = TRUE)", {
    .skipTestIfDisabled()

    dataExample1 <- getDataset(
        n = c(20,
30, 30),
        means = c(0.45, 0.51, 0.45) * 100,
        stDevs = c(1.3, 1.4, 1.2) * 100
    )

    # Four-stage Wang-Tsiatis design with BINDING futility bounds.
    design1 <- getDesignGroupSequential(
        kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3),
        bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4
    )

    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    # Stage-wise results up to (and including) stage 2 only.
    stageResults1 <- getStageResults(design1, dataExample1, thetaH0 = 10, stage = 2)

    ## Comparison of the results of StageResultsMeans object 'stageResults1' with expected results
    expect_equal(stageResults1$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$overallMeans, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$overallStDevs, c(130, 134.76601, 128.66279, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$overallSampleSizes, c(20, 50, NA_real_, NA_real_))
    expect_equal(stageResults1$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must not raise and must produce output.
        invisible(capture.output(expect_error(print(stageResults1), NA)))
        expect_output(print(stageResults1)$show())
        invisible(capture.output(expect_error(summary(stageResults1), NA)))
        expect_output(summary(stageResults1)$show())
        # Round trip via getObjectRCode(): rebuilt object must match field by field.
        stageResults1CodeBased <- eval(parse(text = getObjectRCode(stageResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(stageResults1CodeBased$overallTestStatistics, stageResults1$overallTestStatistics, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$overallPValues, stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$overallMeans, stageResults1$overallMeans, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$overallStDevs, stageResults1$overallStDevs, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$overallSampleSizes, stageResults1$overallSampleSizes, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$testStatistics, stageResults1$testStatistics, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$pValues, stageResults1$pValues, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$effectSizes, stageResults1$effectSizes, tolerance = 1e-05)
        # Generic accessors must yield non-empty, correctly typed objects.
        expect_type(names(stageResults1), "character")
        df <- as.data.frame(stageResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(stageResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerOneMeanEffect}
    # Conditional power / likelihood plot data over a grid of effect sizes.
    plotData1 <- testGetStageResultsPlotData(stageResults1,
        stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100
    )

    ## Comparison of the results of list object 'plotData1' with expected results
    expect_equal(plotData1$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80))
    expect_equal(plotData1$condPowerValues, c(0.20492816, 0.31007642, 0.43512091, 0.5683138, 0.6950205, 0.80243295, 0.88343665, 0.93770927, 0.96998259, 0.98700232, 0.99495733, 0.99825113, 0.99945881, 0.9998508, 0.9999634), tolerance = 1e-07)
    expect_equal(plotData1$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07)
    expect_equal(plotData1$main, "Conditional Power with Likelihood")
    expect_equal(plotData1$xlab, "Effect size")
    expect_equal(plotData1$ylab, "Conditional power / Likelihood")
    expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 50, sd = 100")
})
test_that("'getAnalysisResults' for inverse normal and Fisher designs and a dataset of one mean per stage (bindingFutility = TRUE)", {
    .skipTestIfDisabled()

    # Same three-stage dataset as the getStageResults test above.
    dataExample1 <- getDataset(
        n = c(20, 30, 30),
        means = c(0.45, 0.51, 0.45) * 100,
        stDevs = c(1.3, 1.4, 1.2) * 100
    )

    # Inverse normal combination test design (non-binding futility bounds).
    design2 <- getDesignInverseNormal(
        kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3),
        bindingFutility = FALSE, typeOfDesign = "WT", deltaWT = 0.4
    )

    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    stageResults2 <- getStageResults(design2, dataExample1, thetaH0 = 10, stage = 2)

    ## Comparison of the results of StageResultsMeans object 'stageResults2' with expected results
    expect_equal(stageResults2$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$overallMeans, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$overallStDevs, c(130, 134.76601, 128.66279, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$overallSampleSizes, c(20, 50, NA_real_, NA_real_))
    expect_equal(stageResults2$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$combInverseNormal, c(1.1666257, 1.9256836, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$weightsInverseNormal, c(0.5, 0.5, 0.5, 0.5), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must not raise and must produce output.
        invisible(capture.output(expect_error(print(stageResults2), NA)))
        expect_output(print(stageResults2)$show())
        invisible(capture.output(expect_error(summary(stageResults2), NA)))
        expect_output(summary(stageResults2)$show())
        # Round trip via getObjectRCode(): rebuilt object must match field by field.
        stageResults2CodeBased <- eval(parse(text = getObjectRCode(stageResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(stageResults2CodeBased$overallTestStatistics, stageResults2$overallTestStatistics, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$overallPValues, stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$overallMeans, stageResults2$overallMeans, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$overallStDevs, stageResults2$overallStDevs, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$overallSampleSizes, stageResults2$overallSampleSizes, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$testStatistics, stageResults2$testStatistics, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$pValues, stageResults2$pValues, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$effectSizes, stageResults2$effectSizes, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$combInverseNormal, stageResults2$combInverseNormal, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$weightsInverseNormal, stageResults2$weightsInverseNormal, tolerance = 1e-05)
        # Generic accessors must yield non-empty, correctly typed objects.
        expect_type(names(stageResults2), "character")
        df <- as.data.frame(stageResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(stageResults2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerOneMeanEffect}
    # Conditional power / likelihood plot data over a grid of effect sizes.
    plotData2 <- testGetStageResultsPlotData(stageResults2,
        stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100
    )

    ## Comparison of the results of list object 'plotData2' with expected results
    expect_equal(plotData2$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80))
    expect_equal(plotData2$condPowerValues, c(0.16190431, 0.25577971, 0.37352079, 0.50571299, 0.6381983, 0.75647047, 0.85036513, 0.91657165, 0.95799515, 0.98097554, 0.99227303, 0.99719255, 0.99908935, 0.99973672, 0.99993224), tolerance = 1e-07)
    expect_equal(plotData2$likelihoodValues, c(0.12861339, 0.21139553, 0.32435073, 0.46456173, 0.62112851, 0.77522713, 0.90320416, 0.98231862, 0.99730568, 0.94517816, 0.83619688, 0.69057821, 0.53238607, 0.38313335, 0.25738469), tolerance = 1e-07)
    expect_equal(plotData2$main, "Conditional Power with Likelihood")
    expect_equal(plotData2$xlab, "Effect size")
    expect_equal(plotData2$ylab, "Conditional power / Likelihood")
    expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 50, sd = 100")

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    # @refFS[Formula]{fs:testStatisticNormalCombinationTest}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCIOneMean}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerOneMeanEffect}

    # Full analysis with the inverse normal design; stop for efficacy at stage 3.
    result2 <- getAnalysisResults(
        design = design2, dataInput = dataExample1,
        nPlanned = 30, thetaH1 = 50, assumedStDev = 100, thetaH0 = 10
    )

    ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results
    expect_equal(result2$testActions, c("continue", "continue", "reject and stop", NA_character_))
    expect_equal(result2$conditionalRejectionProbabilities, c(0.046837862, 0.16190673, 0.42383694, NA_real_), tolerance = 1e-07)
    expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.97718516), tolerance = 1e-07)
    expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-37.7517, 0.20066782, 12.631309, NA_real_), tolerance = 1e-07)
    expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(127.7517, 96.240714, 81.345632, NA_real_), tolerance =
        1e-07)
    expect_equal(result2$repeatedPValues, c(0.28074785, 0.070627118, 0.016069426, NA_real_), tolerance = 1e-07)
    expect_equal(result2$finalStage, 3)
    expect_equal(result2$finalPValues, c(NA_real_, NA_real_, 0.015631623, NA_real_), tolerance = 1e-07)
    expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 13.353451, NA_real_), tolerance = 1e-07)
    expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 73.21831, NA_real_), tolerance = 1e-07)
    expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, 44.191392, NA_real_), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must not raise and must produce output.
        invisible(capture.output(expect_error(print(result2), NA)))
        expect_output(print(result2)$show())
        invisible(capture.output(expect_error(summary(result2), NA)))
        expect_output(summary(result2)$show())
        # Round trip via getObjectRCode(): rebuilt object must match field by field.
        result2CodeBased <- eval(parse(text = getObjectRCode(result2, stringWrapParagraphWidth = NULL)))
        expect_equal(result2CodeBased$testActions, result2$testActions, tolerance = 1e-05)
        expect_equal(result2CodeBased$conditionalRejectionProbabilities, result2$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result2CodeBased$conditionalPower, result2$conditionalPower, tolerance = 1e-05)
        expect_equal(result2CodeBased$repeatedConfidenceIntervalLowerBounds, result2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$repeatedConfidenceIntervalUpperBounds, result2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$repeatedPValues, result2$repeatedPValues, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalStage, result2$finalStage, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalPValues, result2$finalPValues, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalConfidenceIntervalLowerBounds, result2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalConfidenceIntervalUpperBounds, result2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$medianUnbiasedEstimates, result2$medianUnbiasedEstimates, tolerance = 1e-05)
        # Generic accessors must yield non-empty, correctly typed objects.
        expect_type(names(result2), "character")
        df <- as.data.frame(result2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Fisher combination test design with BINDING futility via alpha0Vec.
    design3 <- getDesignFisher(kMax = 4, alpha = 0.025, alpha0Vec = rep(0.4, 3), bindingFutility = TRUE)

    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    stageResults3 <- getStageResults(design3, dataExample1, thetaH0 = 10, stage = 2)

    ## Comparison of the results of StageResultsMeans object 'stageResults3' with expected results
    expect_equal(stageResults3$overallTestStatistics, c(1.2040366, 2.025312, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$overallPValues, c(0.12168078, 0.02415027, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$overallMeans, c(45, 48.6, 47.25, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$overallStDevs, c(130, 134.76601, 128.66279, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$overallSampleSizes, c(20, 50, NA_real_, NA_real_))
    expect_equal(stageResults3$testStatistics, c(1.2040366, 1.6040446, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$pValues, c(0.12168078, 0.059770605, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$effectSizes, c(45, 48.6, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$combFisher, c(0.12168078, 0.007272934, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$weightsFisher, c(1, 1, 1, 1))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must not raise and must produce output.
        invisible(capture.output(expect_error(print(stageResults3), NA)))
        expect_output(print(stageResults3)$show())
        invisible(capture.output(expect_error(summary(stageResults3), NA)))
        expect_output(summary(stageResults3)$show())
        # Round trip via getObjectRCode(): rebuilt object must match field by field.
        stageResults3CodeBased <- eval(parse(text = getObjectRCode(stageResults3, stringWrapParagraphWidth = NULL)))
        expect_equal(stageResults3CodeBased$overallTestStatistics, stageResults3$overallTestStatistics, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$overallPValues, stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$overallMeans, stageResults3$overallMeans, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$overallStDevs, stageResults3$overallStDevs, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$overallSampleSizes, stageResults3$overallSampleSizes, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$testStatistics, stageResults3$testStatistics, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$pValues, stageResults3$pValues, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$effectSizes, stageResults3$effectSizes, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$combFisher, stageResults3$combFisher, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$weightsFisher, stageResults3$weightsFisher, tolerance = 1e-05)
        # Generic accessors must yield non-empty, correctly typed objects.
        expect_type(names(stageResults3), "character")
        df <- as.data.frame(stageResults3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(stageResults3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    # @refFS[Formula]{fs:testStatisticFisherCombinationTest}
    # @refFS[Formula]{fs:definitionRCIFisherCombination}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:finalPValueFisherCombinationTest}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}

    # Full analysis with the Fisher design; conditional power is simulation-based,
    # hence the fixed seed for reproducibility.
    result3 <- getAnalysisResults(
        design = design3, dataInput = dataExample1,
        thetaH0 = 10, nPlanned = 30, thetaH1 = 50, assumedStDev = 100, seed = 123456789
    )

    ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results
    expect_equal(result3$testActions, c("continue", "continue", "continue", NA_character_))
    expect_equal(result3$conditionalRejectionProbabilities, c(0.029249394, 0.067046868, 0.15552139, NA_real_), tolerance = 1e-07)
    expect_equal(result3$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.88057256), tolerance = 1e-07)
    expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-24.226675, 0.014834887, 8.7947814, NA_real_), tolerance = 1e-07)
    expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(114.22668, 96.713521, 85.125684, NA_real_), tolerance = 1e-07)
    expect_equal(result3$repeatedPValues, c(0.165096, 0.068572907, 0.029926287, NA_real_), tolerance = 1e-07)
    # No early stop under the Fisher design, so no final-stage inference.
    expect_equal(result3$finalStage, NA_integer_)
    expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must not raise and must produce output.
        invisible(capture.output(expect_error(print(result3), NA)))
        expect_output(print(result3)$show())
        invisible(capture.output(expect_error(summary(result3), NA)))
        expect_output(summary(result3)$show())
        # Round trip via getObjectRCode(): rebuilt object must match field by field.
        result3CodeBased <- eval(parse(text = getObjectRCode(result3, stringWrapParagraphWidth = NULL)))
        expect_equal(result3CodeBased$testActions, result3$testActions, tolerance = 1e-05)
        expect_equal(result3CodeBased$conditionalRejectionProbabilities, result3$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result3CodeBased$conditionalPower, result3$conditionalPower, tolerance = 1e-05)
        expect_equal(result3CodeBased$repeatedConfidenceIntervalLowerBounds, result3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$repeatedConfidenceIntervalUpperBounds,
result3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$repeatedPValues, result3$repeatedPValues, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalStage, result3$finalStage, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalPValues, result3$finalPValues, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalConfidenceIntervalLowerBounds, result3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalConfidenceIntervalUpperBounds, result3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$medianUnbiasedEstimates, result3$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result3), "character")
        df <- as.data.frame(result3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: analysis of a one-mean-per-stage dataset under three design
# types (group sequential, inverse normal, Fisher combination) with
# non-binding futility; expected values are recorded reference results.
test_that("'getAnalysisResults' for different designs and a dataset of one mean per stage (bindingFutility = FALSE)", {
    .skipTestIfDisabled()

    design4 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4)

    dataExample2 <- getDataset(
        n = c(20, 20, 20),
        means = c(0.45, 0.51, 0.45) * 100,
        stDevs = c(1.3, 1.4, 1.2) * 100
    )

    stageResults1 <- getStageResults(design4, dataExample2, thetaH0 = 10, stage = 2)

    ## Comparison of the results of StageResultsMeans object 'stageResults1' with expected results
    expect_equal(stageResults1$overallTestStatistics, c(1.2040366, 1.8018141, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$overallPValues, c(0.12168078, 0.039654359, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$overallMeans, c(45, 48, 47, NA_real_))
    expect_equal(stageResults1$overallStDevs, c(130, 133.38396, 128.06116, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$overallSampleSizes, c(20, 40, NA_real_, NA_real_))
    expect_equal(stageResults1$testStatistics, c(1.2040366, 1.309697, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$pValues, c(0.12168078, 0.10295724, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults1$effectSizes, c(45, 48, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(stageResults1), NA)))
        expect_output(print(stageResults1)$show())
        invisible(capture.output(expect_error(summary(stageResults1), NA)))
        expect_output(summary(stageResults1)$show())
        stageResults1CodeBased <- eval(parse(text = getObjectRCode(stageResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(stageResults1CodeBased$overallTestStatistics, stageResults1$overallTestStatistics, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$overallPValues, stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$overallMeans, stageResults1$overallMeans, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$overallStDevs, stageResults1$overallStDevs, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$overallSampleSizes, stageResults1$overallSampleSizes, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$testStatistics, stageResults1$testStatistics, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$pValues, stageResults1$pValues, tolerance = 1e-05)
        expect_equal(stageResults1CodeBased$effectSizes, stageResults1$effectSizes, tolerance = 1e-05)
        expect_type(names(stageResults1), "character")
        df <- as.data.frame(stageResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(stageResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    plotData1 <- testGetStageResultsPlotData(stageResults1,
        stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100
    )

    ## Comparison of the results of list object 'plotData1' with expected results
    expect_equal(plotData1$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80))
    expect_equal(plotData1$condPowerValues, c(0.11518708, 0.19320212, 0.2981846, 0.42448846, 0.55999334, 0.68937861, 0.79916986, 0.8818727, 0.93712809, 0.96985063, 0.98701854, 0.99499503, 0.99827593, 0.99947032, 0.99985507), tolerance = 1e-07)
    expect_equal(plotData1$likelihoodValues, c(0.19725323, 0.29399425, 0.4142314, 0.5517428, 0.69473602, 0.8269751, 0.93058175, 0.98993369, 0.99551351, 0.94640644, 0.85054578, 0.72261535, 0.58037159, 0.44065083, 0.31628057), tolerance = 1e-07)
    expect_equal(plotData1$main, "Conditional Power with Likelihood")
    expect_equal(plotData1$xlab, "Effect size")
    expect_equal(plotData1$ylab, "Conditional power / Likelihood")
    expect_equal(plotData1$sub, "Stage = 2, # of remaining subjects = 50, sd = 100")

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential}
    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCIOneMean}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerOneMeanEffect}
    result1 <- getAnalysisResults(design = design4, dataInput = dataExample2, thetaH0 = 10)

    ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results
    expect_equal(result1$thetaH1, 47)
    expect_equal(result1$assumedStDev, 128.06116, tolerance = 1e-07)
    expect_equal(result1$testActions, c("continue", "continue", "continue", NA_character_))
    expect_equal(result1$conditionalRejectionProbabilities, c(0.046837862, 0.11518708, 0.2468754, NA_real_), tolerance = 1e-07)
    expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(-37.7517, -4.7433931, 7.9671114, NA_real_), tolerance = 1e-07)
    expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(127.7517, 100.74339, 86.032888, NA_real_), tolerance = 1e-07)
    expect_equal(result1$repeatedPValues, c(0.28074785, 0.098382799, 0.033210734, NA_real_), tolerance = 1e-07)
    expect_equal(result1$finalStage, NA_integer_)
    expect_equal(result1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result1), NA)))
        expect_output(print(result1)$show())
        invisible(capture.output(expect_error(summary(result1), NA)))
        expect_output(summary(result1)$show())
        result1CodeBased <- eval(parse(text = getObjectRCode(result1, stringWrapParagraphWidth = NULL)))
        expect_equal(result1CodeBased$thetaH1, result1$thetaH1, tolerance = 1e-05)
        expect_equal(result1CodeBased$assumedStDev, result1$assumedStDev, tolerance = 1e-05)
        expect_equal(result1CodeBased$testActions, result1$testActions, tolerance = 1e-05)
        expect_equal(result1CodeBased$conditionalRejectionProbabilities, result1$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result1CodeBased$conditionalPower, result1$conditionalPower, tolerance = 1e-05)
        expect_equal(result1CodeBased$repeatedConfidenceIntervalLowerBounds, result1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$repeatedConfidenceIntervalUpperBounds, result1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$repeatedPValues, result1$repeatedPValues, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalStage, result1$finalStage, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalPValues, result1$finalPValues, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalConfidenceIntervalLowerBounds, result1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalConfidenceIntervalUpperBounds, result1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$medianUnbiasedEstimates, result1$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result1), "character")
        df <- as.data.frame(result1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Same data, inverse normal combination design.
    design5 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4)

    stageResults2 <- getStageResults(design5, dataExample2, thetaH0 = 10, stage = 2)

    ## Comparison of the results of StageResultsMeans object 'stageResults2' with expected results
    expect_equal(stageResults2$overallTestStatistics, c(1.2040366, 1.8018141, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$overallPValues, c(0.12168078, 0.039654359, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$overallMeans, c(45, 48, 47, NA_real_))
    expect_equal(stageResults2$overallStDevs, c(130, 133.38396, 128.06116, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$overallSampleSizes, c(20, 40, NA_real_, NA_real_))
    expect_equal(stageResults2$testStatistics, c(1.2040366, 1.309697, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$pValues, c(0.12168078, 0.10295724, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$effectSizes, c(45, 48, NA_real_, NA_real_))
    expect_equal(stageResults2$combInverseNormal, c(1.1666257, 1.7193339, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults2$weightsInverseNormal, c(0.5, 0.5, 0.5, 0.5), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(stageResults2), NA)))
        expect_output(print(stageResults2)$show())
        invisible(capture.output(expect_error(summary(stageResults2), NA)))
        expect_output(summary(stageResults2)$show())
        stageResults2CodeBased <- eval(parse(text = getObjectRCode(stageResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(stageResults2CodeBased$overallTestStatistics, stageResults2$overallTestStatistics, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$overallPValues, stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$overallMeans, stageResults2$overallMeans, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$overallStDevs, stageResults2$overallStDevs, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$overallSampleSizes, stageResults2$overallSampleSizes, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$testStatistics, stageResults2$testStatistics, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$pValues, stageResults2$pValues, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$effectSizes, stageResults2$effectSizes, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$combInverseNormal, stageResults2$combInverseNormal, tolerance = 1e-05)
        expect_equal(stageResults2CodeBased$weightsInverseNormal, stageResults2$weightsInverseNormal, tolerance = 1e-05)
        expect_type(names(stageResults2), "character")
        df <- as.data.frame(stageResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(stageResults2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    plotData2 <- testGetStageResultsPlotData(stageResults2,
        stage = 2, nPlanned = c(30, 20), thetaRange = seq(10, 80, 5), assumedStDev = 100
    )

    ## Comparison of the results of list object 'plotData2' with expected results
    expect_equal(plotData2$xValues, c(10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80))
    expect_equal(plotData2$condPowerValues, c(0.10694528, 0.18165277, 0.28365551, 0.40813694, 0.54357522, 0.6747028, 0.78751068, 0.8736511, 0.93198732, 0.96700264, 0.98562147, 0.9943885, 0.99804297, 0.99939119, 0.99983131), tolerance = 1e-07)
    expect_equal(plotData2$likelihoodValues, c(0.19725323, 0.29399425, 0.4142314, 0.5517428, 0.69473602, 0.8269751, 0.93058175, 0.98993369, 0.99551351, 0.94640644, 0.85054578, 0.72261535, 0.58037159, 0.44065083, 0.31628057), tolerance = 1e-07)
    expect_equal(plotData2$main, "Conditional Power with Likelihood")
    expect_equal(plotData2$xlab, "Effect size")
    expect_equal(plotData2$ylab, "Conditional power / Likelihood")
    expect_equal(plotData2$sub, "Stage = 2, # of remaining subjects = 50, sd = 100")

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    # @refFS[Formula]{fs:testStatisticNormalCombinationTest}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCIOneMean}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerOneMeanEffect}
    result2 <- getAnalysisResults(design = design5, dataInput = dataExample2, thetaH0 = 10)

    ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results
    expect_equal(result2$thetaH1, 47)
    expect_equal(result2$assumedStDev, 128.06116, tolerance = 1e-07)
    expect_equal(result2$testActions, c("continue", "continue", "continue", NA_character_))
    expect_equal(result2$conditionalRejectionProbabilities, c(0.046837862, 0.10694527, 0.21929053, NA_real_), tolerance = 1e-07)
    expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-37.7517, -5.8599359, 6.9798507, NA_real_), tolerance = 1e-07)
    expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(127.7517, 101.68482, 86.758637, NA_real_), tolerance = 1e-07)
    expect_equal(result2$repeatedPValues, c(0.28074785, 0.10502799, 0.037620516, NA_real_), tolerance = 1e-07)
    expect_equal(result2$finalStage, NA_integer_)
    expect_equal(result2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result2), NA)))
        expect_output(print(result2)$show())
        invisible(capture.output(expect_error(summary(result2), NA)))
        expect_output(summary(result2)$show())
        result2CodeBased <- eval(parse(text = getObjectRCode(result2, stringWrapParagraphWidth = NULL)))
        expect_equal(result2CodeBased$thetaH1, result2$thetaH1, tolerance = 1e-05)
        expect_equal(result2CodeBased$assumedStDev, result2$assumedStDev, tolerance = 1e-05)
        expect_equal(result2CodeBased$testActions, result2$testActions, tolerance = 1e-05)
        expect_equal(result2CodeBased$conditionalRejectionProbabilities, result2$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result2CodeBased$conditionalPower, result2$conditionalPower, tolerance = 1e-05)
        expect_equal(result2CodeBased$repeatedConfidenceIntervalLowerBounds, result2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$repeatedConfidenceIntervalUpperBounds, result2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$repeatedPValues, result2$repeatedPValues, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalStage, result2$finalStage, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalPValues, result2$finalPValues, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalConfidenceIntervalLowerBounds, result2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalConfidenceIntervalUpperBounds, result2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$medianUnbiasedEstimates, result2$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result2), "character")
        df <- as.data.frame(result2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Same data, Fisher combination design.
    design6 <- getDesignFisher(kMax = 4, alpha = 0.025)

    stageResults3 <- getStageResults(design6, dataExample2, thetaH0 = 10, stage = 2)

    ## Comparison of the results of StageResultsMeans object 'stageResults3' with expected results
    expect_equal(stageResults3$overallTestStatistics, c(1.2040366, 1.8018141, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$overallPValues, c(0.12168078, 0.039654359, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$overallMeans, c(45, 48, 47, NA_real_))
    expect_equal(stageResults3$overallStDevs, c(130, 133.38396, 128.06116, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$overallSampleSizes, c(20, 40, NA_real_, NA_real_))
    expect_equal(stageResults3$testStatistics, c(1.2040366, 1.309697, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$pValues, c(0.12168078, 0.10295724, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$effectSizes, c(45, 48, NA_real_, NA_real_))
    expect_equal(stageResults3$combFisher, c(0.12168078, 0.012527917, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(stageResults3$weightsFisher, c(1, 1, 1, 1))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(stageResults3), NA)))
        expect_output(print(stageResults3)$show())
        invisible(capture.output(expect_error(summary(stageResults3), NA)))
        expect_output(summary(stageResults3)$show())
        stageResults3CodeBased <- eval(parse(text = getObjectRCode(stageResults3, stringWrapParagraphWidth = NULL)))
        expect_equal(stageResults3CodeBased$overallTestStatistics, stageResults3$overallTestStatistics, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$overallPValues, stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$overallMeans, stageResults3$overallMeans, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$overallStDevs, stageResults3$overallStDevs, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$overallSampleSizes, stageResults3$overallSampleSizes, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$testStatistics, stageResults3$testStatistics, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$pValues, stageResults3$pValues, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$effectSizes, stageResults3$effectSizes, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$combFisher, stageResults3$combFisher, tolerance = 1e-05)
        expect_equal(stageResults3CodeBased$weightsFisher, stageResults3$weightsFisher, tolerance = 1e-05)
        expect_type(names(stageResults3), "character")
        df <- as.data.frame(stageResults3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(stageResults3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher}
    # @refFS[Formula]{fs:testStatisticOneMean}
    # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater}
    # @refFS[Formula]{fs:testStatisticFisherCombinationTest}
    # @refFS[Formula]{fs:definitionRCIFisherCombination}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:finalPValueFisherCombinationTest}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    result3 <- getAnalysisResults(
        design = design6, dataInput = dataExample2, stage = 2, thetaH0 = 10,
        nPlanned = c(30, 20), thetaH1 = 50, assumedStDev = 100, iterations = 800, seed = 31082018
    )

    ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results
    expect_equal(result3$testActions, c("continue", "continue", NA_character_, NA_character_))
    expect_equal(result3$conditionalRejectionProbabilities, c(0.026695414, 0.033302173, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-28.274837, -9.0994871, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(118.27484, 104.78379, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result3$repeatedPValues, c(0.23830752, 0.14118934, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result3$finalStage, NA_integer_)
    expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.54125, 0.8125), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result3), NA)))
        expect_output(print(result3)$show())
        invisible(capture.output(expect_error(summary(result3), NA)))
        expect_output(summary(result3)$show())
        result3CodeBased <- eval(parse(text = getObjectRCode(result3, stringWrapParagraphWidth = NULL)))
        expect_equal(result3CodeBased$testActions, result3$testActions, tolerance = 1e-05)
        expect_equal(result3CodeBased$conditionalRejectionProbabilities, result3$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result3CodeBased$repeatedConfidenceIntervalLowerBounds, result3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$repeatedConfidenceIntervalUpperBounds, result3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$repeatedPValues, result3$repeatedPValues, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalStage, result3$finalStage, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalPValues, result3$finalPValues, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalConfidenceIntervalLowerBounds, result3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalConfidenceIntervalUpperBounds, result3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$medianUnbiasedEstimates, result3$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_equal(result3CodeBased$conditionalPowerSimulated, result3$conditionalPowerSimulated, tolerance = 1e-05)
        expect_type(names(result3), "character")
        df <- as.data.frame(result3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_plan_section("Testing the Analysis Means Functionality for Two Treatments")

test_that("'getAnalysisResults' for a Fisher design and a dataset of two means per stage", {
    .skipTestIfDisabled()

    # note: if third stage value of means1 (4.5) increases, lower bound of RCI does not increase
    design7 <- getDesignFisher(kMax = 4, informationRates = c(0.2, 0.5, 0.9, 1), alpha = 0.05, alpha0Vec = rep(0.4, 3))

    dataExample3 <- getDataset(
        n1 = c(23, 13, 22),
        n2 = c(22, 11, 22),
        means1 = c(1, 1.1, 1.3) * 100,
        means2 = c(1.3, 1.4, 2.5) * 100,
        stds1 = c(1.3, 2.4, 2.2) * 100,
        stds2 = c(1.2, 2.2, 2.1) * 100
    )

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher}
    # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances}
    # @refFS[Formula]{fs:testStatisticFisherCombinationTest}
    # @refFS[Formula]{fs:definitionRCIFisherCombination}
    # @refFS[Formula]{fs:definitionRCIwithFutilityFisherCombination}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:finalPValueFisherCombinationTest}
    #
# @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    result <- getAnalysisResults(
        design = design7, dataInput = dataExample3,
        equalVariances = TRUE, thetaH0 = 0, directionUpper = FALSE, seed = 123456789
    )

    ## Comparison of the results of AnalysisResultsFisher object 'result' with expected results
    expect_equal(result$thetaH1, -66.37931, tolerance = 1e-07)
    expect_equal(result$assumedStDev, 189.41921, tolerance = 1e-07)
    expect_equal(result$testActions, c("continue", "continue", "continue", NA_character_))
    expect_equal(result$conditionalRejectionProbabilities, c(0.044249457, 0.020976199, 0.060555322, NA_real_), tolerance = 1e-07)
    expect_equal(result$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result$repeatedConfidenceIntervalLowerBounds, c(-102.25178, -110.95946, -128.224, NA_real_), tolerance = 1e-07)
    expect_equal(result$repeatedConfidenceIntervalUpperBounds, c(42.251781, 50.959457, 11.069379, NA_real_), tolerance = 1e-07)
    expect_equal(result$repeatedPValues, c(0.25752784, 0.32556092, 0.088271965, NA_real_), tolerance = 1e-07)
    expect_equal(result$finalStage, NA_integer_)
    expect_equal(result$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result), NA)))
        expect_output(print(result)$show())
        invisible(capture.output(expect_error(summary(result), NA)))
        expect_output(summary(result)$show())
        resultCodeBased <- eval(parse(text = getObjectRCode(result, stringWrapParagraphWidth = NULL)))
        expect_equal(resultCodeBased$thetaH1, result$thetaH1, tolerance = 1e-05)
        expect_equal(resultCodeBased$assumedStDev, result$assumedStDev, tolerance = 1e-05)
        expect_equal(resultCodeBased$testActions, result$testActions, tolerance = 1e-05)
        expect_equal(resultCodeBased$conditionalRejectionProbabilities, result$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(resultCodeBased$conditionalPower, result$conditionalPower, tolerance = 1e-05)
        expect_equal(resultCodeBased$repeatedConfidenceIntervalLowerBounds, result$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$repeatedConfidenceIntervalUpperBounds, result$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$repeatedPValues, result$repeatedPValues, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalStage, result$finalStage, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalPValues, result$finalPValues, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalConfidenceIntervalLowerBounds, result$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$finalConfidenceIntervalUpperBounds, result$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(resultCodeBased$medianUnbiasedEstimates, result$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result), "character")
        df <- as.data.frame(result)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getAnalysisResults' for a group sequential design and a dataset of two means per stage, stages: default, 2, 3, and 4", {
    .skipTestIfDisabled()

    dataExample4 <- getDataset(
        n1 = c(23, 23, 22, 23),
        n2 = c(22, 22, 22, 21),
        means1 = c(1.7, 1.5, 1.8, 2.5) * 100,
        means2 = c(1, 1.1, 1.3, 1) * 100,
        stds1 = c(1.3, 2.4, 2.2, 1.3) * 100,
        stds2 = c(1.2, 2.2, 2.1, 1.3) * 100
    )

    design8 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4)

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential}
    #
# @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCITwoMeans}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect}
    # Stage 2 interim analysis: conditional power is requested for two further
    # stages (nPlanned = c(15, 15)) under thetaH1 = 130, assumedStDev = 100.
    # NOTE(review): 'design8' and 'dataExample4' are defined earlier in this
    # file (outside this excerpt).
    result1 <- getAnalysisResults(
        design = design8, dataInput = dataExample4, equalVariances = TRUE,
        stage = 2, nPlanned = c(15, 15), thetaH0 = 0, thetaH1 = 130,
        assumedStDev = 100, allocationRatioPlanned = 2
    )

    ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results
    expect_equal(result1$testActions, c("continue", "continue", NA_character_, NA_character_))
    expect_equal(result1$conditionalRejectionProbabilities, c(0.12319684, 0.052938347, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, 0.65019157, 0.95040435), tolerance = 1e-07)
    expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(-30.185323, -39.416167, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(170.18532, 149.41617, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result1$repeatedPValues, c(0.10782416, 0.1777417, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result1$finalStage, NA_integer_)
    expect_equal(result1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    # Extended checks (print/summary output, round-trip via generated R code,
    # coercion to data.frame/matrix) run only in the complete unit test set.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result1), NA)))
        expect_output(print(result1)$show())
        invisible(capture.output(expect_error(summary(result1), NA)))
        expect_output(summary(result1)$show())
        # Round trip: regenerate the object from its own R code representation
        # and verify the key fields agree with the original.
        result1CodeBased <- eval(parse(text = getObjectRCode(result1, stringWrapParagraphWidth = NULL)))
        expect_equal(result1CodeBased$testActions, result1$testActions, tolerance = 1e-05)
        expect_equal(result1CodeBased$conditionalRejectionProbabilities, result1$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result1CodeBased$conditionalPower, result1$conditionalPower, tolerance = 1e-05)
        expect_equal(result1CodeBased$repeatedConfidenceIntervalLowerBounds, result1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$repeatedConfidenceIntervalUpperBounds, result1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$repeatedPValues, result1$repeatedPValues, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalStage, result1$finalStage, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalPValues, result1$finalPValues, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalConfidenceIntervalLowerBounds, result1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$finalConfidenceIntervalUpperBounds, result1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result1CodeBased$medianUnbiasedEstimates, result1$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result1), "character")
        df <- as.data.frame(result1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential}
    # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCITwoMeans}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect}
    # Stage 3 interim analysis: one further stage planned (nPlanned = 15),
    # same effect assumptions as for 'result1'.
    result4 <- getAnalysisResults(
        design = design8, dataInput = dataExample4, equalVariances = TRUE,
        stage = 3, nPlanned = 15, thetaH0 = 0, thetaH1 = 130,
        assumedStDev = 100, allocationRatioPlanned = 2
    )

    ## Comparison of the results of AnalysisResultsGroupSequential object 'result4' with expected results
    expect_equal(result4$testActions, c("continue", "continue", "continue", NA_character_))
    expect_equal(result4$conditionalRejectionProbabilities, c(0.12319684, 0.052938347, 0.042196066, NA_real_), tolerance = 1e-07)
    expect_equal(result4$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.74141468), tolerance = 1e-07)
    expect_equal(result4$repeatedConfidenceIntervalLowerBounds, c(-30.185323, -39.416167, -24.461261, NA_real_), tolerance = 1e-07)
    expect_equal(result4$repeatedConfidenceIntervalUpperBounds, c(170.18532, 149.41617, 130.73577, NA_real_), tolerance = 1e-07)
    expect_equal(result4$repeatedPValues, c(0.10782416, 0.1777417, 0.11951427, NA_real_), tolerance = 1e-07)
    expect_equal(result4$finalStage, NA_integer_)
    expect_equal(result4$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result4$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result4$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result4$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result4), NA)))
        expect_output(print(result4)$show())
        invisible(capture.output(expect_error(summary(result4), NA)))
        expect_output(summary(result4)$show())
        result4CodeBased <- eval(parse(text = getObjectRCode(result4, stringWrapParagraphWidth = NULL)))
        expect_equal(result4CodeBased$testActions, result4$testActions, tolerance = 1e-05)
        expect_equal(result4CodeBased$conditionalRejectionProbabilities, result4$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result4CodeBased$conditionalPower, result4$conditionalPower, tolerance = 1e-05)
        expect_equal(result4CodeBased$repeatedConfidenceIntervalLowerBounds, result4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result4CodeBased$repeatedConfidenceIntervalUpperBounds, result4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result4CodeBased$repeatedPValues, result4$repeatedPValues, tolerance = 1e-05)
        expect_equal(result4CodeBased$finalStage, result4$finalStage, tolerance = 1e-05)
        expect_equal(result4CodeBased$finalPValues, result4$finalPValues, tolerance = 1e-05)
        expect_equal(result4CodeBased$finalConfidenceIntervalLowerBounds, result4$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result4CodeBased$finalConfidenceIntervalUpperBounds, result4$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result4CodeBased$medianUnbiasedEstimates, result4$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result4), "character")
        df <- as.data.frame(result4)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result4)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential}
    # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances}
    # @refFS[Formula]{fs:testStatisticGroupSequential}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCITwoMeans}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect}
    # Final analysis at stage 4 (no further stages planned): here thetaH1 and
    # assumedStDev are estimated from the data, and final p-value / final
    # confidence interval / median unbiased estimate become available.
    result7 <- getAnalysisResults(
        design = design8, dataInput = dataExample4, equalVariances = TRUE,
        stage = 4, nPlanned = numeric(0), thetaH0 = 0
    )

    ## Comparison of the results of AnalysisResultsGroupSequential object 'result7' with expected results
    expect_equal(result7$thetaH1, 77.467475, tolerance = 1e-07)
    expect_equal(result7$assumedStDev, 180.80733, tolerance = 1e-07)
    expect_equal(result7$testActions, c("continue", "continue", "continue", "reject"))
    expect_equal(result7$conditionalRejectionProbabilities, c(0.12319684, 0.052938347, 0.042196066, NA_real_), tolerance = 1e-07)
    expect_equal(result7$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result7$repeatedConfidenceIntervalLowerBounds, c(-30.185323, -39.416167, -24.461261, 16.408896), tolerance = 1e-07)
    expect_equal(result7$repeatedConfidenceIntervalUpperBounds, c(170.18532, 149.41617, 130.73577, 138.52605), tolerance = 1e-07)
    expect_equal(result7$repeatedPValues, c(0.10782416, 0.1777417, 0.11951427, 0.0045471564), tolerance = 1e-07)
    expect_equal(result7$finalStage, 4)
    expect_equal(result7$finalPValues, c(NA_real_, NA_real_, NA_real_, 0.019111276), tolerance = 1e-07)
    expect_equal(result7$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, 3.8518991), tolerance = 1e-07)
    expect_equal(result7$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, 122.8312), tolerance = 1e-07)
    expect_equal(result7$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, 65.8091), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result7), NA)))
        expect_output(print(result7)$show())
        invisible(capture.output(expect_error(summary(result7), NA)))
        expect_output(summary(result7)$show())
        result7CodeBased <- eval(parse(text = getObjectRCode(result7, stringWrapParagraphWidth = NULL)))
        expect_equal(result7CodeBased$thetaH1, result7$thetaH1, tolerance = 1e-05)
        expect_equal(result7CodeBased$assumedStDev, result7$assumedStDev, tolerance = 1e-05)
        expect_equal(result7CodeBased$testActions, result7$testActions, tolerance = 1e-05)
        expect_equal(result7CodeBased$conditionalRejectionProbabilities, result7$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result7CodeBased$conditionalPower, result7$conditionalPower, tolerance = 1e-05)
        expect_equal(result7CodeBased$repeatedConfidenceIntervalLowerBounds, result7$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result7CodeBased$repeatedConfidenceIntervalUpperBounds, result7$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result7CodeBased$repeatedPValues, result7$repeatedPValues, tolerance = 1e-05)
        expect_equal(result7CodeBased$finalStage, result7$finalStage, tolerance = 1e-05)
        expect_equal(result7CodeBased$finalPValues, result7$finalPValues, tolerance = 1e-05)
        expect_equal(result7CodeBased$finalConfidenceIntervalLowerBounds, result7$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result7CodeBased$finalConfidenceIntervalUpperBounds, result7$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result7CodeBased$medianUnbiasedEstimates, result7$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result7), "character")
        df <- as.data.frame(result7)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result7)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Same analyses as above, but for an inverse normal combination test design
# with unequal variances assumed in the stage-wise t-tests.
test_that("'getAnalysisResults' for an inverse normal design and a dataset of two means per stage, stages: default, 2, 3, and 4", {
    .skipTestIfDisabled()

    # Four-stage dataset of two means per stage (group 1 vs. group 2).
    dataExample5 <- getDataset(
        n1 = c(23, 13, 22, 13),
        n2 = c(22, 11, 22, 11),
        means1 = c(1.7, 1.5, 1.8, 2.5) * 100,
        means2 = c(1, 1.1, 1.3, 1) * 100,
        stds1 = c(1.3, 2.4, 2.2, 1.3) * 100,
        stds2 = c(1.2, 2.2, 2.1, 1.3) * 100
    )

    # Wang-Tsiatis boundaries with shape parameter deltaWT = 0.4.
    design9 <- getDesignInverseNormal(kMax = 4, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.4)

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterUnequalVariances}
    # @refFS[Formula]{fs:testStatisticNormalCombinationTest}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCITwoMeans}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect}
    # Stage 2 interim analysis, unequal variances.
    result2 <- getAnalysisResults(
        design = design9, dataInput = dataExample5, equalVariances = FALSE,
        stage = 2, nPlanned = c(15, 15), thetaH0 = 0, thetaH1 = 130,
        assumedStDev = 100, allocationRatioPlanned = 2
    )

    ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results
    expect_equal(result2$testActions, c("continue", "continue", NA_character_, NA_character_))
    expect_equal(result2$conditionalRejectionProbabilities, c(0.12372016, 0.08089089, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, 0.7399771, 0.96741599), tolerance = 1e-07)
    expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-30.008991, -32.585516, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(170.00899, 154.76457, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result2$repeatedPValues, c(0.10725005, 0.13184907, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result2$finalStage, NA_integer_)
    expect_equal(result2$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result2), NA)))
        expect_output(print(result2)$show())
        invisible(capture.output(expect_error(summary(result2), NA)))
        expect_output(summary(result2)$show())
        result2CodeBased <- eval(parse(text = getObjectRCode(result2, stringWrapParagraphWidth = NULL)))
        expect_equal(result2CodeBased$testActions, result2$testActions, tolerance = 1e-05)
        expect_equal(result2CodeBased$conditionalRejectionProbabilities, result2$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result2CodeBased$conditionalPower, result2$conditionalPower, tolerance = 1e-05)
        expect_equal(result2CodeBased$repeatedConfidenceIntervalLowerBounds, result2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$repeatedConfidenceIntervalUpperBounds, result2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$repeatedPValues, result2$repeatedPValues, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalStage, result2$finalStage, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalPValues, result2$finalPValues, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalConfidenceIntervalLowerBounds, result2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$finalConfidenceIntervalUpperBounds, result2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result2CodeBased$medianUnbiasedEstimates, result2$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result2), "character")
        df <- as.data.frame(result2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:testStatisticDifferenceMeansUnequalVariances}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterUnequalVariances}
    # @refFS[Formula]{fs:testStatisticNormalCombinationTest}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCITwoMeans}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries}
    # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect}
    # Stage 3 interim analysis, one further stage planned.
    result5 <- getAnalysisResults(
        design = design9, dataInput = dataExample5, equalVariances = FALSE,
        stage = 3, nPlanned = 15, thetaH0 = 0, thetaH1 = 130,
        assumedStDev = 100, allocationRatioPlanned = 2
    )

    ## Comparison of the results of AnalysisResultsInverseNormal object 'result5' with expected results
    expect_equal(result5$testActions, c("continue", "continue", "continue", NA_character_))
    expect_equal(result5$conditionalRejectionProbabilities, c(0.12372016, 0.08089089, 0.073275512, NA_real_), tolerance = 1e-07)
    expect_equal(result5$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.82164236), tolerance = 1e-07)
    expect_equal(result5$repeatedConfidenceIntervalLowerBounds, c(-30.008991, -32.585516, -19.230333, NA_real_), tolerance = 1e-07)
    expect_equal(result5$repeatedConfidenceIntervalUpperBounds, c(170.00899, 154.76457, 134.96564, NA_real_), tolerance = 1e-07)
    expect_equal(result5$repeatedPValues, c(0.10725005, 0.13184907, 0.088247169, NA_real_), tolerance = 1e-07)
    expect_equal(result5$finalStage, NA_integer_)
    expect_equal(result5$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result5$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result5$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result5$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result5), NA)))
        expect_output(print(result5)$show())
        invisible(capture.output(expect_error(summary(result5), NA)))
        expect_output(summary(result5)$show())
        result5CodeBased <- eval(parse(text = getObjectRCode(result5, stringWrapParagraphWidth = NULL)))
        expect_equal(result5CodeBased$testActions, result5$testActions, tolerance = 1e-05)
        expect_equal(result5CodeBased$conditionalRejectionProbabilities, result5$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result5CodeBased$conditionalPower, result5$conditionalPower, tolerance = 1e-05)
        expect_equal(result5CodeBased$repeatedConfidenceIntervalLowerBounds, result5$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result5CodeBased$repeatedConfidenceIntervalUpperBounds, result5$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result5CodeBased$repeatedPValues, result5$repeatedPValues, tolerance = 1e-05)
        expect_equal(result5CodeBased$finalStage, result5$finalStage, tolerance = 1e-05)
        expect_equal(result5CodeBased$finalPValues, result5$finalPValues, tolerance = 1e-05)
        expect_equal(result5CodeBased$finalConfidenceIntervalLowerBounds, result5$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result5CodeBased$finalConfidenceIntervalUpperBounds, result5$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result5CodeBased$medianUnbiasedEstimates, result5$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result5), "character")
        df <- as.data.frame(result5)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result5)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:testStatisticDifferenceMeansUnequalVariances}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterUnequalVariances}
    # @refFS[Formula]{fs:testStatisticNormalCombinationTest}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:orderingPValueUpper}
    # @refFS[Formula]{fs:finalCITwoMeans}
    # Final analysis at stage 4: thetaH1 and assumedStDev are estimated from
    # the data; final inference (p-value, CI, median unbiased) is available.
    result8 <- getAnalysisResults(
        design = design9, dataInput = dataExample5, equalVariances = FALSE,
        stage = 4, nPlanned = numeric(0), thetaH0 = 0
    )

    ## Comparison of the results of AnalysisResultsInverseNormal object 'result8' with expected results
    expect_equal(result8$thetaH1, 72.41784, tolerance = 1e-07)
    expect_equal(result8$assumedStDev, 177.47472, tolerance = 1e-07)
    expect_equal(result8$testActions, c("continue", "continue", "continue", "reject"))
    expect_equal(result8$conditionalRejectionProbabilities, c(0.12372016, 0.08089089, 0.073275512, NA_real_), tolerance = 1e-07)
    expect_equal(result8$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result8$repeatedConfidenceIntervalLowerBounds, c(-30.008991, -32.585516, -19.230333, 16.862491), tolerance = 1e-07)
    expect_equal(result8$repeatedConfidenceIntervalUpperBounds, c(170.00899, 154.76457, 134.96564, 146.10543), tolerance = 1e-07)
    expect_equal(result8$repeatedPValues, c(0.10725005, 0.13184907, 0.088247169, 0.0050030118), tolerance = 1e-07)
    expect_equal(result8$finalStage, 4)
    expect_equal(result8$finalPValues, c(NA_real_, NA_real_, NA_real_, 0.019192988), tolerance = 1e-07)
    expect_equal(result8$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, 4.0866331), tolerance = 1e-07)
    expect_equal(result8$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, 135.35066), tolerance = 1e-07)
    expect_equal(result8$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, 71.819794), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result8), NA)))
        expect_output(print(result8)$show())
        invisible(capture.output(expect_error(summary(result8), NA)))
        expect_output(summary(result8)$show())
        result8CodeBased <- eval(parse(text = getObjectRCode(result8,
            stringWrapParagraphWidth = NULL)))
        expect_equal(result8CodeBased$thetaH1, result8$thetaH1, tolerance = 1e-05)
        expect_equal(result8CodeBased$assumedStDev, result8$assumedStDev, tolerance = 1e-05)
        expect_equal(result8CodeBased$testActions, result8$testActions, tolerance = 1e-05)
        expect_equal(result8CodeBased$conditionalRejectionProbabilities, result8$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result8CodeBased$conditionalPower, result8$conditionalPower, tolerance = 1e-05)
        expect_equal(result8CodeBased$repeatedConfidenceIntervalLowerBounds, result8$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result8CodeBased$repeatedConfidenceIntervalUpperBounds, result8$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result8CodeBased$repeatedPValues, result8$repeatedPValues, tolerance = 1e-05)
        expect_equal(result8CodeBased$finalStage, result8$finalStage, tolerance = 1e-05)
        expect_equal(result8CodeBased$finalPValues, result8$finalPValues, tolerance = 1e-05)
        expect_equal(result8CodeBased$finalConfidenceIntervalLowerBounds, result8$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result8CodeBased$finalConfidenceIntervalUpperBounds, result8$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result8CodeBased$medianUnbiasedEstimates, result8$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result8), "character")
        df <- as.data.frame(result8)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result8)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Same analyses for a Fisher combination test design; here the conditional
# power is obtained by simulation ('conditionalPowerSimulated', seeded for
# reproducibility) and no final p-value stage is reached.
test_that("'getAnalysisResults' for a Fisher design and a dataset of two means per stage, stages: default, 2, 3, and 4", {
    .skipTestIfDisabled()

    informationRates <- c(0.2, 0.5, 0.8, 1)
    # Same four-stage two-means dataset as in the inverse normal test above.
    dataExample6 <- getDataset(
        n1 = c(23, 13, 22, 13),
        n2 = c(22, 11, 22, 11),
        means1 = c(1.7, 1.5, 1.8, 2.5) * 100,
        means2 = c(1, 1.1, 1.3, 1) * 100,
        stds1 = c(1.3, 2.4, 2.2, 1.3) * 100,
        stds2 = c(1.2, 2.2, 2.1, 1.3) * 100
    )
    design10 <- getDesignFisher(
        kMax = 4, alpha = 0.035, informationRates = informationRates
    )

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher}
    # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances}
    # @refFS[Formula]{fs:testStatisticFisherCombinationTest}
    # @refFS[Formula]{fs:definitionRCIFisherCombination}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:finalPValueFisherCombinationTest}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # Stage 2 interim analysis; conditional power is simulated (seeded).
    result3 <- getAnalysisResults(
        design = design10, dataInput = dataExample6, equalVariances = TRUE,
        stage = 2, nPlanned = c(18, 12), thetaH0 = 0, thetaH1 = 130,
        assumedStDev = 100, allocationRatioPlanned = 2, seed = 123456789
    )

    ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results
    expect_equal(result3$testActions, c("continue", "continue", NA_character_, NA_character_))
    expect_equal(result3$conditionalRejectionProbabilities, c(0.092626641, 0.040500778, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-14.62622, -29.188312, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(154.62622, 155.99339, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result3$repeatedPValues, c(0.078061948, 0.16270991, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(result3$finalStage, NA_integer_)
    expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.734, 0.933), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result3), NA)))
        expect_output(print(result3)$show())
        invisible(capture.output(expect_error(summary(result3), NA)))
        expect_output(summary(result3)$show())
        result3CodeBased <- eval(parse(text = getObjectRCode(result3, stringWrapParagraphWidth = NULL)))
        expect_equal(result3CodeBased$testActions, result3$testActions, tolerance = 1e-05)
        expect_equal(result3CodeBased$conditionalRejectionProbabilities, result3$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result3CodeBased$repeatedConfidenceIntervalLowerBounds, result3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$repeatedConfidenceIntervalUpperBounds, result3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$repeatedPValues, result3$repeatedPValues, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalStage, result3$finalStage, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalPValues, result3$finalPValues, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalConfidenceIntervalLowerBounds, result3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$finalConfidenceIntervalUpperBounds, result3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result3CodeBased$medianUnbiasedEstimates, result3$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_equal(result3CodeBased$conditionalPowerSimulated, result3$conditionalPowerSimulated, tolerance = 1e-05)
        expect_type(names(result3), "character")
        df <- as.data.frame(result3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher}
    # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances}
    # @refFS[Formula]{fs:testStatisticFisherCombinationTest}
    # @refFS[Formula]{fs:definitionRCIFisherCombination}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:finalPValueFisherCombinationTest}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # Stage 3 interim analysis, one further stage planned (seeded).
    result6 <- getAnalysisResults(
        design = design10, dataInput = dataExample6, equalVariances = TRUE,
        stage = 3, nPlanned = 15, thetaH0 = 0, thetaH1 = 130,
        assumedStDev = 100, allocationRatioPlanned = 2, seed = 123456789
    )

    ## Comparison of the results of AnalysisResultsFisher object 'result6' with expected results
    expect_equal(result6$testActions, c("continue", "continue", "continue", NA_character_))
    expect_equal(result6$conditionalRejectionProbabilities, c(0.092626641, 0.040500778, 0.016148337, NA_real_), tolerance = 1e-07)
    expect_equal(result6$conditionalPower, c(NA_real_, NA_real_, NA_real_, 0.5920203), tolerance = 1e-07)
    expect_equal(result6$repeatedConfidenceIntervalLowerBounds, c(-14.62622, -29.188312, -25.34531, NA_real_), tolerance = 1e-07)
    expect_equal(result6$repeatedConfidenceIntervalUpperBounds, c(154.62622, 155.99339, 144.38935, NA_real_), tolerance = 1e-07)
    expect_equal(result6$repeatedPValues, c(0.078061948, 0.16270991, 0.16485567, NA_real_), tolerance = 1e-07)
    expect_equal(result6$finalStage, NA_integer_)
    expect_equal(result6$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result6$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result6$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result6$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result6), NA)))
        expect_output(print(result6)$show())
        invisible(capture.output(expect_error(summary(result6), NA)))
        expect_output(summary(result6)$show())
        result6CodeBased <- eval(parse(text = getObjectRCode(result6, stringWrapParagraphWidth = NULL)))
        expect_equal(result6CodeBased$testActions, result6$testActions, tolerance = 1e-05)
        expect_equal(result6CodeBased$conditionalRejectionProbabilities, result6$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result6CodeBased$conditionalPower, result6$conditionalPower, tolerance = 1e-05)
        expect_equal(result6CodeBased$repeatedConfidenceIntervalLowerBounds, result6$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result6CodeBased$repeatedConfidenceIntervalUpperBounds, result6$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result6CodeBased$repeatedPValues, result6$repeatedPValues, tolerance = 1e-05)
        expect_equal(result6CodeBased$finalStage, result6$finalStage, tolerance = 1e-05)
        expect_equal(result6CodeBased$finalPValues, result6$finalPValues, tolerance = 1e-05)
        expect_equal(result6CodeBased$finalConfidenceIntervalLowerBounds, result6$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result6CodeBased$finalConfidenceIntervalUpperBounds, result6$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result6CodeBased$medianUnbiasedEstimates, result6$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result6), "character")
        df <- as.data.frame(result6)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result6)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher}
    # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances}
    # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances}
    # @refFS[Formula]{fs:testStatisticFisherCombinationTest}
    # @refFS[Formula]{fs:definitionRCIFisherCombination}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # @refFS[Formula]{fs:finalPValueFisherCombinationTest}
    # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential}
    # Analysis at stage 4 with no further stages planned; thetaH1 and
    # assumedStDev are estimated from the data. Note: unlike the group
    # sequential / inverse normal cases above, finalStage stays NA here.
    result9 <- getAnalysisResults(
        design = design10, dataInput = dataExample6, equalVariances = TRUE,
        stage = 4, nPlanned = numeric(0), thetaH0 = 0, seed = 123456789
    )

    ## Comparison of the results of AnalysisResultsFisher object 'result9' with expected results
    expect_equal(result9$thetaH1, 72.41784, tolerance = 1e-07)
    expect_equal(result9$assumedStDev, 177.47472, tolerance = 1e-07)
    expect_equal(result9$testActions, c("continue", "continue", "continue", "reject"))
    expect_equal(result9$conditionalRejectionProbabilities, c(0.092626641, 0.040500778, 0.016148337, NA_real_), tolerance = 1e-07)
    expect_equal(result9$conditionalPower, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result9$repeatedConfidenceIntervalLowerBounds, c(-14.62622, -29.188312, -25.34531, 8.7533154), tolerance = 1e-07)
    expect_equal(result9$repeatedConfidenceIntervalUpperBounds, c(154.62622, 155.99339, 144.38935, 151.28694), tolerance = 1e-07)
    expect_equal(result9$repeatedPValues, c(0.078061948, 0.16270991, 0.16485567, 0.017103207), tolerance = 1e-07)
    expect_equal(result9$finalStage, NA_integer_)
    expect_equal(result9$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result9$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result9$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(result9$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(result9), NA)))
        expect_output(print(result9)$show())
        invisible(capture.output(expect_error(summary(result9), NA)))
        expect_output(summary(result9)$show())
        result9CodeBased <- eval(parse(text = getObjectRCode(result9, stringWrapParagraphWidth = NULL)))
        expect_equal(result9CodeBased$thetaH1, result9$thetaH1, tolerance = 1e-05)
        expect_equal(result9CodeBased$assumedStDev, result9$assumedStDev, tolerance = 1e-05)
        expect_equal(result9CodeBased$testActions, result9$testActions, tolerance = 1e-05)
        expect_equal(result9CodeBased$conditionalRejectionProbabilities, result9$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(result9CodeBased$conditionalPower, result9$conditionalPower, tolerance = 1e-05)
        expect_equal(result9CodeBased$repeatedConfidenceIntervalLowerBounds, result9$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result9CodeBased$repeatedConfidenceIntervalUpperBounds, result9$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result9CodeBased$repeatedPValues, result9$repeatedPValues, tolerance = 1e-05)
        expect_equal(result9CodeBased$finalStage, result9$finalStage, tolerance = 1e-05)
        expect_equal(result9CodeBased$finalPValues, result9$finalPValues, tolerance = 1e-05)
        expect_equal(result9CodeBased$finalConfidenceIntervalLowerBounds, result9$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(result9CodeBased$finalConfidenceIntervalUpperBounds, result9$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(result9CodeBased$medianUnbiasedEstimates, result9$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(result9), "character")
        df <- as.data.frame(result9)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(result9)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# NOTE(review): this test continues beyond the current excerpt; the
# getDesignGroupSequential() call below is completed further down the file.
test_that("Check that the conditional power is as expected for different designs and datasets", {
    .skipTestIfDisabled()

    informationRates <- c(0.2, 0.5, 0.8, 1)
    dataExample7 <- getDataset(
        n1 = c(22, 33, 31, 13),
        n2 = c(22, 31, 30, 11),
        means1 = c(1, 1.1, 1, 1),
        means2 = c(1.4, 1.5, 1, 2.5),
        stds1 = c(1, 2, 2, 1.3),
        stds2 = c(1, 2, 2, 1.3)
    )
    design11 <- getDesignGroupSequential(
        kMax = 4, alpha = 0.025, informationRates = informationRates,
        futilityBounds = rep(0.5244, 3), bindingFutility = TRUE,
        typeOfDesign = "WT", deltaWT = 0.45
) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeSmallerEqualVariances} # @refFS[Formula]{fs:testStatisticGroupSequential} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueUpper} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result1 <- getAnalysisResults( design = design11, dataInput = dataExample7, equalVariances = TRUE, directionUpper = FALSE, stage = 2, thetaH0 = 0.2, thetaH1 = -0.2, nPlanned = c(96, 64), allocationRatioPlanned = 3, normalApproximation = FALSE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'result1' with expected results expect_equal(result1$assumedStDev, 1.6547835, tolerance = 1e-07) expect_equal(result1$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result1$conditionalRejectionProbabilities, c(0.13790633, 0.14848468, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$conditionalPower, c(NA_real_, NA_real_, 0.40521176, 0.57857102), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalLowerBounds, c(-1.1558731, -1.1414911, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedConfidenceIntervalUpperBounds, c(0.35587299, 0.34450997, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$repeatedPValues, c(0.06267349, 0.061334534, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result1$finalStage, NA_integer_) expect_equal(result1$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result1$finalConfidenceIntervalUpperBounds, c(NA_real_, 
NA_real_, NA_real_, NA_real_)) expect_equal(result1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(result1), NA))) expect_output(print(result1)$show()) invisible(capture.output(expect_error(summary(result1), NA))) expect_output(summary(result1)$show()) result1CodeBased <- eval(parse(text = getObjectRCode(result1, stringWrapParagraphWidth = NULL))) expect_equal(result1CodeBased$assumedStDev, result1$assumedStDev, tolerance = 1e-05) expect_equal(result1CodeBased$testActions, result1$testActions, tolerance = 1e-05) expect_equal(result1CodeBased$conditionalRejectionProbabilities, result1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(result1CodeBased$conditionalPower, result1$conditionalPower, tolerance = 1e-05) expect_equal(result1CodeBased$repeatedConfidenceIntervalLowerBounds, result1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(result1CodeBased$repeatedConfidenceIntervalUpperBounds, result1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(result1CodeBased$repeatedPValues, result1$repeatedPValues, tolerance = 1e-05) expect_equal(result1CodeBased$finalStage, result1$finalStage, tolerance = 1e-05) expect_equal(result1CodeBased$finalPValues, result1$finalPValues, tolerance = 1e-05) expect_equal(result1CodeBased$finalConfidenceIntervalLowerBounds, result1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(result1CodeBased$finalConfidenceIntervalUpperBounds, result1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(result1CodeBased$medianUnbiasedEstimates, result1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(result1), "character") df <- as.data.frame(result1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(result1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design12 <- 
getDesignInverseNormal( kMax = 4, alpha = 0.025, informationRates = informationRates, typeOfDesign = "WT", deltaWT = 0.45 ) stageResults <- getStageResults( design = design12, dataInput = dataExample7, equalVariances = TRUE, directionUpper = TRUE, stage = 2, thetaH0 = -1 ) ## Comparison of the results of StageResultsMeans object 'stageResults' with expected results expect_equal(stageResults$overallTestStatistics, c(1.9899749, 1.8884638, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$overallPValues, c(0.026564837, 0.030848764, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$overallMeans1, c(1, 1.06, 1.0383721, 1.0333333), tolerance = 1e-07) expect_equal(stageResults$overallMeans2, c(1.4, 1.4584906, 1.2927711, 1.4340426), tolerance = 1e-07) expect_equal(stageResults$overallStDevs1, c(1, 1.6618374, 1.7796344, 1.7187442), tolerance = 1e-07) expect_equal(stageResults$overallStDevs2, c(1, 1.6474262, 1.7846078, 1.7725841), tolerance = 1e-07) expect_equal(stageResults$overallSampleSizes1, c(22, 55, NA_real_, NA_real_)) expect_equal(stageResults$overallSampleSizes2, c(22, 53, NA_real_, NA_real_)) expect_equal(stageResults$testStatistics, c(1.9899749, 1.1994139, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$pValues, c(0.026564837, 0.11746538, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$effectSizes, c(-0.4, -0.39849057, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$combInverseNormal, c(1.9338654, 2.1431134, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults$weightsInverseNormal, c(0.4472136, 0.54772256, 0.54772256, 0.4472136), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(stageResults), NA))) expect_output(print(stageResults)$show()) invisible(capture.output(expect_error(summary(stageResults), NA))) expect_output(summary(stageResults)$show()) stageResultsCodeBased <- eval(parse(text = 
getObjectRCode(stageResults, stringWrapParagraphWidth = NULL))) expect_equal(stageResultsCodeBased$overallTestStatistics, stageResults$overallTestStatistics, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallPValues, stageResults$overallPValues, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallMeans1, stageResults$overallMeans1, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallMeans2, stageResults$overallMeans2, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallStDevs1, stageResults$overallStDevs1, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallStDevs2, stageResults$overallStDevs2, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallSampleSizes1, stageResults$overallSampleSizes1, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallSampleSizes2, stageResults$overallSampleSizes2, tolerance = 1e-05) expect_equal(stageResultsCodeBased$testStatistics, stageResults$testStatistics, tolerance = 1e-05) expect_equal(stageResultsCodeBased$pValues, stageResults$pValues, tolerance = 1e-05) expect_equal(stageResultsCodeBased$effectSizes, stageResults$effectSizes, tolerance = 1e-05) expect_equal(stageResultsCodeBased$combInverseNormal, stageResults$combInverseNormal, tolerance = 1e-05) expect_equal(stageResultsCodeBased$weightsInverseNormal, stageResults$weightsInverseNormal, tolerance = 1e-05) expect_type(names(stageResults), "character") df <- as.data.frame(stageResults) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(stageResults) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getConditionalPowerMeans} # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} conditionalPower <- getConditionalPower(stageResults, thetaH1 = 0.840, nPlanned = c(96, 64), assumedStDev = 2 ) ## Comparison of the results of ConditionalPowerResultsMeans object 
'conditionalPower' with expected results expect_equal(conditionalPower$conditionalPower, c(NA_real_, NA_real_, 0.99975751, 0.99999919), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(conditionalPower), NA))) expect_output(print(conditionalPower)$show()) invisible(capture.output(expect_error(summary(conditionalPower), NA))) expect_output(summary(conditionalPower)$show()) conditionalPowerCodeBased <- eval(parse(text = getObjectRCode(conditionalPower, stringWrapParagraphWidth = NULL))) expect_equal(conditionalPowerCodeBased$conditionalPower, conditionalPower$conditionalPower, tolerance = 1e-05) expect_type(names(conditionalPower), "character") df <- as.data.frame(conditionalPower) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(conditionalPower) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } conditionalPowerPlot <- .getConditionalPowerPlot( stageResults = stageResults, thetaRange = seq(-0.8, 0.5, 0.1), nPlanned = c(96, 64), assumedStDev = 2, allocationRatioPlanned = 3 ) ## Comparison of the results of list object 'conditionalPowerPlot' with expected results expect_equal(conditionalPowerPlot$xValues, c(-0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5), tolerance = 1e-07) expect_equal(conditionalPowerPlot$condPowerValues, c(0.37570702, 0.47532662, 0.57738365, 0.67516684, 0.76267391, 0.83573986, 0.89261201, 0.9338489, 0.96168572, 0.97917178, 0.98938899, 0.99494036, 0.99774434, 0.99906067), tolerance = 1e-07) expect_equal(conditionalPowerPlot$likelihoodValues, c(0.45180702, 0.63888737, 0.81863148, 0.95048525, 0.99998877, 0.95331773, 0.82351787, 0.64461615, 0.45721677, 0.29385692, 0.17113644, 0.090311253, 0.043185112, 0.018711949), tolerance = 1e-07) expect_equal(conditionalPowerPlot$main, "Conditional Power with Likelihood") expect_equal(conditionalPowerPlot$xlab, "Effect size") 
expect_equal(conditionalPowerPlot$ylab, "Conditional power / Likelihood") expect_equal(conditionalPowerPlot$sub, "Stage = 2, # of remaining subjects = 160, sd = 2, allocation ratio = 3") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal} # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:orderingPValueLower} # @refFS[Formula]{fs:finalCITwoMeans} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} result2 <- getAnalysisResults( design = design12, dataInput = dataExample7, equalVariances = TRUE, directionUpper = FALSE, stage = 2, thetaH0 = 0.2, thetaH1 = -0.2, nPlanned = c(96, 64), allocationRatioPlanned = 3, normalApproximation = FALSE ) ## Comparison of the results of AnalysisResultsInverseNormal object 'result2' with expected results expect_equal(result2$assumedStDev, 1.6547835, tolerance = 1e-07) expect_equal(result2$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result2$conditionalRejectionProbabilities, c(0.11857307, 0.20646025, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$conditionalPower, c(NA_real_, NA_real_, 0.50295479, 0.65954708), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalLowerBounds, c(-1.182291, -1.0666303, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedConfidenceIntervalUpperBounds, c(0.3822909, 0.2666303, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$repeatedPValues, c(0.081445577, 0.043264349, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result2$finalStage, NA_integer_) expect_equal(result2$finalPValues, c(NA_real_, NA_real_, 
NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(result2), NA))) expect_output(print(result2)$show()) invisible(capture.output(expect_error(summary(result2), NA))) expect_output(summary(result2)$show()) result2CodeBased <- eval(parse(text = getObjectRCode(result2, stringWrapParagraphWidth = NULL))) expect_equal(result2CodeBased$assumedStDev, result2$assumedStDev, tolerance = 1e-05) expect_equal(result2CodeBased$testActions, result2$testActions, tolerance = 1e-05) expect_equal(result2CodeBased$conditionalRejectionProbabilities, result2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(result2CodeBased$conditionalPower, result2$conditionalPower, tolerance = 1e-05) expect_equal(result2CodeBased$repeatedConfidenceIntervalLowerBounds, result2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(result2CodeBased$repeatedConfidenceIntervalUpperBounds, result2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(result2CodeBased$repeatedPValues, result2$repeatedPValues, tolerance = 1e-05) expect_equal(result2CodeBased$finalStage, result2$finalStage, tolerance = 1e-05) expect_equal(result2CodeBased$finalPValues, result2$finalPValues, tolerance = 1e-05) expect_equal(result2CodeBased$finalConfidenceIntervalLowerBounds, result2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(result2CodeBased$finalConfidenceIntervalUpperBounds, result2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(result2CodeBased$medianUnbiasedEstimates, result2$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(result2), "character") df <- 
as.data.frame(result2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(result2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher} # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} # @refFS[Formula]{fs:definitionRCIFisherCombination} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:conditionalRejectionUnderNullGroupSequential} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalPowerTwoMeansEffect} design13 <- getDesignFisher(kMax = 4, alpha = 0.025, informationRates = informationRates) result3 <- getAnalysisResults( design = design13, dataInput = dataExample7, equalVariances = TRUE, directionUpper = FALSE, stage = 2, nPlanned = c(96, 64), thetaH1 = -0.4, allocationRatioPlanned = 2, normalApproximation = FALSE, iterations = 10000, seed = 442018 ) ## Comparison of the results of AnalysisResultsFisher object 'result3' with expected results expect_equal(result3$assumedStDev, 1.6547835, tolerance = 1e-07) expect_equal(result3$testActions, c("continue", "continue", NA_character_, NA_character_)) expect_equal(result3$conditionalRejectionProbabilities, c(0.031447357, 0.018451139, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalLowerBounds, c(-1.1295139, -1.1012297, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedConfidenceIntervalUpperBounds, c(0.32951385, 0.30122972, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$repeatedPValues, c(0.19930232, 0.21960219, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(result3$finalStage, NA_integer_) expect_equal(result3$finalPValues, c(NA_real_, NA_real_, NA_real_, NA_real_)) 
expect_equal(result3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(result3$conditionalPowerSimulated, c(NA_real_, NA_real_, 0.1239, 0.2143), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(result3), NA))) expect_output(print(result3)$show()) invisible(capture.output(expect_error(summary(result3), NA))) expect_output(summary(result3)$show()) result3CodeBased <- eval(parse(text = getObjectRCode(result3, stringWrapParagraphWidth = NULL))) expect_equal(result3CodeBased$assumedStDev, result3$assumedStDev, tolerance = 1e-05) expect_equal(result3CodeBased$testActions, result3$testActions, tolerance = 1e-05) expect_equal(result3CodeBased$conditionalRejectionProbabilities, result3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(result3CodeBased$repeatedConfidenceIntervalLowerBounds, result3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(result3CodeBased$repeatedConfidenceIntervalUpperBounds, result3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(result3CodeBased$repeatedPValues, result3$repeatedPValues, tolerance = 1e-05) expect_equal(result3CodeBased$finalStage, result3$finalStage, tolerance = 1e-05) expect_equal(result3CodeBased$finalPValues, result3$finalPValues, tolerance = 1e-05) expect_equal(result3CodeBased$finalConfidenceIntervalLowerBounds, result3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(result3CodeBased$finalConfidenceIntervalUpperBounds, result3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(result3CodeBased$medianUnbiasedEstimates, result3$medianUnbiasedEstimates, tolerance = 1e-05) expect_equal(result3CodeBased$conditionalPowerSimulated, 
result3$conditionalPowerSimulated, tolerance = 1e-05) expect_type(names(result3), "character") df <- as.data.frame(result3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(result3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_plan_section("Testing 'getStageResults'") test_that("'getStageResults' for an inverse normal design and one or two treatments", { .skipTestIfDisabled() designInverseNormal <- getDesignInverseNormal( kMax = 4, alpha = 0.025, sided = 1, typeOfDesign = "WT", deltaWT = 0.25, futilityBounds = rep(qnorm(0.7), 3) ) dataExample8 <- getDataset( n = c(10, 10), means = c(2, 3), stDevs = c(1, 1.5) ) # @refFS[Tab.]{fs:tab:output:getStageResultsMeans} # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} stageResults1 <- getStageResults( design = designInverseNormal, dataInput = dataExample8, stage = 2, thetaH0 = 0, directionUpper = TRUE, normalApproximation = FALSE, equalVariances = TRUE ) ## Comparison of the results of StageResultsMeans object 'stageResults1' with expected results expect_equal(stageResults1$overallTestStatistics, c(6.3245553, 8.3272484, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallPValues, c(6.846828e-05, 4.5964001e-08, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallMeans, c(2, 2.5, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallStDevs, c(1, 1.3426212, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$overallSampleSizes, c(10, 20, NA_real_, NA_real_)) expect_equal(stageResults1$testStatistics, c(6.3245553, 6.3245553, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$pValues, c(6.846828e-05, 6.846828e-05, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$effectSizes, c(2, 2.5, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(stageResults1$combInverseNormal, c(3.813637, 5.3932972, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults1$weightsInverseNormal, c(0.5, 0.5, 0.5, 0.5), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(stageResults1), NA))) expect_output(print(stageResults1)$show()) invisible(capture.output(expect_error(summary(stageResults1), NA))) expect_output(summary(stageResults1)$show()) stageResults1CodeBased <- eval(parse(text = getObjectRCode(stageResults1, stringWrapParagraphWidth = NULL))) expect_equal(stageResults1CodeBased$overallTestStatistics, stageResults1$overallTestStatistics, tolerance = 1e-05) expect_equal(stageResults1CodeBased$overallPValues, stageResults1$overallPValues, tolerance = 1e-05) expect_equal(stageResults1CodeBased$overallMeans, stageResults1$overallMeans, tolerance = 1e-05) expect_equal(stageResults1CodeBased$overallStDevs, stageResults1$overallStDevs, tolerance = 1e-05) expect_equal(stageResults1CodeBased$overallSampleSizes, stageResults1$overallSampleSizes, tolerance = 1e-05) expect_equal(stageResults1CodeBased$testStatistics, stageResults1$testStatistics, tolerance = 1e-05) expect_equal(stageResults1CodeBased$pValues, stageResults1$pValues, tolerance = 1e-05) expect_equal(stageResults1CodeBased$effectSizes, stageResults1$effectSizes, tolerance = 1e-05) expect_equal(stageResults1CodeBased$combInverseNormal, stageResults1$combInverseNormal, tolerance = 1e-05) expect_equal(stageResults1CodeBased$weightsInverseNormal, stageResults1$weightsInverseNormal, tolerance = 1e-05) expect_type(names(stageResults1), "character") df <- as.data.frame(stageResults1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(stageResults1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } dataExample9 <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), 
stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) # @refFS[Tab.]{fs:tab:output:getStageResultsMeans} # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticNormalCombinationTest} stageResults2 <- getStageResults( design = designInverseNormal, dataInput = dataExample9, stage = 2, thetaH0 = 0, directionUpper = TRUE, normalApproximation = FALSE, equalVariances = TRUE ) ## Comparison of the results of StageResultsMeans object 'stageResults2' with expected results expect_equal(stageResults2$overallTestStatistics, c(-1.3266499, -1.1850988, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallPValues, c(0.90410354, 0.87988596, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07) expect_equal(stageResults2$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07) expect_equal(stageResults2$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056), tolerance = 1e-07) expect_equal(stageResults2$overallSampleSizes1, c(22, 33, NA_real_, NA_real_)) expect_equal(stageResults2$overallSampleSizes2, c(22, 35, NA_real_, NA_real_)) expect_equal(stageResults2$testStatistics, c(-1.3266499, -0.48819395, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$pValues, c(0.90410354, 0.68487854, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$effectSizes, c(-0.4, -0.40380952, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$combInverseNormal, c(-1.3052935, -1.2633725, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(stageResults2$weightsInverseNormal, c(0.5, 0.5, 0.5, 0.5), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(stageResults2), NA))) 
expect_output(print(stageResults2)$show()) invisible(capture.output(expect_error(summary(stageResults2), NA))) expect_output(summary(stageResults2)$show()) stageResults2CodeBased <- eval(parse(text = getObjectRCode(stageResults2, stringWrapParagraphWidth = NULL))) expect_equal(stageResults2CodeBased$overallTestStatistics, stageResults2$overallTestStatistics, tolerance = 1e-05) expect_equal(stageResults2CodeBased$overallPValues, stageResults2$overallPValues, tolerance = 1e-05) expect_equal(stageResults2CodeBased$overallMeans1, stageResults2$overallMeans1, tolerance = 1e-05) expect_equal(stageResults2CodeBased$overallMeans2, stageResults2$overallMeans2, tolerance = 1e-05) expect_equal(stageResults2CodeBased$overallStDevs1, stageResults2$overallStDevs1, tolerance = 1e-05) expect_equal(stageResults2CodeBased$overallStDevs2, stageResults2$overallStDevs2, tolerance = 1e-05) expect_equal(stageResults2CodeBased$overallSampleSizes1, stageResults2$overallSampleSizes1, tolerance = 1e-05) expect_equal(stageResults2CodeBased$overallSampleSizes2, stageResults2$overallSampleSizes2, tolerance = 1e-05) expect_equal(stageResults2CodeBased$testStatistics, stageResults2$testStatistics, tolerance = 1e-05) expect_equal(stageResults2CodeBased$pValues, stageResults2$pValues, tolerance = 1e-05) expect_equal(stageResults2CodeBased$effectSizes, stageResults2$effectSizes, tolerance = 1e-05) expect_equal(stageResults2CodeBased$combInverseNormal, stageResults2$combInverseNormal, tolerance = 1e-05) expect_equal(stageResults2CodeBased$weightsInverseNormal, stageResults2$weightsInverseNormal, tolerance = 1e-05) expect_type(names(stageResults2), "character") df <- as.data.frame(stageResults2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(stageResults2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getStageResults' for a Fisher design and one or two treatments", { .skipTestIfDisabled() designFisher <- 
getDesignFisher( kMax = 2, alpha = 0.025, alpha0Vec = 1, informationRates = c(0.5, 1), method = "equalAlpha" ) dataExample10 <- getDataset( n = c(10, 10), means = c(2, 3), stDevs = c(1, 1.5) ) # @refFS[Tab.]{fs:tab:output:getStageResultsMeans} # @refFS[Formula]{fs:testStatisticOneMean} # @refFS[Formula]{fs:pValuesOneMeanAlternativeGreater} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} stageResults3 <- getStageResults( design = designFisher, dataInput = dataExample10, stage = 2, thetaH0 = 0, directionUpper = TRUE, normalApproximation = FALSE, equalVariances = TRUE ) ## Comparison of the results of StageResultsMeans object 'stageResults3' with expected results expect_equal(stageResults3$overallTestStatistics, c(6.3245553, 8.3272484), tolerance = 1e-07) expect_equal(stageResults3$overallPValues, c(6.846828e-05, 4.5964001e-08), tolerance = 1e-07) expect_equal(stageResults3$overallMeans, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults3$overallStDevs, c(1, 1.3426212), tolerance = 1e-07) expect_equal(stageResults3$overallSampleSizes, c(10, 20)) expect_equal(stageResults3$testStatistics, c(6.3245553, 6.3245553), tolerance = 1e-07) expect_equal(stageResults3$pValues, c(6.846828e-05, 6.846828e-05), tolerance = 1e-07) expect_equal(stageResults3$effectSizes, c(2, 2.5), tolerance = 1e-07) expect_equal(stageResults3$combFisher, c(6.846828e-05, 4.6879053e-09), tolerance = 1e-07) expect_equal(stageResults3$weightsFisher, c(1, 1)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(stageResults3), NA))) expect_output(print(stageResults3)$show()) invisible(capture.output(expect_error(summary(stageResults3), NA))) expect_output(summary(stageResults3)$show()) stageResults3CodeBased <- eval(parse(text = getObjectRCode(stageResults3, stringWrapParagraphWidth = NULL))) expect_equal(stageResults3CodeBased$overallTestStatistics, stageResults3$overallTestStatistics, tolerance = 1e-05) 
expect_equal(stageResults3CodeBased$overallPValues, stageResults3$overallPValues, tolerance = 1e-05) expect_equal(stageResults3CodeBased$overallMeans, stageResults3$overallMeans, tolerance = 1e-05) expect_equal(stageResults3CodeBased$overallStDevs, stageResults3$overallStDevs, tolerance = 1e-05) expect_equal(stageResults3CodeBased$overallSampleSizes, stageResults3$overallSampleSizes, tolerance = 1e-05) expect_equal(stageResults3CodeBased$testStatistics, stageResults3$testStatistics, tolerance = 1e-05) expect_equal(stageResults3CodeBased$pValues, stageResults3$pValues, tolerance = 1e-05) expect_equal(stageResults3CodeBased$effectSizes, stageResults3$effectSizes, tolerance = 1e-05) expect_equal(stageResults3CodeBased$combFisher, stageResults3$combFisher, tolerance = 1e-05) expect_equal(stageResults3CodeBased$weightsFisher, stageResults3$weightsFisher, tolerance = 1e-05) expect_type(names(stageResults3), "character") df <- as.data.frame(stageResults3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(stageResults3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } dataExample11 <- getDataset( n1 = c(22, 11), n2 = c(22, 13), means1 = c(1, 1.1), means2 = c(1.4, 1.5), stDevs1 = c(1, 2), stDevs2 = c(1, 2) ) # @refFS[Tab.]{fs:tab:output:getStageResultsMeans} # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} # @refFS[Formula]{fs:testStatisticFisherCombinationTest} stageResults4 <- getStageResults( design = designFisher, dataInput = dataExample11, stage = 2, thetaH0 = 0, directionUpper = TRUE, normalApproximation = FALSE, equalVariances = TRUE ) ## Comparison of the results of StageResultsMeans object 'stageResults4' with expected results expect_equal(stageResults4$overallTestStatistics, c(-1.3266499, -1.1850988), tolerance = 1e-07) expect_equal(stageResults4$overallPValues, c(0.90410354, 0.87988596), tolerance = 1e-07) 
expect_equal(stageResults4$overallMeans1, c(1, 1.0333333), tolerance = 1e-07) expect_equal(stageResults4$overallMeans2, c(1.4, 1.4371429), tolerance = 1e-07) expect_equal(stageResults4$overallStDevs1, c(1, 1.3814998), tolerance = 1e-07) expect_equal(stageResults4$overallStDevs2, c(1, 1.4254175), tolerance = 1e-07) expect_equal(stageResults4$overallSampleSizes1, c(22, 33)) expect_equal(stageResults4$overallSampleSizes2, c(22, 35)) expect_equal(stageResults4$testStatistics, c(-1.3266499, -0.48819395), tolerance = 1e-07) expect_equal(stageResults4$pValues, c(0.90410354, 0.68487854), tolerance = 1e-07) expect_equal(stageResults4$effectSizes, c(-0.4, -0.40380952), tolerance = 1e-07) expect_equal(stageResults4$combFisher, c(0.90410354, 0.61920111), tolerance = 1e-07) expect_equal(stageResults4$weightsFisher, c(1, 1)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(stageResults4), NA))) expect_output(print(stageResults4)$show()) invisible(capture.output(expect_error(summary(stageResults4), NA))) expect_output(summary(stageResults4)$show()) stageResults4CodeBased <- eval(parse(text = getObjectRCode(stageResults4, stringWrapParagraphWidth = NULL))) expect_equal(stageResults4CodeBased$overallTestStatistics, stageResults4$overallTestStatistics, tolerance = 1e-05) expect_equal(stageResults4CodeBased$overallPValues, stageResults4$overallPValues, tolerance = 1e-05) expect_equal(stageResults4CodeBased$overallMeans1, stageResults4$overallMeans1, tolerance = 1e-05) expect_equal(stageResults4CodeBased$overallMeans2, stageResults4$overallMeans2, tolerance = 1e-05) expect_equal(stageResults4CodeBased$overallStDevs1, stageResults4$overallStDevs1, tolerance = 1e-05) expect_equal(stageResults4CodeBased$overallStDevs2, stageResults4$overallStDevs2, tolerance = 1e-05) expect_equal(stageResults4CodeBased$overallSampleSizes1, stageResults4$overallSampleSizes1, tolerance = 1e-05) expect_equal(stageResults4CodeBased$overallSampleSizes2, 
stageResults4$overallSampleSizes2, tolerance = 1e-05) expect_equal(stageResults4CodeBased$testStatistics, stageResults4$testStatistics, tolerance = 1e-05) expect_equal(stageResults4CodeBased$pValues, stageResults4$pValues, tolerance = 1e-05) expect_equal(stageResults4CodeBased$effectSizes, stageResults4$effectSizes, tolerance = 1e-05) expect_equal(stageResults4CodeBased$combFisher, stageResults4$combFisher, tolerance = 1e-05) expect_equal(stageResults4CodeBased$weightsFisher, stageResults4$weightsFisher, tolerance = 1e-05) expect_type(names(stageResults4), "character") df <- as.data.frame(stageResults4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(stageResults4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults' with a dataset of means and without defining a design", { .skipTestIfDisabled() dataExample12 <- getDataset( n1 = c(22), n2 = c(21), means1 = c(1.63), means2 = c(1.4), stds1 = c(1.2), stds2 = c(1.3) ) # @refFS[Tab.]{fs:tab:output:getStageResultsMeans} # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} analysisResults1 <- getAnalysisResults(dataExample12, alpha = 0.02, sided = 2, stage = 1) ## Comparison of the results of AnalysisResultsInverseNormal object 'analysisResults1' with expected results expect_equal(analysisResults1$thetaH1, 0.23, tolerance = 1e-07) expect_equal(analysisResults1$assumedStDev, 1.2497805, tolerance = 1e-07) expect_equal(analysisResults1$testActions, "accept") expect_equal(analysisResults1$repeatedConfidenceIntervalLowerBounds, -0.69301003, tolerance = 1e-07) expect_equal(analysisResults1$repeatedConfidenceIntervalUpperBounds, 1.1530101, tolerance = 1e-07) expect_equal(analysisResults1$repeatedPValues, 0.54968031, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(analysisResults1), NA))) 
expect_output(print(analysisResults1)$show()) invisible(capture.output(expect_error(summary(analysisResults1), NA))) expect_output(summary(analysisResults1)$show()) analysisResults1CodeBased <- eval(parse(text = getObjectRCode(analysisResults1, stringWrapParagraphWidth = NULL))) expect_equal(analysisResults1CodeBased$thetaH1, analysisResults1$thetaH1, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$assumedStDev, analysisResults1$assumedStDev, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$testActions, analysisResults1$testActions, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$repeatedConfidenceIntervalLowerBounds, analysisResults1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$repeatedConfidenceIntervalUpperBounds, analysisResults1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$repeatedPValues, analysisResults1$repeatedPValues, tolerance = 1e-05) expect_type(names(analysisResults1), "character") df <- as.data.frame(analysisResults1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(analysisResults1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults' with a dataset of means and without early efficacy stop", { .skipTestIfDisabled() design13 <- getDesignInverseNormal( kMax = 2, alpha = 0.05, typeOfDesign = "noEarlyEfficacy" ) dataExample13 <- getDataset( n1 = c(22, 11), n2 = c(22, 13), means1 = c(1, 3.4), means2 = c(2.4, 4.77), stDevs1 = c(2.2, 2.1), stDevs2 = c(3.1, 3.3) ) # @refFS[Tab.]{fs:tab:output:getStageResultsMeans} # @refFS[Formula]{fs:testStatisticDifferenceMeansEqualVariances} # @refFS[Formula]{fs:pValuesTwoMeansAlternativeGreaterEqualVariances} analysisResults1 <- getAnalysisResults(design13, dataExample13, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsInverseNormal object 'analysisResults1' with 
expected results expect_equal(analysisResults1$thetaH1, -1.4802857, tolerance = 1e-07) expect_equal(analysisResults1$assumedStDev, 2.9293915, tolerance = 1e-07) expect_equal(analysisResults1$testActions, c("continue", "reject")) expect_equal(analysisResults1$conditionalRejectionProbabilities, c(0.26163977, NA_real_), tolerance = 1e-07) expect_equal(analysisResults1$conditionalPower, c(NA_real_, NA_real_)) expect_equal(analysisResults1$repeatedConfidenceIntervalLowerBounds, c(NA_real_, -2.5168979), tolerance = 1e-07) expect_equal(analysisResults1$repeatedConfidenceIntervalUpperBounds, c(NA_real_, -0.25840683), tolerance = 1e-07) expect_equal(analysisResults1$repeatedPValues, c(NA_real_, 0.022205355), tolerance = 1e-07) expect_equal(analysisResults1$finalStage, 2) expect_equal(analysisResults1$finalPValues, c(NA_real_, 0.02220507), tolerance = 1e-07) expect_equal(analysisResults1$finalConfidenceIntervalLowerBounds, c(NA_real_, -2.6299347), tolerance = 1e-07) expect_equal(analysisResults1$finalConfidenceIntervalUpperBounds, c(NA_real_, -0.26287837), tolerance = 1e-07) expect_equal(analysisResults1$medianUnbiasedEstimates, c(NA_real_, -1.4464065), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(analysisResults1), NA))) expect_output(print(analysisResults1)$show()) invisible(capture.output(expect_error(summary(analysisResults1), NA))) expect_output(summary(analysisResults1)$show()) analysisResults1CodeBased <- eval(parse(text = getObjectRCode(analysisResults1, stringWrapParagraphWidth = NULL))) expect_equal(analysisResults1CodeBased$thetaH1, analysisResults1$thetaH1, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$assumedStDev, analysisResults1$assumedStDev, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$testActions, analysisResults1$testActions, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$conditionalRejectionProbabilities, analysisResults1$conditionalRejectionProbabilities, 
tolerance = 1e-05) expect_equal(analysisResults1CodeBased$conditionalPower, analysisResults1$conditionalPower, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$repeatedConfidenceIntervalLowerBounds, analysisResults1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$repeatedConfidenceIntervalUpperBounds, analysisResults1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$repeatedPValues, analysisResults1$repeatedPValues, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$finalStage, analysisResults1$finalStage, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$finalPValues, analysisResults1$finalPValues, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$finalConfidenceIntervalLowerBounds, analysisResults1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$finalConfidenceIntervalUpperBounds, analysisResults1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(analysisResults1CodeBased$medianUnbiasedEstimates, analysisResults1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(analysisResults1), "character") df <- as.data.frame(analysisResults1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(analysisResults1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) rpact/tests/testthat/test-f_simulation_multiarm_survival.R0000644000176200001440000041214414370207346024037 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: 
https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_simulation_multiarm_survival.R ## | Creation date: 06 February 2023, 12:14:51 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Simulation Multi-Arm Survival Function") test_that("'getSimulationMultiArmSurvival': several configurations", { # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns} # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips} # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections} # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmSurvival} # @refFS[Formula]{fs:simulationMultiArmDoseResponse} # @refFS[Formula]{fs:simulationMultiArmSurvivalCholeskyTransformation} # @refFS[Formula]{fs:simulationMultiArmSurvivalCorrMatrix} # @refFS[Formula]{fs:simulationMultiArmSurvivalEvents} # @refFS[Formula]{fs:simulationMultiArmSurvivalLogRanks} # @refFS[Formula]{fs:simulationMultiArmSelections} # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} x1 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "linear", activeArms = 4, plannedEvents = c(10, 30, 50), omegaMaxVector = seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x1' with expected results expect_equal(unlist(as.list(x1$eventsPerStage)), c(4, 56.544006, 118.06218, 3.7272727, 
45.869846, 102.75827, 3.5, 30.37664, 63.385959, 3.3076923, 32.448585, 72.264513, 4, 49.635155, 106.15332, 3.8181818, 36.042521, 83.474993, 3.6666667, 30.759757, 64.009577, 3.5384615, 32.768737, 72.921706, 4, 65.124183, 133.16052, 3.9090909, 38.113637, 101.03155, 3.8333333, 29.450577, 70.659781, 3.7692308, 37.063433, 93.1602, 4, 43.825836, 90.344006, 4, 31.654176, 76.670094, 4, 38.617451, 74.294998, 4, 39.885794, 87.433784), tolerance = 1e-07) expect_equal(x1$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x1$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x1$iterations[3, ], c(10, 10, 9, 9)) expect_equal(x1$rejectAtLeastOne, c(0, 0.1, 0.4, 0.5), tolerance = 1e-07) expect_equal(unlist(as.list(x1$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0.2), tolerance = 1e-07) expect_equal(x1$futilityStop, c(0, 0, 0, 0)) expect_equal(x1$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x1$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x1$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x1$earlyStop[2, ], c(0, 0, 0.1, 0.1), tolerance = 1e-07) expect_equal(x1$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x1$successPerStage[2, ], c(0, 0, 0.1, 0.1), tolerance = 1e-07) expect_equal(x1$successPerStage[3, ], c(0, 0.1, 0.3, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(x1$selectedArms)), c(1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.1, 0.1, 1, 0.2, 0.1, 1, 0.2, 0.2, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.5, 0.5, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0.5, 0.5, 1, 0, 0, 1, 0.1, 0.1, 1, 0.4, 0.3, 1, 0.2, 0.2), tolerance = 1e-07) expect_equal(x1$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x1$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x1$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x1$expectedNumberOfEvents, c(182.68801, 153.5825, 114.70922, 140.61265), tolerance = 1e-07) expect_equal(unlist(as.list(x1$singleNumberOfEventsPerStage)), 
c(2, 12.71817, 15, 1.9090909, 15.365854, 15.365854, 1.8333333, 5.2380952, 5.8201058, 1.7692308, 5.5627907, 5.9431525, 2, 5.8093191, 10, 2, 5.447619, 5.9099062, 2, 5.4545455, 6.0606061, 2, 5.6521739, 6.2801932, 2, 21.298347, 21.51817, 2.0909091, 7.4278263, 21.395349, 2.1666667, 3.9786992, 14.01999, 2.2307692, 9.7161004, 22.223992, 2, 0, 0, 2.1818182, 0.87745601, 3.4933517, 2.3333333, 12.978906, 8.4883336, 2.4615385, 12.307692, 13.675214, 2, 39.825836, 46.51817, 1.8181818, 26.77672, 41.522566, 1.6666667, 21.638545, 27.189214, 1.5384615, 23.578102, 33.872776), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(5.8245202e-05, 0.033918251, 0.017570415, 0.062651459), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[3, ], c(0.081443645, 0.17714318, 0.49831, 0.30622362), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$eventsPerStage, x1$eventsPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$iterations, x1$iterations, tolerance = 1e-05) expect_equal(x1CodeBased$rejectAtLeastOne, x1$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x1CodeBased$rejectedArmsPerStage, x1$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$futilityStop, x1$futilityStop, tolerance = 1e-05) expect_equal(x1CodeBased$futilityPerStage, x1$futilityPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$earlyStop, x1$earlyStop, tolerance = 1e-05) expect_equal(x1CodeBased$successPerStage, x1$successPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$selectedArms, x1$selectedArms, tolerance = 1e-05) expect_equal(x1CodeBased$numberOfActiveArms, x1$numberOfActiveArms, 
tolerance = 1e-05) expect_equal(x1CodeBased$expectedNumberOfEvents, x1$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x1CodeBased$singleNumberOfEventsPerStage, x1$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPowerAchieved, x1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x2 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "userDefined", activeArms = 4, plannedEvents = c(10, 30, 50), adaptations = rep(TRUE, 2), effectMatrix = matrix(c(0.1, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.5), ncol = 4), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x2' with expected results expect_equal(unlist(as.list(x2$eventsPerStage)), c(5.5, 83.888278, 161.11661, 5, 67.731433, 137.0436, 6.5, 94.119048, 179.03968, 5.8333333, 80.884792, 166.06998, 6, 91.054945, 173.83883, 5.4166667, 70.455792, 139.76796, 7, 91.102564, 177.8547, 6.25, 81.100963, 165.22795), tolerance = 1e-07) expect_equal(x2$iterations[1, ], c(10, 10)) expect_equal(x2$iterations[2, ], c(10, 10)) expect_equal(x2$iterations[3, ], c(3, 9)) expect_equal(x2$rejectAtLeastOne, c(0, 0)) expect_equal(unlist(as.list(x2$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(x2$futilityStop, c(0.7, 0.1), tolerance = 1e-07) expect_equal(x2$futilityPerStage[1, ], c(0, 0)) expect_equal(x2$futilityPerStage[2, ], c(0.7, 0.1), tolerance = 1e-07) expect_equal(x2$earlyStop[1, ], c(0, 0)) expect_equal(x2$earlyStop[2, ], c(0.7, 0.1), tolerance = 1e-07) 
expect_equal(x2$successPerStage[1, ], c(0, 0)) expect_equal(x2$successPerStage[2, ], c(0, 0)) expect_equal(x2$successPerStage[3, ], c(0, 0)) expect_equal(unlist(as.list(x2$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0.4, 0.1, 1, 0.5, 0.5, 1, 0.4, 0.1, 1, 0.1, 0, 1, 0.2, 0.1, 1, 0.4, 0.4), tolerance = 1e-07) expect_equal(x2$numberOfActiveArms[1, ], c(4, 4)) expect_equal(x2$numberOfActiveArms[2, ], c(1, 1)) expect_equal(x2$numberOfActiveArms[3, ], c(1, 1)) expect_equal(x2$expectedNumberOfEvents, c(140, 189.47868), tolerance = 1e-07) expect_equal(unlist(as.list(x2$singleNumberOfEventsPerStage)), c(0.5, 0, 0, 0.83333333, 0, 0, 1.5, 9.2307692, 7.6923077, 1.6666667, 12.320026, 15.873016, 1, 6.6666667, 5.5555556, 1.25, 2.3076923, 0, 2, 5.7142857, 9.5238095, 2.0833333, 12.119531, 14.814815, 5, 78.388278, 77.228327, 4.1666667, 62.731433, 69.312169), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0, 1.5253195e-09), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[3, ], c(0, 1.1842379e-15), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$eventsPerStage, x2$eventsPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$iterations, x2$iterations, tolerance = 1e-05) expect_equal(x2CodeBased$rejectAtLeastOne, x2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x2CodeBased$rejectedArmsPerStage, x2$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$futilityStop, x2$futilityStop, tolerance = 1e-05) expect_equal(x2CodeBased$futilityPerStage, x2$futilityPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$earlyStop, x2$earlyStop, tolerance = 1e-05) 
expect_equal(x2CodeBased$successPerStage, x2$successPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$selectedArms, x2$selectedArms, tolerance = 1e-05) expect_equal(x2CodeBased$numberOfActiveArms, x2$numberOfActiveArms, tolerance = 1e-05) expect_equal(x2CodeBased$expectedNumberOfEvents, x2$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x2CodeBased$singleNumberOfEventsPerStage, x2$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPowerAchieved, x2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x3 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "sigmoidEmax", gED50 = 2, slope = 0.5, activeArms = 4, plannedEvents = c(10, 30, 50), omegaMaxVector = seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x3' with expected results expect_equal(unlist(as.list(x3$eventsPerStage)), c(4, 56.544006, 118.06218, 3.8499139, 46.858849, 105.91374, 3.7209785, 37.392533, 79.701207, 3.6090171, 41.322916, 96.584767, 4, 49.635155, 106.15332, 3.8816273, 36.741574, 86.112527, 3.7799362, 32.140988, 69.139159, 3.6916324, 34.296961, 77.330183, 4, 65.124183, 133.16052, 3.9002999, 39.328667, 103.83053, 3.8146499, 31.005549, 75.358715, 3.7402755, 36.850923, 90.063067, 4, 43.825836, 90.344006, 3.9133408, 32.25912, 80.994092, 3.8388939, 33.67594, 74.187296, 3.7742477, 34.474746, 77.613714), tolerance = 1e-07) expect_equal(x3$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x3$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x3$iterations[3, ], c(10, 10, 
10, 9)) expect_equal(x3$rejectAtLeastOne, c(0, 0.1, 0.3, 0.3), tolerance = 1e-07) expect_equal(unlist(as.list(x3$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x3$futilityStop, c(0, 0, 0, 0)) expect_equal(x3$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x3$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x3$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x3$earlyStop[2, ], c(0, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x3$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x3$successPerStage[2, ], c(0, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x3$successPerStage[3, ], c(0, 0.1, 0.3, 0.2), tolerance = 1e-07) expect_equal(unlist(as.list(x3$selectedArms)), c(1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.2, 0.2, 1, 0.4, 0.3, 1, 0.2, 0.2, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.5, 0.5, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0, 0, 1, 0.1, 0.1, 1, 0.3, 0.3, 1, 0.1, 0.1), tolerance = 1e-07) expect_equal(x3$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x3$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x3$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x3$expectedNumberOfEvents, c(182.68801, 158.69386, 129.88152, 143.2193), tolerance = 1e-07) expect_equal(unlist(as.list(x3$singleNumberOfEventsPerStage)), c(2, 12.71817, 15, 2.0015199, 15.596608, 15.596608, 2.0028257, 10.765048, 10.765048, 2.0039595, 12.760745, 18.508821, 2, 5.8093191, 10, 2.0332334, 5.447619, 5.9126663, 2.0617834, 5.4545455, 5.4545455, 2.0865748, 5.6521739, 6.2801932, 2, 21.298347, 21.51817, 2.0519059, 8.0160405, 21.043571, 2.0964971, 4.2843932, 12.80954, 2.135218, 8.1574931, 16.459114, 2, 0, 0, 2.0649468, 0.93345197, 5.2766854, 2.120741, 6.9305404, 8.9677303, 2.1691901, 5.7473444, 6.3859382, 2, 39.825836, 46.51817, 1.848394, 27.412327, 43.458287, 1.7181528, 22.906506, 31.543625, 1.6050576, 24.953154, 36.753029), tolerance = 
1e-07) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(5.8245202e-05, 0.027881828, 0.017394693, 0.05621525), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[3, ], c(0.081443645, 0.17047212, 0.40326875, 0.20898924), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$eventsPerStage, x3$eventsPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$iterations, x3$iterations, tolerance = 1e-05) expect_equal(x3CodeBased$rejectAtLeastOne, x3$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x3CodeBased$rejectedArmsPerStage, x3$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$futilityStop, x3$futilityStop, tolerance = 1e-05) expect_equal(x3CodeBased$futilityPerStage, x3$futilityPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$earlyStop, x3$earlyStop, tolerance = 1e-05) expect_equal(x3CodeBased$successPerStage, x3$successPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$selectedArms, x3$selectedArms, tolerance = 1e-05) expect_equal(x3CodeBased$numberOfActiveArms, x3$numberOfActiveArms, tolerance = 1e-05) expect_equal(x3CodeBased$expectedNumberOfEvents, x3$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x3CodeBased$singleNumberOfEventsPerStage, x3$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPowerAchieved, x3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x4 <- 
getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "all", plannedEvents = c(10, 30, 50), omegaMaxVector = seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x4' with expected results expect_equal(unlist(as.list(x4$eventsPerStage)), c(4, 43.80534, 83.80534, 3.7272727, 41, 78.272727, 3.5, 36.991095, 71.991095, 3.3076923, 31.601422, 64.678345, 4, 43.80534, 83.80534, 3.8181818, 42, 80.181818, 3.6666667, 38.752575, 75.419242, 3.5384615, 33.806172, 69.190787, 4, 43.80534, 83.80534, 3.9090909, 43, 82.090909, 3.8333333, 40.514056, 78.847389, 3.7692308, 36.010922, 73.70323, 4, 43.80534, 83.80534, 4, 44, 84, 4, 42.275537, 82.275537, 4, 38.215673, 78.215673), tolerance = 1e-07) expect_equal(x4$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x4$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x4$iterations[3, ], c(10, 10, 10, 10)) expect_equal(x4$rejectAtLeastOne, c(0, 0.1, 0.2, 0.2), tolerance = 1e-07) expect_equal(unlist(as.list(x4$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.2, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x4$futilityStop, c(0, 0, 0, 0)) expect_equal(x4$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x4$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x4$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x4$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(x4$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x4$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x4$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x4$selectedArms)), c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)) expect_equal(x4$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x4$numberOfActiveArms[2, ], c(4, 4, 4, 4)) expect_equal(x4$numberOfActiveArms[3, ], c(4, 4, 4, 4)) expect_equal(x4$expectedNumberOfEvents, c(209.51335, 210, 205.68884, 195.53918), tolerance = 1e-07) expect_equal(unlist(as.list(x4$singleNumberOfEventsPerStage)), c(2, 19.90267, 20, 1.9090909, 19.090909, 19.090909, 1.8333333, 17.542954, 18.333333, 1.7692308, 15.133855, 17.692308, 2, 19.90267, 20, 2, 20, 20, 2, 19.137768, 20, 2, 17.107836, 20, 2, 19.90267, 20, 2.0909091, 20.909091, 20.909091, 2.1666667, 20.732582, 21.666667, 2.2307692, 19.081818, 22.307692, 2, 19.90267, 20, 2.1818182, 21.818182, 21.818182, 2.3333333, 22.327396, 23.333333, 2.4615385, 21.055799, 24.615385, 2, 19.90267, 20, 1.8181818, 18.181818, 18.181818, 1.6666667, 15.94814, 16.666667, 1.5384615, 13.159874, 15.384615), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.09225544, 0.10755451, 0.080008195, 0.16137979), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[3, ], c(0.011907723, 0.030096405, 0.063317228, 0.080810126), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$eventsPerStage, x4$eventsPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$iterations, x4$iterations, tolerance = 1e-05) expect_equal(x4CodeBased$rejectAtLeastOne, x4$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x4CodeBased$rejectedArmsPerStage, x4$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$futilityStop, x4$futilityStop, tolerance = 1e-05) expect_equal(x4CodeBased$futilityPerStage, 
x4$futilityPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$earlyStop, x4$earlyStop, tolerance = 1e-05) expect_equal(x4CodeBased$successPerStage, x4$successPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$selectedArms, x4$selectedArms, tolerance = 1e-05) expect_equal(x4CodeBased$numberOfActiveArms, x4$numberOfActiveArms, tolerance = 1e-05) expect_equal(x4CodeBased$expectedNumberOfEvents, x4$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x4CodeBased$singleNumberOfEventsPerStage, x4$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPowerAchieved, x4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x5 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "rBest", rValue = 2, plannedEvents = c(10, 30, 50), omegaMaxVector = seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x5' with expected results expect_equal(unlist(as.list(x5$eventsPerStage)), c(4, 52.52163, 101.87923, 3.7272727, 38.874416, 80.596598, 3.5, 26.84484, 58.886153, 3.3076923, 30.949369, 59.030095, 4, 46.265898, 91.178205, 3.8181818, 38.846483, 77.651274, 3.6666667, 31.816991, 74.831476, 3.5384615, 34.40256, 64.30431, 4, 45.854963, 88.5459, 3.9090909, 42.746334, 86.637949, 3.8333333, 33.812131, 81.900895, 3.7692308, 37.761125, 70.330365, 4, 39.599231, 77.844872, 4, 51.153533, 106.61534, 4, 33.295158, 78.303665, 4, 52.301815, 100.0206), tolerance = 1e-07) expect_equal(x5$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x5$iterations[2, ], 
c(10, 10, 10, 10)) expect_equal(x5$iterations[3, ], c(10, 10, 10, 10)) expect_equal(x5$rejectAtLeastOne, c(0.1, 0, 0.2, 0.6), tolerance = 1e-07) expect_equal(unlist(as.list(x5$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.4), tolerance = 1e-07) expect_equal(x5$futilityStop, c(0, 0, 0, 0)) expect_equal(x5$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x5$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x5$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x5$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(x5$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x5$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x5$successPerStage[3, ], c(0, 0, 0.2, 0.1), tolerance = 1e-07) expect_equal(unlist(as.list(x5$selectedArms)), c(1, 0.7, 0.7, 1, 0.4, 0.4, 1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.5, 0.5, 1, 0.3, 0.3, 1, 0.6, 0.6, 1, 0.4, 0.4, 1, 0.5, 0.5, 1, 0.5, 0.5, 1, 0.7, 0.7, 1, 0.5, 0.5, 1, 0.3, 0.3, 1, 0.8, 0.8, 1, 0.5, 0.5, 1, 0.8, 0.8), tolerance = 1e-07) expect_equal(x5$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x5$numberOfActiveArms[2, ], c(2, 2, 2, 2)) expect_equal(x5$numberOfActiveArms[3, ], c(2, 2, 2, 2)) expect_equal(x5$expectedNumberOfEvents, c(181.7241, 185.49972, 161.03264, 167.26743), tolerance = 1e-07) expect_equal(unlist(as.list(x5$singleNumberOfEventsPerStage)), c(2, 20.481343, 20.156522, 1.9090909, 10.277572, 13.076122, 1.8333333, 6.3781513, 6.3781513, 1.7692308, 7.1692484, 7.9523038, 2, 14.225611, 15.711226, 2, 10.15873, 10.15873, 2, 11.183635, 17.351324, 2, 10.39167, 9.7733283, 2, 13.814676, 13.489856, 2.0909091, 13.967672, 15.245555, 2.1666667, 13.012108, 22.425602, 2.2307692, 13.519466, 12.440818, 2, 7.5589445, 9.044559, 2.1818182, 22.283962, 26.815748, 2.3333333, 12.328469, 19.345345, 2.4615385, 27.829386, 27.590364, 2, 28.040287, 29.201081, 1.8181818, 24.869571, 28.64606, 1.6666667, 16.966689, 25.663162, 1.5384615, 
20.472429, 20.128422), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.0011884888, 0.025687618, 0.050936222, 0.056920177), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[3, ], c(0.16000064, 0.17717891, 0.25226702, 0.41435883), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$eventsPerStage, x5$eventsPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$iterations, x5$iterations, tolerance = 1e-05) expect_equal(x5CodeBased$rejectAtLeastOne, x5$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x5CodeBased$rejectedArmsPerStage, x5$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$futilityStop, x5$futilityStop, tolerance = 1e-05) expect_equal(x5CodeBased$futilityPerStage, x5$futilityPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$earlyStop, x5$earlyStop, tolerance = 1e-05) expect_equal(x5CodeBased$successPerStage, x5$successPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$selectedArms, x5$selectedArms, tolerance = 1e-05) expect_equal(x5CodeBased$numberOfActiveArms, x5$numberOfActiveArms, tolerance = 1e-05) expect_equal(x5CodeBased$expectedNumberOfEvents, x5$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x5CodeBased$singleNumberOfEventsPerStage, x5$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPowerAchieved, x5$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && 
ncol(mtx) > 0) } x6 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "epsilon", epsilonValue = 0.1, plannedEvents = c(10, 30, 50), omegaMaxVector = seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x6' with expected results expect_equal(unlist(as.list(x6$eventsPerStage)), c(4, 61.733546, 127.87237, 3.7272727, 31.938683, 70.204069, 3.5, 41.1271, 91.550286, 3.3076923, 34.649784, 81.031044, 4, 56.48818, 112.05759, 3.8181818, 40.038722, 105.34522, 3.6666667, 42.689301, 87.532881, 3.5384615, 28.026291, 80.666398, 4, 48.154846, 98.724256, 3.9090909, 30.896746, 79.036909, 3.8333333, 37.59905, 82.662218, 3.7692308, 38.754446, 86.274767, 4, 48.730993, 104.3004, 4, 35.685987, 84.168898, 4, 44.550112, 108.98044, 4, 36.742663, 80.705475), tolerance = 1e-07) expect_equal(x6$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x6$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x6$iterations[3, ], c(10, 9, 9, 10)) expect_equal(x6$rejectAtLeastOne, c(0, 0.3, 0.5, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(x6$rejectedArmsPerStage)), c(0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.2), tolerance = 1e-07) expect_equal(x6$futilityStop, c(0, 0, 0, 0)) expect_equal(x6$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x6$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x6$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x6$earlyStop[2, ], c(0, 0.1, 0.1, 0), tolerance = 1e-07) expect_equal(x6$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x6$successPerStage[2, ], c(0, 0.1, 0.1, 0), tolerance = 1e-07) expect_equal(x6$successPerStage[3, ], c(0, 0.2, 0.4, 0.4), tolerance = 1e-07) 
expect_equal(unlist(as.list(x6$selectedArms)), c(1, 0.5, 0.5, 1, 0.3, 0, 1, 0.3, 0.2, 1, 0.3, 0.2, 1, 0.3, 0.2, 1, 0.5, 0.5, 1, 0.2, 0.1, 1, 0.3, 0.3, 1, 0.1, 0.1, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.3, 0.2, 1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.5, 0.5, 1, 0.3, 0.3), tolerance = 1e-07) expect_equal(x6$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x6$numberOfActiveArms[2, ], c(1.1, 1.3, 1.1, 1.2), tolerance = 1e-07) expect_equal(x6$numberOfActiveArms[3, ], c(1, 1.1111111, 1, 1), tolerance = 1e-07) expect_equal(x6$expectedNumberOfEvents, c(182.78185, 142.9628, 156.19514, 150.78355), tolerance = 1e-07) expect_equal(unlist(as.list(x6$singleNumberOfEventsPerStage)), c(2, 18.5787, 20.56941, 1.9090909, 5.8775077, 0, 1.8333333, 9.5135564, 11.640212, 1.7692308, 9.2660953, 10.697674, 2, 13.333333, 10, 2, 13.886638, 27.041107, 2, 10.909091, 6.0606061, 2, 2.4118335, 16.956522, 2, 5, 5, 2.0909091, 4.6537525, 9.8747764, 2.1666667, 5.6521739, 6.2801932, 2.2307692, 12.909219, 11.836735, 2, 5.5761462, 10, 2.1818182, 9.3520845, 10.217524, 2.3333333, 12.436568, 25.647358, 2.4615385, 10.666667, 8.2792264, 2, 39.154846, 45.56941, 1.8181818, 22.333902, 38.265387, 1.6666667, 28.113543, 38.782975, 1.5384615, 22.075996, 35.683586), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.018816179, 0.071905821, 0.002298516, 0.067085771), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[3, ], c(0.080015186, 0.29125387, 0.18887123, 0.4033636), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$eventsPerStage, x6$eventsPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$iterations, 
x6$iterations, tolerance = 1e-05) expect_equal(x6CodeBased$rejectAtLeastOne, x6$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x6CodeBased$rejectedArmsPerStage, x6$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$futilityStop, x6$futilityStop, tolerance = 1e-05) expect_equal(x6CodeBased$futilityPerStage, x6$futilityPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$earlyStop, x6$earlyStop, tolerance = 1e-05) expect_equal(x6CodeBased$successPerStage, x6$successPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$selectedArms, x6$selectedArms, tolerance = 1e-05) expect_equal(x6CodeBased$numberOfActiveArms, x6$numberOfActiveArms, tolerance = 1e-05) expect_equal(x6CodeBased$expectedNumberOfEvents, x6$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x6CodeBased$singleNumberOfEventsPerStage, x6$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPowerAchieved, x6$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x7 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, plannedEvents = c(10, 30, 50), omegaMaxVector = seq(1, 1.6, 0.2), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x7' with expected results expect_equal(unlist(as.list(x7$eventsPerStage)), c(4, 56.544006, 109.08801, 3.7272727, 45.869846, 88.01242, 3.5, 30.37664, 55.609943, 3.3076923, 32.448585, 64.38291, 4, 49.635155, 95.27031, 3.8181818, 36.042521, 68.26686, 3.6666667, 30.759757, 56.23356, 3.5384615, 32.768737, 65.040103, 4, 65.124183, 126.24837, 3.9090909, 
38.113637, 72.318183, 3.8333333, 29.450577, 53.284552, 3.7692308, 37.063433, 73.850273, 4, 43.825836, 83.651672, 4, 31.654176, 59.308352, 4, 38.617451, 65.970174, 4, 39.885794, 79.552181), tolerance = 1e-07) expect_equal(x7$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x7$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x7$iterations[3, ], c(10, 10, 9, 9)) expect_equal(x7$rejectAtLeastOne, c(0, 0.1, 0.4, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(x7$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0.2), tolerance = 1e-07) expect_equal(x7$futilityStop, c(0, 0, 0, 0)) expect_equal(x7$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x7$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x7$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x7$earlyStop[2, ], c(0, 0, 0.1, 0.1), tolerance = 1e-07) expect_equal(x7$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x7$successPerStage[2, ], c(0, 0, 0.1, 0.1), tolerance = 1e-07) expect_equal(x7$successPerStage[3, ], c(0, 0.1, 0.3, 0.3), tolerance = 1e-07) expect_equal(unlist(as.list(x7$selectedArms)), c(1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.1, 0.1, 1, 0.2, 0.1, 1, 0.2, 0.2, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.5, 0.5, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0.5, 0.5, 1, 0, 0, 1, 0.1, 0.1, 1, 0.4, 0.3, 1, 0.2, 0.2), tolerance = 1e-07) expect_equal(x7$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x7$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x7$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x7$expectedNumberOfEvents, c(169.30334, 121.79095, 98.577582, 123.23372), tolerance = 1e-07) expect_equal(unlist(as.list(x7$singleNumberOfEventsPerStage)), c(2, 12.71817, 12.71817, 1.9090909, 15.365854, 15.365854, 1.8333333, 5.2380952, 5.8201058, 1.7692308, 5.5627907, 5.9431525, 2, 5.8093191, 5.8093191, 2, 5.447619, 5.447619, 2, 5.4545455, 6.0606061, 2, 5.6521739, 6.2801932, 2, 21.298347, 
21.298347, 2.0909091, 7.4278263, 7.4278263, 2.1666667, 3.9786992, 4.4207768, 2.2307692, 9.7161004, 10.795667, 2, 0, 0, 2.1818182, 0.87745601, 0.87745601, 2.3333333, 12.978906, 7.9395257, 2.4615385, 12.307692, 13.675214, 2, 39.825836, 39.825836, 1.8181818, 26.77672, 26.77672, 1.6666667, 21.638545, 19.413198, 1.5384615, 23.578102, 25.991173), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(5.8245202e-05, 0.033918251, 0.017570415, 0.062651459), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[3, ], c(0.075858531, 0.086024261, 0.37522404, 0.19729909), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$eventsPerStage, x7$eventsPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$iterations, x7$iterations, tolerance = 1e-05) expect_equal(x7CodeBased$rejectAtLeastOne, x7$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x7CodeBased$rejectedArmsPerStage, x7$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$futilityStop, x7$futilityStop, tolerance = 1e-05) expect_equal(x7CodeBased$futilityPerStage, x7$futilityPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$earlyStop, x7$earlyStop, tolerance = 1e-05) expect_equal(x7CodeBased$successPerStage, x7$successPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$selectedArms, x7$selectedArms, tolerance = 1e-05) expect_equal(x7CodeBased$numberOfActiveArms, x7$numberOfActiveArms, tolerance = 1e-05) expect_equal(x7CodeBased$expectedNumberOfEvents, x7$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x7CodeBased$singleNumberOfEventsPerStage, x7$singleNumberOfEventsPerStage, tolerance = 
1e-05) expect_equal(x7CodeBased$conditionalPowerAchieved, x7$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x8 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "all", plannedEvents = c(10, 30, 50), omegaMaxVector = seq(1, 1.6, 0.2), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x8' with expected results expect_equal(unlist(as.list(x8$eventsPerStage)), c(4, 43.80534, 83.61068, 3.7272727, 41, 78.272727, 3.5, 36.991095, 70.482189, 3.3076923, 31.601422, 59.895151, 4, 43.80534, 83.61068, 3.8181818, 42, 80.181818, 3.6666667, 38.752575, 73.838484, 3.5384615, 33.806172, 64.073883, 4, 43.80534, 83.61068, 3.9090909, 43, 82.090909, 3.8333333, 40.514056, 77.194778, 3.7692308, 36.010922, 68.252614, 4, 43.80534, 83.61068, 4, 44, 84, 4, 42.275537, 80.551073, 4, 38.215673, 72.431346), tolerance = 1e-07) expect_equal(x8$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x8$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x8$iterations[3, ], c(10, 10, 10, 10)) expect_equal(x8$rejectAtLeastOne, c(0, 0.1, 0.2, 0.2), tolerance = 1e-07) expect_equal(unlist(as.list(x8$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.2, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x8$futilityStop, c(0, 0, 0, 0)) expect_equal(x8$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x8$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x8$earlyStop[1, ], c(0, 0, 0, 0)) 
expect_equal(x8$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(x8$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x8$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x8$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x8$selectedArms)), c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)) expect_equal(x8$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x8$numberOfActiveArms[2, ], c(4, 4, 4, 4)) expect_equal(x8$numberOfActiveArms[3, ], c(4, 4, 4, 4)) expect_equal(x8$expectedNumberOfEvents, c(209.0267, 210, 201.37768, 181.07836), tolerance = 1e-07) expect_equal(unlist(as.list(x8$singleNumberOfEventsPerStage)), c(2, 19.90267, 19.90267, 1.9090909, 19.090909, 19.090909, 1.8333333, 17.542954, 17.542954, 1.7692308, 15.133855, 15.133855, 2, 19.90267, 19.90267, 2, 20, 20, 2, 19.137768, 19.137768, 2, 17.107836, 17.107836, 2, 19.90267, 19.90267, 2.0909091, 20.909091, 20.909091, 2.1666667, 20.732582, 20.732582, 2.2307692, 19.081818, 19.081818, 2, 19.90267, 19.90267, 2.1818182, 21.818182, 21.818182, 2.3333333, 22.327396, 22.327396, 2.4615385, 21.055799, 21.055799, 2, 19.90267, 19.90267, 1.8181818, 18.181818, 18.181818, 1.6666667, 15.94814, 15.94814, 1.5384615, 13.159874, 13.159874), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, ], c(0.09225544, 0.10755451, 0.080008195, 0.16137979), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[3, ], c(0.011968708, 0.030096405, 0.063317862, 0.066369104), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) 
expect_equal(x8CodeBased$eventsPerStage, x8$eventsPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$iterations, x8$iterations, tolerance = 1e-05) expect_equal(x8CodeBased$rejectAtLeastOne, x8$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x8CodeBased$rejectedArmsPerStage, x8$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$futilityStop, x8$futilityStop, tolerance = 1e-05) expect_equal(x8CodeBased$futilityPerStage, x8$futilityPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$earlyStop, x8$earlyStop, tolerance = 1e-05) expect_equal(x8CodeBased$successPerStage, x8$successPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$selectedArms, x8$selectedArms, tolerance = 1e-05) expect_equal(x8CodeBased$numberOfActiveArms, x8$numberOfActiveArms, tolerance = 1e-05) expect_equal(x8CodeBased$expectedNumberOfEvents, x8$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x8CodeBased$singleNumberOfEventsPerStage, x8$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalPowerAchieved, x8$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x9 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "rBest", rValue = 2, plannedEvents = c(10, 30, 50), omegaMaxVector = seq(1, 1.6, 0.2), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x9' with expected results expect_equal(unlist(as.list(x9$eventsPerStage)), c(4, 52.52163, 101.04326, 3.7272727, 38.874416, 74.021559, 3.5, 26.84484, 50.189681, 3.3076923, 30.949369, 58.591046, 4, 
46.265898, 88.531796, 3.8181818, 38.846483, 73.874785, 3.6666667, 31.816991, 59.967314, 3.5384615, 34.40256, 65.266658, 4, 45.854963, 87.709926, 3.9090909, 42.746334, 81.583577, 3.8333333, 33.812131, 63.790928, 3.7692308, 37.761125, 71.75302, 4, 39.599231, 75.198463, 4, 51.153533, 98.307067, 4, 33.295158, 62.590316, 4, 52.301815, 100.60363), tolerance = 1e-07) expect_equal(x9$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x9$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x9$iterations[3, ], c(10, 10, 10, 10)) expect_equal(x9$rejectAtLeastOne, c(0.1, 0, 0.2, 0.6), tolerance = 1e-07) expect_equal(unlist(as.list(x9$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.4), tolerance = 1e-07) expect_equal(x9$futilityStop, c(0, 0, 0, 0)) expect_equal(x9$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x9$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x9$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x9$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(x9$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x9$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x9$successPerStage[3, ], c(0, 0, 0.2, 0.1), tolerance = 1e-07) expect_equal(unlist(as.list(x9$selectedArms)), c(1, 0.7, 0.7, 1, 0.4, 0.4, 1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.5, 0.5, 1, 0.3, 0.3, 1, 0.6, 0.6, 1, 0.4, 0.4, 1, 0.5, 0.5, 1, 0.5, 0.5, 1, 0.7, 0.7, 1, 0.5, 0.5, 1, 0.3, 0.3, 1, 0.8, 0.8, 1, 0.5, 0.5, 1, 0.8, 0.8), tolerance = 1e-07) expect_equal(x9$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x9$numberOfActiveArms[2, ], c(2, 2, 2, 2)) expect_equal(x9$numberOfActiveArms[3, ], c(2, 2, 2, 2)) expect_equal(x9$expectedNumberOfEvents, c(178.24172, 173.11501, 129.7381, 168.7644), tolerance = 1e-07) expect_equal(unlist(as.list(x9$singleNumberOfEventsPerStage)), c(2, 20.481343, 20.481343, 1.9090909, 10.277572, 10.277572, 1.8333333, 6.3781513, 6.3781513, 1.7692308, 7.1692484, 7.1692484, 2, 
14.225611, 14.225611, 2, 10.15873, 10.15873, 2, 11.183635, 11.183635, 2, 10.39167, 10.39167, 2, 13.814676, 13.814676, 2.0909091, 13.967672, 13.967672, 2.1666667, 13.012108, 13.012108, 2.2307692, 13.519466, 13.519466, 2, 7.5589445, 7.5589445, 2.1818182, 22.283962, 22.283962, 2.3333333, 12.328469, 12.328469, 2.4615385, 27.829386, 27.829386, 2, 28.040287, 28.040287, 1.8181818, 24.869571, 24.869571, 1.6666667, 16.966689, 16.966689, 1.5384615, 20.472429, 20.472429), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x9$conditionalPowerAchieved[2, ], c(0.0011884888, 0.025687618, 0.050936222, 0.056920177), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[3, ], c(0.13630501, 0.14441052, 0.13257023, 0.41932885), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x9), NA))) expect_output(print(x9)$show()) invisible(capture.output(expect_error(summary(x9), NA))) expect_output(summary(x9)$show()) x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL))) expect_equal(x9CodeBased$eventsPerStage, x9$eventsPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$iterations, x9$iterations, tolerance = 1e-05) expect_equal(x9CodeBased$rejectAtLeastOne, x9$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x9CodeBased$rejectedArmsPerStage, x9$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$futilityStop, x9$futilityStop, tolerance = 1e-05) expect_equal(x9CodeBased$futilityPerStage, x9$futilityPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$earlyStop, x9$earlyStop, tolerance = 1e-05) expect_equal(x9CodeBased$successPerStage, x9$successPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$selectedArms, x9$selectedArms, tolerance = 1e-05) expect_equal(x9CodeBased$numberOfActiveArms, x9$numberOfActiveArms, tolerance = 1e-05) expect_equal(x9CodeBased$expectedNumberOfEvents, x9$expectedNumberOfEvents, 
tolerance = 1e-05) expect_equal(x9CodeBased$singleNumberOfEventsPerStage, x9$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$conditionalPowerAchieved, x9$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x9), "character") df <- as.data.frame(x9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x10 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "epsilon", epsilonValue = 0.1, plannedEvents = c(10, 30, 50), omegaMaxVector = seq(1, 1.6, 0.2), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), intersectionTest = "Hierarchical", maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x10' with expected results expect_equal(unlist(as.list(x10$eventsPerStage)), c(4, 56.657929, 125.67531, 3.7272727, 49.785816, 137.47812, 3.5, 31.249399, 83.970115, 3.3076923, 24.380086, 39.12662, 4, 50.333545, 107.06446, 3.8181818, 41.640149, 84.416884, 3.6666667, 39.507667, 64.61277, 3.5384615, 24.167144, 31.025997, 4, 45.952714, 80.461405, 3.9090909, 47.662083, 90.438818, 3.8333333, 33.911202, 59.016305, 3.7692308, 36.126326, 42.985179, 4, 39.486047, 79.550294, 4, 50.19027, 105.2747, 4, 37.6469, 62.752003, 4, 41.892876, 48.751729), tolerance = 1e-07) expect_equal(x10$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x10$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x10$iterations[3, ], c(6, 3, 2, 1)) expect_equal(x10$rejectAtLeastOne, c(0, 0, 0, 0)) expect_equal(unlist(as.list(x10$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(x10$futilityStop, c(0.4, 0.7, 0.8, 0.9), 
tolerance = 1e-07) expect_equal(x10$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x10$futilityPerStage[2, ], c(0.4, 0.7, 0.8, 0.9), tolerance = 1e-07) expect_equal(x10$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x10$earlyStop[2, ], c(0.4, 0.7, 0.8, 0.9), tolerance = 1e-07) expect_equal(x10$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x10$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x10$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x10$selectedArms)), c(1, 0.6, 0.6, 1, 0.3, 0.3, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.5, 0.4, 1, 0.1, 0, 1, 0.4, 0, 1, 0.1, 0, 1, 0.2, 0, 1, 0.4, 0, 1, 0.3, 0, 1, 0.3, 0, 1, 0.2, 0.1, 1, 0.4, 0.1, 1, 0.4, 0, 1, 0.5, 0), tolerance = 1e-07) expect_equal(x10$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x10$numberOfActiveArms[2, ], c(1.5, 1.2, 1.3, 1), tolerance = 1e-07) expect_equal(x10$numberOfActiveArms[3, ], c(1.8333333, 1.3333333, 1, 1), tolerance = 1e-07) expect_equal(x10$expectedNumberOfEvents, c(148.64919, 116.07216, 81.180483, 62.574824), tolerance = 1e-07) expect_equal(unlist(as.list(x10$singleNumberOfEventsPerStage)), c(2, 20.705215, 34.508692, 1.9090909, 13.474672, 44.915572, 1.8333333, 5.5231227, 27.615613, 1.7692308, 0.78876811, 7.8876811, 2, 14.380832, 22.222222, 2, 5.2380952, 0, 2, 13.614724, 0, 2, 0.34505655, 0, 2, 10, 0, 2.0909091, 11.16912, 0, 2.1666667, 7.8515929, 0, 2.2307692, 12.073469, 0, 2, 3.5333333, 5.5555556, 2.1818182, 13.606399, 12.307692, 2.3333333, 11.420624, 0, 2.4615385, 17.609251, 0, 2, 31.952714, 34.508692, 1.8181818, 32.583872, 42.776735, 1.6666667, 22.226276, 25.105103, 1.5384615, 20.283626, 6.8588531), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.0031444794, 0.00037604601, 0.038145414, 0.045847923), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[3, ], c(7.9302274e-08, 1.361166e-06, 0.16667791, 0.040805908), tolerance = 1e-07) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x10), NA))) expect_output(print(x10)$show()) invisible(capture.output(expect_error(summary(x10), NA))) expect_output(summary(x10)$show()) x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL))) expect_equal(x10CodeBased$eventsPerStage, x10$eventsPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$iterations, x10$iterations, tolerance = 1e-05) expect_equal(x10CodeBased$rejectAtLeastOne, x10$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x10CodeBased$rejectedArmsPerStage, x10$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$futilityStop, x10$futilityStop, tolerance = 1e-05) expect_equal(x10CodeBased$futilityPerStage, x10$futilityPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$earlyStop, x10$earlyStop, tolerance = 1e-05) expect_equal(x10CodeBased$successPerStage, x10$successPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$selectedArms, x10$selectedArms, tolerance = 1e-05) expect_equal(x10CodeBased$numberOfActiveArms, x10$numberOfActiveArms, tolerance = 1e-05) expect_equal(x10CodeBased$expectedNumberOfEvents, x10$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x10CodeBased$singleNumberOfEventsPerStage, x10$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$conditionalPowerAchieved, x10$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x10), "character") df <- as.data.frame(x10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x11 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, directionUpper = FALSE, threshold = 0, plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(0.1, 0.3, 0.1), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = 
c(10, 100, 100), intersectionTest = "Hierarchical", maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x11' with expected results expect_equal(unlist(as.list(x11$eventsPerStage)), c(1.5454545, 1.5454545, 1.5454545, 2, 2, 2, 2.3846154, 2.3846154, 2.3846154, 2.3636364, 2.3636364, 2.3636364, 2.6666667, 2.6666667, 2.6666667, 2.9230769, 2.9230769, 2.9230769, 3.1818182, 3.1818182, 3.1818182, 3.3333333, 3.3333333, 3.3333333, 3.4615385, 3.4615385, 3.4615385, 4, 4, 4, 4, 4, 4, 4, 4, 4), tolerance = 1e-07) expect_equal(x11$iterations[1, ], c(10, 10, 10)) expect_equal(x11$iterations[2, ], c(0, 0, 0)) expect_equal(x11$iterations[3, ], c(0, 0, 0)) expect_equal(x11$rejectAtLeastOne, c(0, 0, 0)) expect_equal(unlist(as.list(x11$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(x11$futilityStop, c(1, 1, 1)) expect_equal(x11$futilityPerStage[1, ], c(1, 1, 1)) expect_equal(x11$futilityPerStage[2, ], c(0, 0, 0)) expect_equal(x11$earlyStop[1, ], c(1, 1, 1)) expect_equal(x11$earlyStop[2, ], c(0, 0, 0)) expect_equal(x11$successPerStage[1, ], c(0, 0, 0)) expect_equal(x11$successPerStage[2, ], c(0, 0, 0)) expect_equal(x11$successPerStage[3, ], c(0, 0, 0)) expect_equal(unlist(as.list(x11$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0)) expect_equal(x11$numberOfActiveArms[1, ], c(4, 4, 4)) expect_equal(x11$numberOfActiveArms[2, ], c(NaN, NaN, NaN)) expect_equal(x11$numberOfActiveArms[3, ], c(NaN, NaN, NaN)) expect_equal(x11$expectedNumberOfEvents, c(NaN, NaN, NaN)) expect_equal(unlist(as.list(x11$singleNumberOfEventsPerStage)), c(1.1818182, NaN, NaN, 1.3333333, NaN, NaN, 1.4615385, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2.8181818, NaN, NaN, 2.6666667, NaN, NaN, 2.5384615, NaN, NaN, 3.6363636, NaN, NaN, 3.3333333, NaN, NaN, 3.0769231, NaN, NaN, 0.36363636, 
NaN, NaN, 0.66666667, NaN, NaN, 0.92307692, NaN, NaN), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x11), NA))) expect_output(print(x11)$show()) invisible(capture.output(expect_error(summary(x11), NA))) expect_output(summary(x11)$show()) x11CodeBased <- eval(parse(text = getObjectRCode(x11, stringWrapParagraphWidth = NULL))) expect_equal(x11CodeBased$eventsPerStage, x11$eventsPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$iterations, x11$iterations, tolerance = 1e-05) expect_equal(x11CodeBased$rejectAtLeastOne, x11$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x11CodeBased$rejectedArmsPerStage, x11$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$futilityStop, x11$futilityStop, tolerance = 1e-05) expect_equal(x11CodeBased$futilityPerStage, x11$futilityPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$earlyStop, x11$earlyStop, tolerance = 1e-05) expect_equal(x11CodeBased$successPerStage, x11$successPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$selectedArms, x11$selectedArms, tolerance = 1e-05) expect_equal(x11CodeBased$numberOfActiveArms, x11$numberOfActiveArms, tolerance = 1e-05) expect_equal(x11CodeBased$expectedNumberOfEvents, x11$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x11CodeBased$singleNumberOfEventsPerStage, x11$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_type(names(x11), "character") df <- as.data.frame(x11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x12 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "linear", activeArms = 4, directionUpper = FALSE, threshold = 0, plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), 
# NOTE(review): this chunk was stored with collapsed newlines ('##' comments would
# otherwise swallow the statements that follow them on the same physical line);
# one-statement-per-line layout restored below, all code tokens unchanged.
# Tail of the 'x12' getSimulationMultiArmSurvival(...) call opened above this chunk.
maxNumberOfEventsPerStage = c(10, 100, 100), intersectionTest = "Hierarchical", maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmSurvival object 'x12' with expected results
expect_equal(unlist(as.list(x12$eventsPerStage)), c(4, 4, 4, 4.2727273, 4.2727273, 4.2727273, 4.5, 4.5, 4.5, 4.6923077, 4.6923077, 4.6923077, 4, 4, 4, 4.1818182, 4.1818182, 4.1818182, 4.3333333, 4.3333333, 4.3333333, 4.4615385, 4.4615385, 4.4615385, 4, 4, 4, 4.0909091, 4.0909091, 4.0909091, 4.1666667, 4.1666667, 4.1666667, 4.2307692, 4.2307692, 4.2307692, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4), tolerance = 1e-07)
expect_equal(x12$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x12$iterations[2, ], c(0, 0, 0, 0))
expect_equal(x12$iterations[3, ], c(0, 0, 0, 0))
expect_equal(x12$rejectAtLeastOne, c(0, 0, 0, 0))
expect_equal(unlist(as.list(x12$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x12$futilityStop, c(1, 1, 1, 1))
expect_equal(x12$futilityPerStage[1, ], c(1, 1, 1, 1))
expect_equal(x12$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x12$earlyStop[1, ], c(1, 1, 1, 1))
expect_equal(x12$earlyStop[2, ], c(0, 0, 0, 0))
expect_equal(x12$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x12$successPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x12$successPerStage[3, ], c(0, 0, 0, 0))
expect_equal(unlist(as.list(x12$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0))
expect_equal(x12$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x12$numberOfActiveArms[2, ], c(NaN, NaN, NaN, NaN))
expect_equal(x12$numberOfActiveArms[3, ], c(NaN, NaN, NaN, NaN))
expect_equal(x12$expectedNumberOfEvents, c(NaN, NaN, NaN, NaN))
expect_equal(unlist(as.list(x12$singleNumberOfEventsPerStage)), c(2, NaN, NaN, 2.0909091, NaN, NaN, 2.1666667, NaN, NaN, 2.2307692, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 1.9090909, NaN, NaN, 1.8333333, NaN, NaN, 1.7692308, NaN, NaN, 2, NaN, NaN, 1.8181818, NaN, NaN, 1.6666667, NaN, NaN, 1.5384615, NaN, NaN, 2, NaN, NaN, 2.1818182, NaN, NaN, 2.3333333, NaN, NaN, 2.4615385, NaN, NaN), tolerance = 1e-07)
# Output and code-round-trip checks, run only when the complete test set is enabled
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x12), NA)))
    expect_output(print(x12)$show())
    invisible(capture.output(expect_error(summary(x12), NA)))
    expect_output(summary(x12)$show())
    # Re-create the object from its generated R code and compare all result fields
    x12CodeBased <- eval(parse(text = getObjectRCode(x12, stringWrapParagraphWidth = NULL)))
    expect_equal(x12CodeBased$eventsPerStage, x12$eventsPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$iterations, x12$iterations, tolerance = 1e-05)
    expect_equal(x12CodeBased$rejectAtLeastOne, x12$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x12CodeBased$rejectedArmsPerStage, x12$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$futilityStop, x12$futilityStop, tolerance = 1e-05)
    expect_equal(x12CodeBased$futilityPerStage, x12$futilityPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$earlyStop, x12$earlyStop, tolerance = 1e-05)
    expect_equal(x12CodeBased$successPerStage, x12$successPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$selectedArms, x12$selectedArms, tolerance = 1e-05)
    expect_equal(x12CodeBased$numberOfActiveArms, x12$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x12CodeBased$expectedNumberOfEvents, x12$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(x12CodeBased$singleNumberOfEventsPerStage, x12$singleNumberOfEventsPerStage, tolerance = 1e-05)
    expect_type(names(x12), "character")
    df <- as.data.frame(x12)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x12)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# 'x13' variant: user-defined effect matrix (call continues on the next chunk line)
x13 <- getSimulationMultiArmSurvival(
    seed = 1234, getDesignInverseNormal(informationRates =
c(0.2, 0.6, 1)), typeOfShape = "userDefined", activeArms = 4, directionUpper = FALSE, threshold = 0, plannedEvents = c(10, 30, 50), adaptations = rep(TRUE, 2), effectMatrix = matrix(c(0.1, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.5), ncol = 4), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), intersectionTest = "Sidak", maxNumberOfIterations = 10
)

# NOTE(review): layout restored from a collapsed physical line; code tokens unchanged.
## Comparison of the results of SimulationResultsMultiArmSurvival object 'x13' with expected results
expect_equal(unlist(as.list(x13$eventsPerStage)), c(5.5, 5.5, 5.5, 5, 5, 5, 6.5, 6.5, 6.5, 5.8333333, 5.8333333, 5.8333333, 6, 6, 6, 5.4166667, 5.4166667, 5.4166667, 7, 7, 7, 6.25, 6.25, 6.25), tolerance = 1e-07)
expect_equal(x13$iterations[1, ], c(10, 10))
expect_equal(x13$iterations[2, ], c(0, 0))
expect_equal(x13$iterations[3, ], c(0, 0))
expect_equal(x13$rejectAtLeastOne, c(0, 0))
expect_equal(unlist(as.list(x13$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x13$futilityStop, c(1, 1))
expect_equal(x13$futilityPerStage[1, ], c(1, 1))
expect_equal(x13$futilityPerStage[2, ], c(0, 0))
expect_equal(x13$earlyStop[1, ], c(1, 1))
expect_equal(x13$earlyStop[2, ], c(0, 0))
expect_equal(x13$successPerStage[1, ], c(0, 0))
expect_equal(x13$successPerStage[2, ], c(0, 0))
expect_equal(x13$successPerStage[3, ], c(0, 0))
expect_equal(unlist(as.list(x13$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0))
expect_equal(x13$numberOfActiveArms[1, ], c(4, 4))
expect_equal(x13$numberOfActiveArms[2, ], c(NaN, NaN))
expect_equal(x13$numberOfActiveArms[3, ], c(NaN, NaN))
expect_equal(x13$expectedNumberOfEvents, c(NaN, NaN))
expect_equal(unlist(as.list(x13$singleNumberOfEventsPerStage)), c(0.5, NaN, NaN, 0.83333333, NaN, NaN, 1.5, NaN, NaN, 1.6666667, NaN, NaN, 1, NaN, NaN, 1.25, NaN, NaN, 2, NaN, NaN, 2.0833333, NaN, NaN, 5, NaN, NaN, 4.1666667, NaN, NaN), tolerance = 1e-07)
# Output and code-round-trip checks, run only when the complete test set is enabled
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x13), NA)))
    expect_output(print(x13)$show())
    invisible(capture.output(expect_error(summary(x13), NA)))
    expect_output(summary(x13)$show())
    x13CodeBased <- eval(parse(text = getObjectRCode(x13, stringWrapParagraphWidth = NULL)))
    expect_equal(x13CodeBased$eventsPerStage, x13$eventsPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$iterations, x13$iterations, tolerance = 1e-05)
    expect_equal(x13CodeBased$rejectAtLeastOne, x13$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x13CodeBased$rejectedArmsPerStage, x13$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$futilityStop, x13$futilityStop, tolerance = 1e-05)
    expect_equal(x13CodeBased$futilityPerStage, x13$futilityPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$earlyStop, x13$earlyStop, tolerance = 1e-05)
    expect_equal(x13CodeBased$successPerStage, x13$successPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$selectedArms, x13$selectedArms, tolerance = 1e-05)
    expect_equal(x13CodeBased$numberOfActiveArms, x13$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x13CodeBased$expectedNumberOfEvents, x13$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(x13CodeBased$singleNumberOfEventsPerStage, x13$singleNumberOfEventsPerStage, tolerance = 1e-05)
    expect_type(names(x13), "character")
    df <- as.data.frame(x13)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x13)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# 'x14' variant: sigmoidEmax effect shape (call continues on the next chunk line)
x14 <- getSimulationMultiArmSurvival(
    seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "sigmoidEmax", gED50 = 2, slope = 0.5, activeArms = 4, directionUpper = FALSE, threshold = 0, plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100),
intersectionTest = "Sidak", maxNumberOfIterations = 10
)

# NOTE(review): layout restored from a collapsed physical line; code tokens unchanged.
## Comparison of the results of SimulationResultsMultiArmSurvival object 'x14' with expected results
expect_equal(unlist(as.list(x14$eventsPerStage)), c(4, 4, 4, 4.1452587, 4.1452587, 4.1452587, 4.2627857, 4.2627857, 4.2627857, 4.3598306, 4.3598306, 4.3598306, 4, 4, 4, 4.1145653, 4.1145653, 4.1145653, 4.2072587, 4.2072587, 4.2072587, 4.2837979, 4.2837979, 4.2837979, 4, 4, 4, 4.0964933, 4.0964933, 4.0964933, 4.1745649, 4.1745649, 4.1745649, 4.2390305, 4.2390305, 4.2390305, 4, 4, 4, 4.0838719, 4.0838719, 4.0838719, 4.1517317, 4.1517317, 4.1517317, 4.2077651, 4.2077651, 4.2077651), tolerance = 1e-07)
expect_equal(x14$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x14$iterations[2, ], c(0, 0, 0, 0))
expect_equal(x14$iterations[3, ], c(0, 0, 0, 0))
expect_equal(x14$rejectAtLeastOne, c(0, 0, 0, 0))
expect_equal(unlist(as.list(x14$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x14$futilityStop, c(1, 1, 1, 1))
expect_equal(x14$futilityPerStage[1, ], c(1, 1, 1, 1))
expect_equal(x14$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x14$earlyStop[1, ], c(1, 1, 1, 1))
expect_equal(x14$earlyStop[2, ], c(0, 0, 0, 0))
expect_equal(x14$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x14$successPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x14$successPerStage[3, ], c(0, 0, 0, 0))
expect_equal(unlist(as.list(x14$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0))
expect_equal(x14$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x14$numberOfActiveArms[2, ], c(NaN, NaN, NaN, NaN))
expect_equal(x14$numberOfActiveArms[3, ], c(NaN, NaN, NaN, NaN))
expect_equal(x14$expectedNumberOfEvents, c(NaN, NaN, NaN, NaN))
expect_equal(unlist(as.list(x14$singleNumberOfEventsPerStage)), c(2, NaN, NaN, 1.9985289, NaN, NaN, 1.9973387, NaN, NaN, 1.996356, NaN, NaN, 2, NaN, NaN, 1.9678356, NaN, NaN, 1.9418117, NaN, NaN, 1.9203232, NaN, NaN, 2, NaN, NaN, 1.9497636, NaN, NaN, 1.9091179, NaN, NaN, 1.8755558, NaN, NaN, 2, NaN, NaN, 1.9371422, NaN, NaN, 1.8862847, NaN, NaN, 1.8442904, NaN, NaN, 2, NaN, NaN, 2.1467297, NaN, NaN, 2.265447, NaN, NaN, 2.3634747, NaN, NaN), tolerance = 1e-07)
# Output and code-round-trip checks, run only when the complete test set is enabled
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x14), NA)))
    expect_output(print(x14)$show())
    invisible(capture.output(expect_error(summary(x14), NA)))
    expect_output(summary(x14)$show())
    x14CodeBased <- eval(parse(text = getObjectRCode(x14, stringWrapParagraphWidth = NULL)))
    expect_equal(x14CodeBased$eventsPerStage, x14$eventsPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$iterations, x14$iterations, tolerance = 1e-05)
    expect_equal(x14CodeBased$rejectAtLeastOne, x14$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x14CodeBased$rejectedArmsPerStage, x14$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$futilityStop, x14$futilityStop, tolerance = 1e-05)
    expect_equal(x14CodeBased$futilityPerStage, x14$futilityPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$earlyStop, x14$earlyStop, tolerance = 1e-05)
    expect_equal(x14CodeBased$successPerStage, x14$successPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$selectedArms, x14$selectedArms, tolerance = 1e-05)
    expect_equal(x14CodeBased$numberOfActiveArms, x14$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x14CodeBased$expectedNumberOfEvents, x14$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(x14CodeBased$singleNumberOfEventsPerStage, x14$singleNumberOfEventsPerStage, tolerance = 1e-05)
    expect_type(names(x14), "character")
    df <- as.data.frame(x14)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x14)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# 'x15' variant: typeOfSelection = "all" (call continues on the next chunk line)
x15 <- getSimulationMultiArmSurvival(
seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, directionUpper = FALSE, threshold = 0, typeOfSelection = "all", plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), intersectionTest = "Sidak", maxNumberOfIterations = 10
)

# NOTE(review): layout restored from collapsed physical lines; code tokens unchanged.
## Comparison of the results of SimulationResultsMultiArmSurvival object 'x15' with expected results
expect_equal(unlist(as.list(x15$eventsPerStage)), c(4, 4, 4, 4.2727273, 4.2727273, 4.2727273, 4.5, 4.5, 4.5, 4.6923077, 4.6923077, 4.6923077, 4, 4, 4, 4.1818182, 4.1818182, 4.1818182, 4.3333333, 4.3333333, 4.3333333, 4.4615385, 4.4615385, 4.4615385, 4, 4, 4, 4.0909091, 4.0909091, 4.0909091, 4.1666667, 4.1666667, 4.1666667, 4.2307692, 4.2307692, 4.2307692, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4), tolerance = 1e-07)
expect_equal(x15$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x15$iterations[2, ], c(0, 0, 0, 0))
expect_equal(x15$iterations[3, ], c(0, 0, 0, 0))
expect_equal(x15$rejectAtLeastOne, c(0, 0, 0, 0))
expect_equal(unlist(as.list(x15$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x15$futilityStop, c(1, 1, 1, 1))
expect_equal(x15$futilityPerStage[1, ], c(1, 1, 1, 1))
expect_equal(x15$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x15$earlyStop[1, ], c(1, 1, 1, 1))
expect_equal(x15$earlyStop[2, ], c(0, 0, 0, 0))
expect_equal(x15$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x15$successPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x15$successPerStage[3, ], c(0, 0, 0, 0))
expect_equal(unlist(as.list(x15$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0))
expect_equal(x15$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x15$numberOfActiveArms[2, ], c(NaN, NaN, NaN, NaN))
expect_equal(x15$numberOfActiveArms[3, ], c(NaN, NaN, NaN, NaN))
expect_equal(x15$expectedNumberOfEvents, c(NaN, NaN, NaN, NaN))
expect_equal(unlist(as.list(x15$singleNumberOfEventsPerStage)), c(2, NaN, NaN, 2.0909091, NaN, NaN, 2.1666667, NaN, NaN, 2.2307692, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 1.9090909, NaN, NaN, 1.8333333, NaN, NaN, 1.7692308, NaN, NaN, 2, NaN, NaN, 1.8181818, NaN, NaN, 1.6666667, NaN, NaN, 1.5384615, NaN, NaN, 2, NaN, NaN, 2.1818182, NaN, NaN, 2.3333333, NaN, NaN, 2.4615385, NaN, NaN), tolerance = 1e-07)
# Output and code-round-trip checks, run only when the complete test set is enabled
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x15), NA)))
    expect_output(print(x15)$show())
    invisible(capture.output(expect_error(summary(x15), NA)))
    expect_output(summary(x15)$show())
    x15CodeBased <- eval(parse(text = getObjectRCode(x15, stringWrapParagraphWidth = NULL)))
    expect_equal(x15CodeBased$eventsPerStage, x15$eventsPerStage, tolerance = 1e-05)
    expect_equal(x15CodeBased$iterations, x15$iterations, tolerance = 1e-05)
    expect_equal(x15CodeBased$rejectAtLeastOne, x15$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x15CodeBased$rejectedArmsPerStage, x15$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x15CodeBased$futilityStop, x15$futilityStop, tolerance = 1e-05)
    expect_equal(x15CodeBased$futilityPerStage, x15$futilityPerStage, tolerance = 1e-05)
    expect_equal(x15CodeBased$earlyStop, x15$earlyStop, tolerance = 1e-05)
    expect_equal(x15CodeBased$successPerStage, x15$successPerStage, tolerance = 1e-05)
    expect_equal(x15CodeBased$selectedArms, x15$selectedArms, tolerance = 1e-05)
    expect_equal(x15CodeBased$numberOfActiveArms, x15$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x15CodeBased$expectedNumberOfEvents, x15$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(x15CodeBased$singleNumberOfEventsPerStage, x15$singleNumberOfEventsPerStage, tolerance = 1e-05)
    expect_type(names(x15), "character")
    df <- as.data.frame(x15)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x15)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# 'x16' variant: typeOfSelection = "rBest" with rValue = 2 and Simes intersection test
x16 <- getSimulationMultiArmSurvival(
    seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, directionUpper = FALSE, threshold = 0, typeOfSelection = "rBest", rValue = 2, plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), intersectionTest = "Simes", maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmSurvival object 'x16' with expected results
expect_equal(unlist(as.list(x16$eventsPerStage)), c(4, 4, 4, 4.2727273, 4.2727273, 4.2727273, 4.5, 4.5, 4.5, 4.6923077, 4.6923077, 4.6923077, 4, 4, 4, 4.1818182, 4.1818182, 4.1818182, 4.3333333, 4.3333333, 4.3333333, 4.4615385, 4.4615385, 4.4615385, 4, 4, 4, 4.0909091, 4.0909091, 4.0909091, 4.1666667, 4.1666667, 4.1666667, 4.2307692, 4.2307692, 4.2307692, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4), tolerance = 1e-07)
expect_equal(x16$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x16$iterations[2, ], c(0, 0, 0, 0))
expect_equal(x16$iterations[3, ], c(0, 0, 0, 0))
expect_equal(x16$rejectAtLeastOne, c(0, 0, 0, 0))
expect_equal(unlist(as.list(x16$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x16$futilityStop, c(1, 1, 1, 1))
expect_equal(x16$futilityPerStage[1, ], c(1, 1, 1, 1))
expect_equal(x16$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x16$earlyStop[1, ], c(1, 1, 1, 1))
expect_equal(x16$earlyStop[2, ], c(0, 0, 0, 0))
expect_equal(x16$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x16$successPerStage[2, ], c(0, 0, 0, 0))
# NOTE(review): layout restored from collapsed physical lines; code tokens unchanged.
expect_equal(x16$successPerStage[3, ], c(0, 0, 0, 0))
expect_equal(unlist(as.list(x16$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0))
expect_equal(x16$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x16$numberOfActiveArms[2, ], c(NaN, NaN, NaN, NaN))
expect_equal(x16$numberOfActiveArms[3, ], c(NaN, NaN, NaN, NaN))
expect_equal(x16$expectedNumberOfEvents, c(NaN, NaN, NaN, NaN))
expect_equal(unlist(as.list(x16$singleNumberOfEventsPerStage)), c(2, NaN, NaN, 2.0909091, NaN, NaN, 2.1666667, NaN, NaN, 2.2307692, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 1.9090909, NaN, NaN, 1.8333333, NaN, NaN, 1.7692308, NaN, NaN, 2, NaN, NaN, 1.8181818, NaN, NaN, 1.6666667, NaN, NaN, 1.5384615, NaN, NaN, 2, NaN, NaN, 2.1818182, NaN, NaN, 2.3333333, NaN, NaN, 2.4615385, NaN, NaN), tolerance = 1e-07)
# Output and code-round-trip checks, run only when the complete test set is enabled
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x16), NA)))
    expect_output(print(x16)$show())
    invisible(capture.output(expect_error(summary(x16), NA)))
    expect_output(summary(x16)$show())
    x16CodeBased <- eval(parse(text = getObjectRCode(x16, stringWrapParagraphWidth = NULL)))
    expect_equal(x16CodeBased$eventsPerStage, x16$eventsPerStage, tolerance = 1e-05)
    expect_equal(x16CodeBased$iterations, x16$iterations, tolerance = 1e-05)
    expect_equal(x16CodeBased$rejectAtLeastOne, x16$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x16CodeBased$rejectedArmsPerStage, x16$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x16CodeBased$futilityStop, x16$futilityStop, tolerance = 1e-05)
    expect_equal(x16CodeBased$futilityPerStage, x16$futilityPerStage, tolerance = 1e-05)
    expect_equal(x16CodeBased$earlyStop, x16$earlyStop, tolerance = 1e-05)
    expect_equal(x16CodeBased$successPerStage, x16$successPerStage, tolerance = 1e-05)
    expect_equal(x16CodeBased$selectedArms, x16$selectedArms, tolerance = 1e-05)
    expect_equal(x16CodeBased$numberOfActiveArms, x16$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x16CodeBased$expectedNumberOfEvents, x16$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(x16CodeBased$singleNumberOfEventsPerStage, x16$singleNumberOfEventsPerStage, tolerance = 1e-05)
    expect_type(names(x16), "character")
    df <- as.data.frame(x16)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x16)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# 'x17' variant: typeOfSelection = "epsilon" with epsilonValue = 0.1
x17 <- getSimulationMultiArmSurvival(
    seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, directionUpper = FALSE, threshold = 0, typeOfSelection = "epsilon", epsilonValue = 0.1, plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(1, 1.6, 0.2), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), intersectionTest = "Simes", maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmSurvival object 'x17' with expected results
expect_equal(unlist(as.list(x17$eventsPerStage)), c(4, 4, 4, 4.2727273, 4.2727273, 4.2727273, 4.5, 4.5, 4.5, 4.6923077, 4.6923077, 4.6923077, 4, 4, 4, 4.1818182, 4.1818182, 4.1818182, 4.3333333, 4.3333333, 4.3333333, 4.4615385, 4.4615385, 4.4615385, 4, 4, 4, 4.0909091, 4.0909091, 4.0909091, 4.1666667, 4.1666667, 4.1666667, 4.2307692, 4.2307692, 4.2307692, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4), tolerance = 1e-07)
expect_equal(x17$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x17$iterations[2, ], c(0, 0, 0, 0))
expect_equal(x17$iterations[3, ], c(0, 0, 0, 0))
expect_equal(x17$rejectAtLeastOne, c(0, 0, 0, 0))
expect_equal(unlist(as.list(x17$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x17$futilityStop, c(1, 1, 1, 1))
# NOTE(review): layout restored from collapsed physical lines; code tokens unchanged.
expect_equal(x17$futilityPerStage[1, ], c(1, 1, 1, 1))
expect_equal(x17$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x17$earlyStop[1, ], c(1, 1, 1, 1))
expect_equal(x17$earlyStop[2, ], c(0, 0, 0, 0))
expect_equal(x17$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x17$successPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x17$successPerStage[3, ], c(0, 0, 0, 0))
expect_equal(unlist(as.list(x17$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0))
expect_equal(x17$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x17$numberOfActiveArms[2, ], c(NaN, NaN, NaN, NaN))
expect_equal(x17$numberOfActiveArms[3, ], c(NaN, NaN, NaN, NaN))
expect_equal(x17$expectedNumberOfEvents, c(NaN, NaN, NaN, NaN))
expect_equal(unlist(as.list(x17$singleNumberOfEventsPerStage)), c(2, NaN, NaN, 2.0909091, NaN, NaN, 2.1666667, NaN, NaN, 2.2307692, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 1.9090909, NaN, NaN, 1.8333333, NaN, NaN, 1.7692308, NaN, NaN, 2, NaN, NaN, 1.8181818, NaN, NaN, 1.6666667, NaN, NaN, 1.5384615, NaN, NaN, 2, NaN, NaN, 2.1818182, NaN, NaN, 2.3333333, NaN, NaN, 2.4615385, NaN, NaN), tolerance = 1e-07)
# Output and code-round-trip checks, run only when the complete test set is enabled
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x17), NA)))
    expect_output(print(x17)$show())
    invisible(capture.output(expect_error(summary(x17), NA)))
    expect_output(summary(x17)$show())
    x17CodeBased <- eval(parse(text = getObjectRCode(x17, stringWrapParagraphWidth = NULL)))
    expect_equal(x17CodeBased$eventsPerStage, x17$eventsPerStage, tolerance = 1e-05)
    expect_equal(x17CodeBased$iterations, x17$iterations, tolerance = 1e-05)
    expect_equal(x17CodeBased$rejectAtLeastOne, x17$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x17CodeBased$rejectedArmsPerStage, x17$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x17CodeBased$futilityStop, x17$futilityStop, tolerance = 1e-05)
    expect_equal(x17CodeBased$futilityPerStage, x17$futilityPerStage, tolerance = 1e-05)
    expect_equal(x17CodeBased$earlyStop, x17$earlyStop, tolerance = 1e-05)
    expect_equal(x17CodeBased$successPerStage, x17$successPerStage, tolerance = 1e-05)
    expect_equal(x17CodeBased$selectedArms, x17$selectedArms, tolerance = 1e-05)
    expect_equal(x17CodeBased$numberOfActiveArms, x17$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x17CodeBased$expectedNumberOfEvents, x17$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(x17CodeBased$singleNumberOfEventsPerStage, x17$singleNumberOfEventsPerStage, tolerance = 1e-05)
    expect_type(names(x17), "character")
    df <- as.data.frame(x17)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x17)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# 'x18' variant: adaptation only after stage 1 (adaptations = c(TRUE, FALSE))
x18 <- getSimulationMultiArmSurvival(
    seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, directionUpper = FALSE, threshold = 0, plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(1, 1.6, 0.2), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), intersectionTest = "Simes", maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmSurvival object 'x18' with expected results
expect_equal(unlist(as.list(x18$eventsPerStage)), c(4, 4, 4, 4.2727273, 4.2727273, 4.2727273, 4.5, 4.5, 4.5, 4.6923077, 4.6923077, 4.6923077, 4, 4, 4, 4.1818182, 4.1818182, 4.1818182, 4.3333333, 4.3333333, 4.3333333, 4.4615385, 4.4615385, 4.4615385, 4, 4, 4, 4.0909091, 4.0909091, 4.0909091, 4.1666667, 4.1666667, 4.1666667, 4.2307692, 4.2307692, 4.2307692, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4), tolerance = 1e-07)
expect_equal(x18$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x18$iterations[2, ], c(0, 0, 0, 0))
expect_equal(x18$iterations[3, ], c(0, 0, 0, 0))
expect_equal(x18$rejectAtLeastOne, c(0, 0, 0, 0))
# NOTE(review): layout restored from collapsed physical lines; code tokens unchanged.
expect_equal(unlist(as.list(x18$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x18$futilityStop, c(1, 1, 1, 1))
expect_equal(x18$futilityPerStage[1, ], c(1, 1, 1, 1))
expect_equal(x18$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x18$earlyStop[1, ], c(1, 1, 1, 1))
expect_equal(x18$earlyStop[2, ], c(0, 0, 0, 0))
expect_equal(x18$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x18$successPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x18$successPerStage[3, ], c(0, 0, 0, 0))
expect_equal(unlist(as.list(x18$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0))
expect_equal(x18$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x18$numberOfActiveArms[2, ], c(NaN, NaN, NaN, NaN))
expect_equal(x18$numberOfActiveArms[3, ], c(NaN, NaN, NaN, NaN))
expect_equal(x18$expectedNumberOfEvents, c(NaN, NaN, NaN, NaN))
expect_equal(unlist(as.list(x18$singleNumberOfEventsPerStage)), c(2, NaN, NaN, 2.0909091, NaN, NaN, 2.1666667, NaN, NaN, 2.2307692, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 1.9090909, NaN, NaN, 1.8333333, NaN, NaN, 1.7692308, NaN, NaN, 2, NaN, NaN, 1.8181818, NaN, NaN, 1.6666667, NaN, NaN, 1.5384615, NaN, NaN, 2, NaN, NaN, 2.1818182, NaN, NaN, 2.3333333, NaN, NaN, 2.4615385, NaN, NaN), tolerance = 1e-07)
# Output and code-round-trip checks, run only when the complete test set is enabled
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x18), NA)))
    expect_output(print(x18)$show())
    invisible(capture.output(expect_error(summary(x18), NA)))
    expect_output(summary(x18)$show())
    x18CodeBased <- eval(parse(text = getObjectRCode(x18, stringWrapParagraphWidth = NULL)))
    expect_equal(x18CodeBased$eventsPerStage, x18$eventsPerStage, tolerance = 1e-05)
    expect_equal(x18CodeBased$iterations, x18$iterations, tolerance = 1e-05)
    expect_equal(x18CodeBased$rejectAtLeastOne, x18$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x18CodeBased$rejectedArmsPerStage, x18$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x18CodeBased$futilityStop, x18$futilityStop, tolerance = 1e-05)
    expect_equal(x18CodeBased$futilityPerStage, x18$futilityPerStage, tolerance = 1e-05)
    expect_equal(x18CodeBased$earlyStop, x18$earlyStop, tolerance = 1e-05)
    expect_equal(x18CodeBased$successPerStage, x18$successPerStage, tolerance = 1e-05)
    expect_equal(x18CodeBased$selectedArms, x18$selectedArms, tolerance = 1e-05)
    expect_equal(x18CodeBased$numberOfActiveArms, x18$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x18CodeBased$expectedNumberOfEvents, x18$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(x18CodeBased$singleNumberOfEventsPerStage, x18$singleNumberOfEventsPerStage, tolerance = 1e-05)
    expect_type(names(x18), "character")
    df <- as.data.frame(x18)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x18)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# 'x19' variant: typeOfSelection = "all" with Bonferroni intersection test
x19 <- getSimulationMultiArmSurvival(
    seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, directionUpper = FALSE, threshold = 0, typeOfSelection = "all", plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(1, 1.6, 0.2), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), intersectionTest = "Bonferroni", maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmSurvival object 'x19' with expected results
expect_equal(unlist(as.list(x19$eventsPerStage)), c(4, 4, 4, 4.2727273, 4.2727273, 4.2727273, 4.5, 4.5, 4.5, 4.6923077, 4.6923077, 4.6923077, 4, 4, 4, 4.1818182, 4.1818182, 4.1818182, 4.3333333, 4.3333333, 4.3333333, 4.4615385, 4.4615385, 4.4615385, 4, 4, 4, 4.0909091, 4.0909091, 4.0909091, 4.1666667, 4.1666667, 4.1666667, 4.2307692,
# NOTE(review): the first line below continues the x19 eventsPerStage expectation
# opened on the previous chunk line; layout restored, code tokens unchanged.
4.2307692, 4.2307692, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4), tolerance = 1e-07)
expect_equal(x19$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x19$iterations[2, ], c(0, 0, 0, 0))
expect_equal(x19$iterations[3, ], c(0, 0, 0, 0))
expect_equal(x19$rejectAtLeastOne, c(0, 0, 0, 0))
expect_equal(unlist(as.list(x19$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x19$futilityStop, c(1, 1, 1, 1))
expect_equal(x19$futilityPerStage[1, ], c(1, 1, 1, 1))
expect_equal(x19$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x19$earlyStop[1, ], c(1, 1, 1, 1))
expect_equal(x19$earlyStop[2, ], c(0, 0, 0, 0))
expect_equal(x19$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x19$successPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x19$successPerStage[3, ], c(0, 0, 0, 0))
expect_equal(unlist(as.list(x19$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0))
expect_equal(x19$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x19$numberOfActiveArms[2, ], c(NaN, NaN, NaN, NaN))
expect_equal(x19$numberOfActiveArms[3, ], c(NaN, NaN, NaN, NaN))
expect_equal(x19$expectedNumberOfEvents, c(NaN, NaN, NaN, NaN))
expect_equal(unlist(as.list(x19$singleNumberOfEventsPerStage)), c(2, NaN, NaN, 2.0909091, NaN, NaN, 2.1666667, NaN, NaN, 2.2307692, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 1.9090909, NaN, NaN, 1.8333333, NaN, NaN, 1.7692308, NaN, NaN, 2, NaN, NaN, 1.8181818, NaN, NaN, 1.6666667, NaN, NaN, 1.5384615, NaN, NaN, 2, NaN, NaN, 2.1818182, NaN, NaN, 2.3333333, NaN, NaN, 2.4615385, NaN, NaN), tolerance = 1e-07)
# Output and code-round-trip checks, run only when the complete test set is enabled
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x19), NA)))
    expect_output(print(x19)$show())
    invisible(capture.output(expect_error(summary(x19), NA)))
    expect_output(summary(x19)$show())
    x19CodeBased <- eval(parse(text = getObjectRCode(x19, stringWrapParagraphWidth = NULL)))
    expect_equal(x19CodeBased$eventsPerStage, x19$eventsPerStage, tolerance = 1e-05)
    expect_equal(x19CodeBased$iterations, x19$iterations, tolerance = 1e-05)
    expect_equal(x19CodeBased$rejectAtLeastOne, x19$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x19CodeBased$rejectedArmsPerStage, x19$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x19CodeBased$futilityStop, x19$futilityStop, tolerance = 1e-05)
    expect_equal(x19CodeBased$futilityPerStage, x19$futilityPerStage, tolerance = 1e-05)
    expect_equal(x19CodeBased$earlyStop, x19$earlyStop, tolerance = 1e-05)
    expect_equal(x19CodeBased$successPerStage, x19$successPerStage, tolerance = 1e-05)
    expect_equal(x19CodeBased$selectedArms, x19$selectedArms, tolerance = 1e-05)
    expect_equal(x19CodeBased$numberOfActiveArms, x19$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x19CodeBased$expectedNumberOfEvents, x19$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(x19CodeBased$singleNumberOfEventsPerStage, x19$singleNumberOfEventsPerStage, tolerance = 1e-05)
    expect_type(names(x19), "character")
    df <- as.data.frame(x19)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x19)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# 'x20' variant: typeOfSelection = "rBest" (rValue = 2) with Bonferroni intersection test
x20 <- getSimulationMultiArmSurvival(
    seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, directionUpper = FALSE, threshold = 0, typeOfSelection = "rBest", rValue = 2, plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(1, 1.6, 0.2), adaptations = c(TRUE, FALSE), intersectionTest = "Bonferroni", conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmSurvival object 'x20' with expected results
# NOTE(review): layout restored from collapsed physical lines; code tokens unchanged.
expect_equal(unlist(as.list(x20$eventsPerStage)), c(4, 4, 4, 4.2727273, 4.2727273, 4.2727273, 4.5, 4.5, 4.5, 4.6923077, 4.6923077, 4.6923077, 4, 4, 4, 4.1818182, 4.1818182, 4.1818182, 4.3333333, 4.3333333, 4.3333333, 4.4615385, 4.4615385, 4.4615385, 4, 4, 4, 4.0909091, 4.0909091, 4.0909091, 4.1666667, 4.1666667, 4.1666667, 4.2307692, 4.2307692, 4.2307692, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4), tolerance = 1e-07)
expect_equal(x20$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x20$iterations[2, ], c(0, 0, 0, 0))
expect_equal(x20$iterations[3, ], c(0, 0, 0, 0))
expect_equal(x20$rejectAtLeastOne, c(0, 0, 0, 0))
expect_equal(unlist(as.list(x20$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x20$futilityStop, c(1, 1, 1, 1))
expect_equal(x20$futilityPerStage[1, ], c(1, 1, 1, 1))
expect_equal(x20$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x20$earlyStop[1, ], c(1, 1, 1, 1))
expect_equal(x20$earlyStop[2, ], c(0, 0, 0, 0))
expect_equal(x20$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x20$successPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x20$successPerStage[3, ], c(0, 0, 0, 0))
expect_equal(unlist(as.list(x20$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0))
expect_equal(x20$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x20$numberOfActiveArms[2, ], c(NaN, NaN, NaN, NaN))
expect_equal(x20$numberOfActiveArms[3, ], c(NaN, NaN, NaN, NaN))
expect_equal(x20$expectedNumberOfEvents, c(NaN, NaN, NaN, NaN))
expect_equal(unlist(as.list(x20$singleNumberOfEventsPerStage)), c(2, NaN, NaN, 2.0909091, NaN, NaN, 2.1666667, NaN, NaN, 2.2307692, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 1.9090909, NaN, NaN, 1.8333333, NaN, NaN, 1.7692308, NaN, NaN, 2, NaN, NaN, 1.8181818, NaN, NaN, 1.6666667, NaN, NaN, 1.5384615, NaN, NaN, 2, NaN, NaN, 2.1818182, NaN, NaN, 2.3333333, NaN, NaN, 2.4615385, NaN, NaN), tolerance = 1e-07)
# Output and code-round-trip checks, run only when the complete test set is enabled
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x20), NA)))
    expect_output(print(x20)$show())
    invisible(capture.output(expect_error(summary(x20), NA)))
    expect_output(summary(x20)$show())
    x20CodeBased <- eval(parse(text = getObjectRCode(x20, stringWrapParagraphWidth = NULL)))
    expect_equal(x20CodeBased$eventsPerStage, x20$eventsPerStage, tolerance = 1e-05)
    expect_equal(x20CodeBased$iterations, x20$iterations, tolerance = 1e-05)
    expect_equal(x20CodeBased$rejectAtLeastOne, x20$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x20CodeBased$rejectedArmsPerStage, x20$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x20CodeBased$futilityStop, x20$futilityStop, tolerance = 1e-05)
    expect_equal(x20CodeBased$futilityPerStage, x20$futilityPerStage, tolerance = 1e-05)
    expect_equal(x20CodeBased$earlyStop, x20$earlyStop, tolerance = 1e-05)
    expect_equal(x20CodeBased$successPerStage, x20$successPerStage, tolerance = 1e-05)
    expect_equal(x20CodeBased$selectedArms, x20$selectedArms, tolerance = 1e-05)
    expect_equal(x20CodeBased$numberOfActiveArms, x20$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x20CodeBased$expectedNumberOfEvents, x20$expectedNumberOfEvents, tolerance = 1e-05)
    expect_equal(x20CodeBased$singleNumberOfEventsPerStage, x20$singleNumberOfEventsPerStage, tolerance = 1e-05)
    expect_type(names(x20), "character")
    df <- as.data.frame(x20)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x20)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# 'x21' variant: typeOfSelection = "epsilon"; the call continues past this chunk.
x21 <- getSimulationMultiArmSurvival(
    seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, directionUpper = FALSE, threshold = 0, typeOfSelection = "epsilon", epsilonValue = 0.1, plannedEvents = c(10, 30, 50), omegaMaxVector = 1 / seq(1, 1.6, 0.2), adaptations = c(TRUE,
FALSE), intersectionTest = "Bonferroni", conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x21' with expected results expect_equal(unlist(as.list(x21$eventsPerStage)), c(4, 4, 4, 4.2727273, 4.2727273, 4.2727273, 4.5, 4.5, 4.5, 4.6923077, 4.6923077, 4.6923077, 4, 4, 4, 4.1818182, 4.1818182, 4.1818182, 4.3333333, 4.3333333, 4.3333333, 4.4615385, 4.4615385, 4.4615385, 4, 4, 4, 4.0909091, 4.0909091, 4.0909091, 4.1666667, 4.1666667, 4.1666667, 4.2307692, 4.2307692, 4.2307692, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4), tolerance = 1e-07) expect_equal(x21$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x21$iterations[2, ], c(0, 0, 0, 0)) expect_equal(x21$iterations[3, ], c(0, 0, 0, 0)) expect_equal(x21$rejectAtLeastOne, c(0, 0, 0, 0)) expect_equal(unlist(as.list(x21$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(x21$futilityStop, c(1, 1, 1, 1)) expect_equal(x21$futilityPerStage[1, ], c(1, 1, 1, 1)) expect_equal(x21$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x21$earlyStop[1, ], c(1, 1, 1, 1)) expect_equal(x21$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(x21$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x21$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x21$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x21$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0)) expect_equal(x21$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x21$numberOfActiveArms[2, ], c(NaN, NaN, NaN, NaN)) expect_equal(x21$numberOfActiveArms[3, ], c(NaN, NaN, NaN, NaN)) expect_equal(x21$expectedNumberOfEvents, c(NaN, NaN, NaN, NaN)) 
expect_equal(unlist(as.list(x21$singleNumberOfEventsPerStage)), c(2, NaN, NaN, 2.0909091, NaN, NaN, 2.1666667, NaN, NaN, 2.2307692, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 2, NaN, NaN, 1.9090909, NaN, NaN, 1.8333333, NaN, NaN, 1.7692308, NaN, NaN, 2, NaN, NaN, 1.8181818, NaN, NaN, 1.6666667, NaN, NaN, 1.5384615, NaN, NaN, 2, NaN, NaN, 2.1818182, NaN, NaN, 2.3333333, NaN, NaN, 2.4615385, NaN, NaN), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x21), NA))) expect_output(print(x21)$show()) invisible(capture.output(expect_error(summary(x21), NA))) expect_output(summary(x21)$show()) x21CodeBased <- eval(parse(text = getObjectRCode(x21, stringWrapParagraphWidth = NULL))) expect_equal(x21CodeBased$eventsPerStage, x21$eventsPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$iterations, x21$iterations, tolerance = 1e-05) expect_equal(x21CodeBased$rejectAtLeastOne, x21$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x21CodeBased$rejectedArmsPerStage, x21$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$futilityStop, x21$futilityStop, tolerance = 1e-05) expect_equal(x21CodeBased$futilityPerStage, x21$futilityPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$earlyStop, x21$earlyStop, tolerance = 1e-05) expect_equal(x21CodeBased$successPerStage, x21$successPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$selectedArms, x21$selectedArms, tolerance = 1e-05) expect_equal(x21CodeBased$numberOfActiveArms, x21$numberOfActiveArms, tolerance = 1e-05) expect_equal(x21CodeBased$expectedNumberOfEvents, x21$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(x21CodeBased$singleNumberOfEventsPerStage, x21$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_type(names(x21), "character") df <- as.data.frame(x21) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x21) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && 
ncol(mtx) > 0) } x22 <- getSimulationMultiArmSurvival( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, directionUpper = FALSE, threshold = 0.1, plannedEvents = c(10, 30, 50), omegaMaxVector = seq(0.1, 0.3, 0.1), intersectionTest = "Bonferroni", conditionalPower = 0.8, minNumberOfEventsPerStage = c(10, 4, 4), maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x22' with expected results expect_equal(unlist(as.list(x22$eventsPerStage)), c(6.4545455, 10.090909, 10.090909, 6, 9.1343922, 9.1343922, 5.6153846, 8.7178796, 8.7178796, 5.6363636, 9.2727273, 9.2727273, 5.3333333, 8.8427255, 8.8427255, 5.0769231, 8.7046706, 8.7046706, 4.8181818, 8.4545455, 8.4545455, 4.6666667, 8.1381491, 8.1381491, 4.5384615, 7.6409565, 7.6409565, 4, 8, 8, 4, 7.4677255, 7.4677255, 4, 7.7908192, 7.7908192), tolerance = 1e-07) expect_equal(x22$iterations[1, ], c(10, 10, 10)) expect_equal(x22$iterations[2, ], c(1, 4, 3)) expect_equal(x22$iterations[3, ], c(0, 0, 0)) expect_equal(x22$rejectAtLeastOne, c(0.1, 0.3, 0.2), tolerance = 1e-07) expect_equal(unlist(as.list(x22$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.2, 0, 0, 0.2, 0), tolerance = 1e-07) expect_equal(x22$futilityStop, c(0.9, 0.7, 0.8), tolerance = 1e-07) expect_equal(x22$futilityPerStage[1, ], c(0.9, 0.6, 0.7), tolerance = 1e-07) expect_equal(x22$futilityPerStage[2, ], c(0, 0.1, 0.1), tolerance = 1e-07) expect_equal(x22$earlyStop[1, ], c(0.9, 0.6, 0.7), tolerance = 1e-07) expect_equal(x22$earlyStop[2, ], c(0.1, 0.4, 0.3), tolerance = 1e-07) expect_equal(x22$successPerStage[1, ], c(0, 0, 0)) expect_equal(x22$successPerStage[2, ], c(0.1, 0.3, 0.2), tolerance = 1e-07) expect_equal(x22$successPerStage[3, ], c(0, 0, 0)) expect_equal(unlist(as.list(x22$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0.1, 0, 1, 0.1, 0, 
1, 0, 0, 1, 0.1, 0, 1, 0, 0, 1, 0.1, 0, 1, 0.2, 0, 1, 0.2, 0), tolerance = 1e-07) expect_equal(x22$numberOfActiveArms[1, ], c(4, 4, 4)) expect_equal(x22$numberOfActiveArms[2, ], c(1, 1, 1)) expect_equal(x22$numberOfActiveArms[3, ], c(NaN, NaN, NaN)) expect_equal(x22$expectedNumberOfEvents, c(NaN, NaN, NaN)) expect_equal(unlist(as.list(x22$singleNumberOfEventsPerStage)), c(2.8181818, 0, NaN, 2.6666667, 0, NaN, 2.5384615, 0, NaN, 2, 0, NaN, 2, 0.375, NaN, 2, 0.52525253, NaN, 1.1818182, 0, NaN, 1.3333333, 0.33709021, NaN, 1.4615385, 0, NaN, 0.36363636, 0.36363636, NaN, 0.66666667, 0.33333333, NaN, 0.92307692, 0.68832425, NaN, 3.6363636, 3.6363636, NaN, 3.3333333, 3.1343922, NaN, 3.0769231, 3.102495, NaN), tolerance = 1e-07) expect_equal(x22$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x22$conditionalPowerAchieved[2, ], c(0.99998124, 0.93006261, 0.86196268), tolerance = 1e-07) expect_equal(x22$conditionalPowerAchieved[3, ], c(NaN, NaN, NaN)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x22), NA))) expect_output(print(x22)$show()) invisible(capture.output(expect_error(summary(x22), NA))) expect_output(summary(x22)$show()) x22CodeBased <- eval(parse(text = getObjectRCode(x22, stringWrapParagraphWidth = NULL))) expect_equal(x22CodeBased$eventsPerStage, x22$eventsPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$iterations, x22$iterations, tolerance = 1e-05) expect_equal(x22CodeBased$rejectAtLeastOne, x22$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x22CodeBased$rejectedArmsPerStage, x22$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$futilityStop, x22$futilityStop, tolerance = 1e-05) expect_equal(x22CodeBased$futilityPerStage, x22$futilityPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$earlyStop, x22$earlyStop, tolerance = 1e-05) expect_equal(x22CodeBased$successPerStage, x22$successPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$selectedArms, 
x22$selectedArms, tolerance = 1e-05)
expect_equal(x22CodeBased$numberOfActiveArms, x22$numberOfActiveArms, tolerance = 1e-05)
expect_equal(x22CodeBased$expectedNumberOfEvents, x22$expectedNumberOfEvents, tolerance = 1e-05)
expect_equal(x22CodeBased$singleNumberOfEventsPerStage, x22$singleNumberOfEventsPerStage, tolerance = 1e-05)
expect_equal(x22CodeBased$conditionalPowerAchieved, x22$conditionalPowerAchieved, tolerance = 1e-05)
# Generic accessors must work on the simulation result object: names() yields
# a character vector, as.data.frame() a non-empty data.frame, as.matrix() a
# non-empty matrix.
expect_type(names(x22), "character")
df <- as.data.frame(x22)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x22)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

# Regression (snapshot) test: a user-defined event-number function passed via
# 'calcEventsFunction' must be honored by getSimulationMultiArmSurvival().
# The custom function returns 33 events at stage 3 and the per-stage minimum
# otherwise; all expected values below are snapshot values for seed 1234.
test_that("'getSimulationMultiArmSurvival': using calcSubjectsFunction", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections}
    # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmSurvival}
    # @refFS[Formula]{fs:simulationMultiArmDoseResponse}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCholeskyTransformation}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCorrMatrix}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalEvents}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalLogRanks}
    # @refFS[Formula]{fs:simulationMultiArmSelections}
    # @refFS[Formula]{fs:multiarmRejectionRule}
    # @refFS[Formula]{fs:adjustedPValueSubsetDunnett}

    # Despite its historical name, this helper is used as 'calcEventsFunction'
    # below: it fixes the stage-3 number of events to 33.
    calcSubjectsFunctionSimulationMultiArmSurvival <- function(..., stage, minNumberOfEventsPerStage) {
        return(ifelse(stage == 3, 33, minNumberOfEventsPerStage[stage]))
    }

    x <- getSimulationMultiArmSurvival(
        seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)),
        typeOfShape = "linear", activeArms = 4, plannedEvents = c(10, 30, 50),
        omegaMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2),
        directionUpper = FALSE, minNumberOfEventsPerStage = c(10, 4, 4),
        maxNumberOfEventsPerStage = c(10, 100, 100), maxNumberOfIterations = 10,
        calcEventsFunction = calcSubjectsFunctionSimulationMultiArmSurvival
    )

    ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x' with expected results
    expect_equal(unlist(as.list(x$eventsPerStage)), c(5.6153846, 8.5080558, 32.538332, 5.2857143, 7.9818379, 30.224858, 5, 7.6798535, 30.027381, 4.75, 7.4486928, 28.520044, 5.0769231, 8.1039238, 33.365555, 4.8571429, 7.7179724, 31.319816, 4.6666667, 7.3312821, 29.521667, 4.5, 6.9975232, 27.961662, 4.5384615, 7.765565, 33.68068, 4.4285714, 7.692437, 34.619328, 4.3333333, 7.4419048, 32.624048, 4.25, 7.3932749, 34.276803, 4, 6.9887723, 31.899976, 4, 7.2675522, 34.224858, 4, 7.0265201, 31.574048, 4, 6.6197454, 28.704254), tolerance = 1e-07)
    expect_equal(x$iterations[1, ], c(10, 10, 10, 10))
    expect_equal(x$iterations[2, ], c(10, 10, 10, 10))
    expect_equal(x$iterations[3, ], c(9, 10, 8, 9))
    expect_equal(x$rejectAtLeastOne, c(0.3, 0.4, 0.7, 0.3), tolerance = 1e-07)
    expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0.1, 0.2, 0, 0, 0.1, 0, 0, 0.1, 0, 0, 0.3, 0, 0.1, 0.3, 0, 0, 0.1), tolerance = 1e-07)
    expect_equal(x$futilityStop, c(0, 0, 0, 0))
    expect_equal(x$futilityPerStage[1, ], c(0, 0, 0, 0))
    expect_equal(x$futilityPerStage[2, ], c(0, 0, 0, 0))
    expect_equal(x$earlyStop[1, ], c(0, 0, 0, 0))
    expect_equal(x$earlyStop[2, ], c(0.1, 0, 0.2, 0.1), tolerance = 1e-07)
    expect_equal(x$successPerStage[1, ], c(0, 0, 0, 0))
    expect_equal(x$successPerStage[2, ], c(0.1, 0, 0.2, 0.1), tolerance = 1e-07)
    expect_equal(x$successPerStage[3, ], c(0.2, 0.4, 0.5, 0.2), tolerance = 1e-07)
    expect_equal(unlist(as.list(x$selectedArms)), c(1, 0.1, 0.1, 1, 0, 0, 1, 0.1, 0.1, 1, 0.2, 0.1, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.4, 0.3, 1, 0.4, 0.4, 1, 0.4, 0.3, 1, 0.5, 0.5, 1, 0.3, 0.3, 1, 0.5, 0.5, 1, 0.4, 0.3, 1, 0.2, 0.2), tolerance = 1e-07)
    expect_equal(x$numberOfActiveArms[1, ], c(4, 4, 4, 4))
    expect_equal(x$numberOfActiveArms[2, ], c(1, 1, 1, 1))
    expect_equal(x$numberOfActiveArms[3, ], c(1, 1, 1, 1))
    expect_equal(x$expectedNumberOfEvents, c(43.7, 47, 40.4, 43.7), tolerance = 1e-07)
    expect_equal(unlist(as.list(x$singleNumberOfEventsPerStage)), c(2.5384615, 0.18082192, 1.6575342, 2.4285714, 0, 0, 2.3333333, 0.18666667, 1.925, 2.25, 0.37894737, 1.7368421, 2, 0.31515152, 2.8888889, 2, 0.16470588, 1.3588235, 2, 0.17142857, 1.7678571, 2, 0.17777778, 1.6296296, 1.4615385, 0.51525424, 3.5423729, 1.5714286, 0.56774194, 4.683871, 1.6666667, 0.61538462, 4.7596154, 1.75, 0.82352941, 7.5490196, 0.92307692, 0.27692308, 2.5384615, 1.1428571, 0.57142857, 4.7142857, 1.3333333, 0.53333333, 4.125, 1.5, 0.3, 2.75, 3.0769231, 2.7118493, 22.372742, 2.8571429, 2.6961236, 22.24302, 2.6666667, 2.4931868, 20.422527, 2.5, 2.3197454, 19.334509), tolerance = 1e-07)
    expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$conditionalPowerAchieved[2, ], c(0.13227215, 0.33500952, 0.32478794, 0.19174696), tolerance = 1e-07)
    expect_equal(x$conditionalPowerAchieved[3, ], c(0.28682503, 0.6076832, 0.60939504, 0.37477275), tolerance = 1e-07)
    # Round-trip checks (print/summary/getObjectRCode re-evaluation) are only
    # executed when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x), NA)))
        expect_output(print(x)$show())
        invisible(capture.output(expect_error(summary(x), NA)))
        expect_output(summary(x)$show())
        xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))
        expect_equal(xCodeBased$eventsPerStage, x$eventsPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05)
        expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05)
        expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05)
        expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05)
        expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05)
        expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms, tolerance = 1e-05)
        expect_equal(xCodeBased$expectedNumberOfEvents, x$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(xCodeBased$singleNumberOfEventsPerStage, x$singleNumberOfEventsPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(x), "character")
        df <- as.data.frame(x)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression (snapshot) test: a user-defined arm selection function
# ('typeOfSelection = "userDefined"') must be honored; the function below
# always keeps only arm 1.
test_that("'getSimulationMultiArmSurvival': using selectArmsFunction", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections}
    # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmSurvival}
    # @refFS[Formula]{fs:simulationMultiArmDoseResponse}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCholeskyTransformation}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCorrMatrix}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalEvents}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalLogRanks}
    # @refFS[Formula]{fs:simulationMultiArmSelections}
    # @refFS[Formula]{fs:multiarmRejectionRule}
    # @refFS[Formula]{fs:adjustedPValueSubsetDunnett}

    # Always selects arm 1, independent of the observed effect sizes.
    selectArmsFunctionSimulationMultiArmSurvival <- function(effectSizes) {
        return(c(TRUE, FALSE, FALSE, FALSE))
    }

    x <- getSimulationMultiArmSurvival(
        seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)),
        typeOfShape = "linear", activeArms = 4, plannedEvents = c(10, 30, 50),
        omegaMaxVector = seq(0.3, 0.6, 0.1),
adaptations = rep(TRUE, 2), directionUpper = FALSE, maxNumberOfIterations = 10,
        selectArmsFunction = selectArmsFunctionSimulationMultiArmSurvival, typeOfSelection = "userDefined"
    )

    ## Comparison of the results of SimulationResultsMultiArmSurvival object 'x' with expected results
    expect_equal(unlist(as.list(x$eventsPerStage)), c(5.6153846, 25.615385, 45.615385, 5.2857143, 25.285714, 45.285714, 5, 25, 45, 4.75, 24.75, 44.75, 5.0769231, 16.035827, 26.994731, 4.8571429, 15.667954, 26.478764, 4.6666667, 15.333333, 26, 4.5, 15.026316, 25.552632, 4.5384615, 15.497366, 26.45627, 4.4285714, 15.239382, 26.050193, 4.3333333, 15, 25.666667, 4.25, 14.776316, 25.302632, 4, 14.958904, 25.917808, 4, 14.810811, 25.621622, 4, 14.666667, 25.333333, 4, 14.526316, 25.052632), tolerance = 1e-07)
    expect_equal(x$iterations[1, ], c(10, 10, 10, 10))
    expect_equal(x$iterations[2, ], c(10, 10, 10, 10))
    expect_equal(x$iterations[3, ], c(10, 10, 10, 9))
    expect_equal(x$rejectAtLeastOne, c(0, 0, 0, 0.1), tolerance = 1e-07)
    expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), tolerance = 1e-07)
    expect_equal(x$futilityStop, c(0, 0, 0, 0))
    expect_equal(x$futilityPerStage[1, ], c(0, 0, 0, 0))
    expect_equal(x$futilityPerStage[2, ], c(0, 0, 0, 0))
    expect_equal(x$earlyStop[1, ], c(0, 0, 0, 0))
    expect_equal(x$earlyStop[2, ], c(0, 0, 0, 0.1), tolerance = 1e-07)
    expect_equal(x$successPerStage[1, ], c(0, 0, 0, 0))
    expect_equal(x$successPerStage[2, ], c(0, 0, 0, 0.1), tolerance = 1e-07)
    expect_equal(x$successPerStage[3, ], c(0, 0, 0, 0))
    # Only arm 1 is carried forward by the user-defined selection function.
    expect_equal(unlist(as.list(x$selectedArms)), c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0), tolerance = 1e-07)
    expect_equal(x$numberOfActiveArms[1, ], c(4, 4, 4, 4))
    expect_equal(x$numberOfActiveArms[2, ], c(1, 1, 1, 1))
    expect_equal(x$numberOfActiveArms[3, ], c(1, 1, 1, 1))
    expect_equal(x$expectedNumberOfEvents, c(50, 50, 50, 48))
    expect_equal(unlist(as.list(x$singleNumberOfEventsPerStage)), c(2.5384615, 9.0410959, 9.0410959, 2.4285714, 9.1891892, 9.1891892, 2.3333333, 9.3333333, 9.3333333, 2.25, 9.4736842, 9.4736842, 2, 0, 0, 2, 0, 0, 2, 0, 0, 2, 0, 0, 1.4615385, 0, 0, 1.5714286, 0, 0, 1.6666667, 0, 0, 1.75, 0, 0, 0.92307692, 0, 0, 1.1428571, 0, 0, 1.3333333, 0, 0, 1.5, 0, 0, 3.0769231, 10.958904, 10.958904, 2.8571429, 10.810811, 10.810811, 2.6666667, 10.666667, 10.666667, 2.5, 10.526316, 10.526316), tolerance = 1e-07)
    expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$conditionalPowerAchieved[2, ], c(0.33564601, 0.59192905, 0.61161484, 0.44432847), tolerance = 1e-07)
    expect_equal(x$conditionalPowerAchieved[3, ], c(0.10158651, 0.080642472, 0.3234231, 0.034914809), tolerance = 1e-07)
    # Round-trip checks (print/summary/getObjectRCode re-evaluation) are only
    # executed when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x), NA)))
        expect_output(print(x)$show())
        invisible(capture.output(expect_error(summary(x), NA)))
        expect_output(summary(x)$show())
        xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))
        expect_equal(xCodeBased$eventsPerStage, x$eventsPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05)
        expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05)
        expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05)
        expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05)
        expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05)
        expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms,
22.633745, 47.376736, 72.759392, 21.73913, 42.12314, 62.760088, 25, 52.5, 80.895062, 24.266366, 53.226501, 81.514068, 23.662551, 48.016442, 72.194538, 23.1569, 49.755556, 76.303771, 25, 51.666667, 77.592593, 24.604966, 51.66004, 78.734811, 24.279835, 53.095902, 81.487679, 24.007561, 52.639961, 81.090004), tolerance = 1e-07)
    expect_equal(x$iterations[1, ], c(100, 100, 100, 100))
    expect_equal(x$iterations[2, ], c(40, 57, 66, 79))
    expect_equal(x$iterations[3, ], c(27, 48, 55, 70))
    expect_equal(x$rejectAtLeastOne, c(0.02, 0.07, 0.19, 0.21), tolerance = 1e-07)
    expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0, 0.01, 0.01, 0.01, 0.02, 0.01, 0.02, 0, 0.02, 0.02, 0.01, 0, 0, 0, 0.01, 0.02, 0.03, 0.03, 0.01, 0.03, 0.06, 0.01, 0.01, 0, 0, 0.01, 0.01, 0.02, 0.04, 0.03, 0.07, 0.03, 0.09, 0.06), tolerance = 1e-07)
    expect_equal(x$futilityStop, c(0.73, 0.51, 0.41, 0.24), tolerance = 1e-07)
    expect_equal(x$futilityPerStage[1, ], c(0.6, 0.43, 0.34, 0.21), tolerance = 1e-07)
    expect_equal(x$futilityPerStage[2, ], c(0.13, 0.08, 0.07, 0.03), tolerance = 1e-07)
    expect_equal(x$earlyStop[1, ], c(0.6, 0.43, 0.34, 0.21), tolerance = 1e-07)
    expect_equal(x$earlyStop[2, ], c(0.13, 0.09, 0.11, 0.09), tolerance = 1e-07)
    expect_equal(x$successPerStage[1, ], c(0, 0, 0, 0))
    expect_equal(x$successPerStage[2, ], c(0, 0.01, 0.04, 0.06), tolerance = 1e-07)
    expect_equal(x$successPerStage[3, ], c(0, 0.02, 0.03, 0.05), tolerance = 1e-07)
    expect_equal(unlist(as.list(x$selectedArms)), c(1, 0.3, 0.2, 1, 0.31, 0.28, 1, 0.42, 0.37, 1, 0.35, 0.32, 1, 0.26, 0.19, 1, 0.45, 0.36, 1, 0.38, 0.31, 1, 0.59, 0.52, 1, 0.24, 0.15, 1, 0.38, 0.32, 1, 0.52, 0.42, 1, 0.64, 0.56), tolerance = 1e-07)
    # With typeOfSelection = "rBest" and rValue = 2, two arms stay active
    # after the first interim.
    expect_equal(x$numberOfActiveArms[1, ], c(3, 3, 3, 3))
    expect_equal(x$numberOfActiveArms[2, ], c(2, 2, 2, 2))
    expect_equal(x$numberOfActiveArms[3, ], c(2, 2, 2, 2))
    expect_equal(x$expectedNumberOfEvents, c(83.5, 102.5, 110.5, 124.5), tolerance = 1e-07)
    expect_equal(unlist(as.list(x$singleNumberOfEventsPerStage)), c(12.5, 12.5, 12.345679, 12.41535, 9.1711925, 9.8330988, 12.345679, 10.786517, 11.406391, 12.287335, 7.5764768, 7.8193452, 12.5, 10.833333, 11.728395, 12.979684, 13.773733, 13.092131, 13.374486, 10.397417, 10.201831, 13.705104, 13.791123, 13.730612, 12.5, 10, 9.2592593, 13.318284, 11.868672, 11.879334, 13.99177, 14.859592, 14.415513, 14.555766, 15.824867, 15.63244, 12.5, 16.666667, 16.666667, 11.286682, 15.186402, 15.195437, 10.288066, 13.956474, 13.976265, 9.4517958, 12.807533, 12.817602), tolerance = 1e-07)
    expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$conditionalPowerAchieved[2, ], c(0.066083689, 0.14406787, 0.27240426, 0.24161087), tolerance = 1e-07)
    expect_equal(x$conditionalPowerAchieved[3, ], c(0.13321164, 0.19096794, 0.29528894, 0.30979546), tolerance = 1e-07)
    # Round-trip checks (print/summary/getObjectRCode re-evaluation) are only
    # executed when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x), NA)))
        expect_output(print(x)$show())
        invisible(capture.output(expect_error(summary(x), NA)))
        expect_output(summary(x)$show())
        xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))
        expect_equal(xCodeBased$eventsPerStage, x$eventsPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05)
        expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05)
        expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05)
        expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05)
        expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05)
        expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms, tolerance = 1e-05)
        expect_equal(xCodeBased$expectedNumberOfEvents, x$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(xCodeBased$singleNumberOfEventsPerStage, x$singleNumberOfEventsPerStage, tolerance = 1e-05)
        expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(x), "character")
        df <- as.data.frame(x)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Consistency check: a one-armed multi-arm survival simulation ('x') must
# agree, up to Monte Carlo error, with the base getSimulationSurvival()
# result ('y') under the same Wang-Tsiatis inverse normal design and seed.
test_that("'getSimulationMultiArmSurvival': comparison of base and multi-arm, inverse normal design", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections}
    # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmSurvival}
    # @refFS[Formula]{fs:simulationMultiArmDoseResponse}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCholeskyTransformation}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCorrMatrix}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalEvents}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalLogRanks}
    # @refFS[Formula]{fs:simulationMultiArmSelections}
    # @refFS[Formula]{fs:multiarmRejectionRule}

    allocationRatioPlanned <- 1
    design <- getDesignInverseNormal(
        typeOfDesign = "WT", deltaWT = 0.05,
        futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.8, 1)
    )
    x <- getSimulationMultiArmSurvival(design,
        activeArms = 1, omegaMaxVector = 1 / seq(1, 1.8, 0.4),
        plannedEvents = c(20, 40, 60), conditionalPower = 0.99,
        maxNumberOfEventsPerStage = c(NA, 100, 100), minNumberOfEventsPerStage = c(NA, 10, 10),
        maxNumberOfIterations = 100, directionUpper = FALSE,
        allocationRatioPlanned = allocationRatioPlanned, seed = 1234
    )
    y <- getSimulationSurvival(design,
        pi2 = 0.2, hazardRatio = 1 / seq(1, 1.8, 0.4),
        plannedEvents = c(20, 40, 60), maxNumberOfSubjects = 500,
        conditionalPower = 0.99, maxNumberOfEventsPerStage = c(NA, 100, 100),
minNumberOfEventsPerStage = c(NA, 10, 10),
        maxNumberOfIterations = 100, directionUpper = FALSE,
        allocation1 = 1, allocation2 = 1, seed = 1234
    )

    # Differences between the base ('y') and multi-arm ('x') results; only
    # Monte Carlo noise of the recorded magnitude is expected.
    comp1 <- y$overallReject - x$rejectAtLeastOne

    ## Comparison of the results of numeric object 'comp1' with expected results
    expect_equal(comp1, c(-0.02, 0.01, 0.06), tolerance = 1e-07)

    comp2 <- y$rejectPerStage - x$rejectedArmsPerStage[, , 1]

    ## Comparison of the results of matrixarray object 'comp2' with expected results
    expect_equal(comp2[1, ], c(0, 0, 0))
    expect_equal(comp2[2, ], c(-0.02, 0.02, 0.03), tolerance = 1e-07)
    expect_equal(comp2[3, ], c(0, -0.01, 0.03), tolerance = 1e-07)

    comp3 <- y$futilityPerStage - x$futilityPerStage

    ## Comparison of the results of matrixarray object 'comp3' with expected results
    expect_equal(comp3[1, ], c(-0.06, -0.02, -0.03), tolerance = 1e-07)
    expect_equal(comp3[2, ], c(0.08, 0.06, 0), tolerance = 1e-07)

    comp4 <- round(y$overallEventsPerStage - x$eventsPerStage[, , 1], 1)

    ## Comparison of the results of matrixarray object 'comp4' with expected results
    expect_equal(comp4[1, ], c(0, 0, 0))
    expect_equal(comp4[2, ], c(1.2, -0.4, 1), tolerance = 1e-07)
    expect_equal(comp4[3, ], c(1.7, -0.8, 1), tolerance = 1e-07)

    comp5 <- round(y$expectedNumberOfEvents - x$expectedNumberOfEvents, 1)

    ## Comparison of the results of numeric object 'comp5' with expected results
    expect_equal(comp5, c(6.9, -4.7, 3.6), tolerance = 1e-07)

    comp6 <- x$earlyStop - y$earlyStop

    ## Comparison of the results of matrixarray object 'comp6' with expected results
    expect_equal(comp6[1, ], c(-0.43, -0.73, -0.52), tolerance = 1e-07)
    expect_equal(comp6[2, ], c(-0.13, -0.32, -0.04), tolerance = 1e-07)
})

# Same base-vs-multi-arm consistency check, but for a Fisher combination
# design with futility bounds alpha0Vec = c(0.6, 0.4).
test_that("'getSimulationMultiArmSurvival': comparison of base and multi-arm, Fisher design", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections}
    # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmSurvival}
    # @refFS[Formula]{fs:simulationMultiArmDoseResponse}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCholeskyTransformation}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCorrMatrix}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalEvents}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalLogRanks}
    # @refFS[Formula]{fs:simulationMultiArmSelections}
    # @refFS[Formula]{fs:multiarmRejectionRule}

    design <- getDesignFisher(alpha0Vec = c(0.6, 0.4), informationRates = c(0.5, 0.6, 1))
    x <- getSimulationMultiArmSurvival(design,
        activeArms = 1, omegaMaxVector = 1 / seq(1, 1.8, 0.4),
        plannedEvents = c(20, 40, 60), conditionalPower = 0.99,
        maxNumberOfEventsPerStage = c(NA, 100, 100), minNumberOfEventsPerStage = c(NA, 10, 10),
        maxNumberOfIterations = 100, directionUpper = FALSE, seed = 1234
    )
    y <- getSimulationSurvival(design,
        pi2 = 0.2, hazardRatio = 1 / seq(1, 1.8, 0.4),
        plannedEvents = c(20, 40, 60), maxNumberOfSubjects = 500,
        conditionalPower = 0.99, maxNumberOfEventsPerStage = c(NA, 100, 100),
        minNumberOfEventsPerStage = c(NA, 10, 10), maxNumberOfIterations = 100,
        directionUpper = FALSE, allocation1 = 1, allocation2 = 1, seed = 1234
    )

    comp1 <- y$overallReject - x$rejectAtLeastOne

    ## Comparison of the results of numeric object 'comp1' with expected results
    expect_equal(comp1, c(-0.02, -0.01, 0.02), tolerance = 1e-07)

    comp2 <- y$rejectPerStage - x$rejectedArmsPerStage[, , 1]

    ## Comparison of the results of matrixarray object 'comp2' with expected results
    expect_equal(comp2[1, ], c(-0.02, 0.01, -0.01), tolerance = 1e-07)
    expect_equal(comp2[2, ], c(0, -0.03, 0.01), tolerance = 1e-07)
    expect_equal(comp2[3, ], c(0, 0.01, 0.02), tolerance = 1e-07)

    comp3 <- y$futilityPerStage - x$futilityPerStage

    ## Comparison of the results of matrixarray object 'comp3' with expected results
    expect_equal(comp3[1, ], c(-0.03, 0.01, -0.01), tolerance = 1e-07)
    expect_equal(comp3[2, ], c(0.05, 0.05, -0.01), tolerance = 1e-07)

    comp4 <- round(y$overallEventsPerStage - x$eventsPerStage[, , 1], 1)

    ## Comparison of the results of matrixarray object 'comp4' with expected results
    expect_equal(comp4[1, ], c(0, 0, 0))
    expect_equal(comp4[2, ], c(-0.6, 0.8, -0.3), tolerance = 1e-07)
    expect_equal(comp4[3, ], c(-0.6, 0.8, -0.3), tolerance = 1e-07)

    comp5 <- round(y$expectedNumberOfEvents - x$expectedNumberOfEvents, 1)

    ## Comparison of the results of numeric object 'comp5' with expected results
    expect_equal(comp5, c(4.7, -5.3, 3.6), tolerance = 1e-07)

    comp6 <- x$earlyStop - y$earlyStop

    ## Comparison of the results of matrixarray object 'comp6' with expected results
    expect_equal(comp6[1, ], c(-0.27, -0.42, -0.29), tolerance = 1e-07)
    expect_equal(comp6[2, ], c(-0.22, -0.54, -0.18), tolerance = 1e-07)
})

# Same base-vs-multi-arm consistency check for an inverse normal design with
# user-defined alpha spending (all alpha spent at the final analysis).
test_that("'getSimulationMultiArmSurvival': comparison of base and multi-arm, inverse normal design with user alpha spending", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections}
    # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmSurvival}
    # @refFS[Formula]{fs:simulationMultiArmDoseResponse}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCholeskyTransformation}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalCorrMatrix}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalEvents}
    # @refFS[Formula]{fs:simulationMultiArmSurvivalLogRanks}
    # @refFS[Formula]{fs:simulationMultiArmSelections}
    # @refFS[Formula]{fs:multiarmRejectionRule}

    design <- getDesignInverseNormal(
        typeOfDesign = "asUser", userAlphaSpending = c(0, 0, 0.025),
        informationRates = c(0.2, 0.8, 1)
    )
    x <- getSimulationMultiArmSurvival(design,
        activeArms = 1, omegaMaxVector = 1 / seq(1, 1.8, 0.4),
        plannedEvents = c(20, 40, 60), conditionalPower = 0.99,
        maxNumberOfEventsPerStage = c(NA, 100, 100), minNumberOfEventsPerStage = c(NA,
10, 10), maxNumberOfIterations = 100, directionUpper = FALSE, seed = 1234 ) y <- getSimulationSurvival(design, pi2 = 0.2, hazardRatio = 1 / seq(1, 1.8, 0.4), plannedEvents = c(20, 40, 60), maxNumberOfSubjects = 500, conditionalPower = 0.99, maxNumberOfEventsPerStage = c(NA, 100, 100), minNumberOfEventsPerStage = c(NA, 10, 10), maxNumberOfIterations = 100, directionUpper = FALSE, allocation1 = 1, allocation2 = 1, seed = 1234 ) comp1 <- y$overallReject - x$rejectAtLeastOne ## Comparison of the results of numeric object 'comp1' with expected results expect_equal(comp1, c(-0.01, 0.02, 0.01), tolerance = 1e-07) comp2 <- y$rejectPerStage - x$rejectedArmsPerStage[, , 1] ## Comparison of the results of matrixarray object 'comp2' with expected results expect_equal(comp2[1, ], c(0, 0, 0)) expect_equal(comp2[2, ], c(0, 0, 0)) expect_equal(comp2[3, ], c(-0.01, 0.02, 0.01), tolerance = 1e-07) comp3 <- y$futilityPerStage - x$futilityPerStage ## Comparison of the results of matrixarray object 'comp3' with expected results expect_equal(comp3[1, ], c(0, 0, 0)) expect_equal(comp3[2, ], c(0, 0, 0)) comp4 <- round(y$overallEventsPerStage - x$eventsPerStage[, , 1], 1) ## Comparison of the results of matrixarray object 'comp4' with expected results expect_equal(comp4[1, ], c(0, 0, 0)) expect_equal(comp4[2, ], c(0, 0, 0)) expect_equal(comp4[3, ], c(-0.2, -3.5, 0.6), tolerance = 1e-07) comp5 <- round(y$expectedNumberOfEvents - x$expectedNumberOfEvents, 1) ## Comparison of the results of numeric object 'comp5' with expected results expect_equal(comp5, c(-0.2, -3.5, 0.6), tolerance = 1e-07) comp6 <- x$earlyStop - y$earlyStop ## Comparison of the results of matrixarray object 'comp6' with expected results expect_equal(comp6[1, ], c(0, 0, 0)) expect_equal(comp6[2, ], c(0, 0, 0)) }) rpact/tests/testthat/test-f_analysis_enrichment_means.R0000644000176200001440000023205014370207346023224 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | 
Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_analysis_enrichment_means.R ## | Creation date: 06 February 2023, 12:07:18 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Analysis Enrichment Means Function (one sub-population)") test_that("'getAnalysisResults': select S1 at first IA, gMax = 2, inverse normal design", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedtTestEnrichment} S1 <- getDataset( sampleSize1 = c(12, 21), sampleSize2 = c(18, 21), mean1 = c(107.7, 84.9), mean2 = c(165.6, 195.9), stDev1 = c(128.5, 139.5), stDev2 = c(120.1, 185.0) ) F <- getDataset( sampleSize1 = c(26, NA), sampleSize2 = c(29, NA), mean1 = c(86.48462, NA), mean2 = c(148.34138, NA), stDev1 = c(129.1485, NA), stDev2 = c(122.888, NA) ) dataInput1 <- getDataset(S1 = S1, F = F) ## Comparison of the results of DatasetMeans object 'dataInput1' with expected results 
expect_equal(dataInput1$overallSampleSizes, c(12, 26, 18, 29, 33, NA_real_, 39, NA_real_)) expect_equal(dataInput1$overallMeans, c(107.7, 86.48462, 165.6, 148.34138, 93.190909, NA_real_, 181.91538, NA_real_), tolerance = 1e-07) expect_equal(dataInput1$overallStDevs, c(128.5, 129.1485, 120.1, 122.888, 134.02535, NA_real_, 157.16289, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput1), NA))) expect_output(print(dataInput1)$show()) invisible(capture.output(expect_error(summary(dataInput1), NA))) expect_output(summary(dataInput1)$show()) dataInput1CodeBased <- eval(parse(text = getObjectRCode(dataInput1, stringWrapParagraphWidth = NULL))) expect_equal(dataInput1CodeBased$overallSampleSizes, dataInput1$overallSampleSizes, tolerance = 1e-05) expect_equal(dataInput1CodeBased$overallMeans, dataInput1$overallMeans, tolerance = 1e-05) expect_equal(dataInput1CodeBased$overallStDevs, dataInput1$overallStDevs, tolerance = 1e-05) expect_type(names(dataInput1), "character") df <- as.data.frame(dataInput1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design1 <- getDesignInverseNormal( kMax = 3, alpha = 0.02, futilityBounds = c(-0.5, 0), bindingFutility = FALSE, typeOfDesign = "OF", informationRates = c(0.5, 0.7, 1) ) x1 <- getAnalysisResults( design = design1, dataInput = dataInput1, directionUpper = FALSE, normalApproximation = FALSE, varianceOption = "pooledFromFull", intersectionTest = "Bonferroni", stratifiedAnalysis = FALSE, stage = 2, thetaH1 = c(-30, NA), assumedStDevs = c(88, NA), nPlanned = c(30), allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x1' with expected results expect_equal(x1$conditionalRejectionProbabilities[1, ], c(0.040655272, 0.29596348, NA_real_), tolerance = 1e-07) 
expect_equal(x1$conditionalRejectionProbabilities[2, ], c(0.065736952, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower[1, ], c(NA_real_, NA_real_, 0.6346437), tolerance = 1e-07) expect_equal(x1$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[1, ], c(-215.41406, -176.0794, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[2, ], c(-176.00816, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[1, ], c(99.614058, 24.117528, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[2, ], c(52.294639, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[1, ], c(0.25380947, 0.041128123, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[2, ], c(0.19818652, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) 
test_that("'getAnalysisResults': stratified analysis, select S1 at first IA, gMax = 2, Fisher design", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedtTestEnrichment} S1 <- getDataset( sampleSize1 = c(12, 21), sampleSize2 = c(18, 21), mean1 = c(107.7, 84.9), mean2 = c(165.6, 195.9), stDev1 = c(128.5, 139.5), stDev2 = c(120.1, 185.0) ) R <- getDataset( sampleSize1 = c(14, NA), sampleSize2 = c(11, NA), mean1 = c(68.3, NA), mean2 = c(120.1, NA), stDev1 = c(124.0, NA), stDev2 = c(116.8, NA) ) dataInput2 <- getDataset(S1 = S1, R = R) ## Comparison of the results of DatasetMeans object 'dataInput2' with expected results expect_equal(dataInput2$overallSampleSizes, c(12, 14, 18, 11, 33, NA_real_, 39, NA_real_)) expect_equal(dataInput2$overallMeans, c(107.7, 68.3, 165.6, 120.1, 93.190909, NA_real_, 181.91538, NA_real_), tolerance = 1e-07) expect_equal(dataInput2$overallStDevs, c(128.5, 124, 120.1, 116.8, 134.02535, NA_real_, 157.16289, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput2), NA))) expect_output(print(dataInput2)$show()) invisible(capture.output(expect_error(summary(dataInput2), NA))) expect_output(summary(dataInput2)$show()) dataInput2CodeBased <- eval(parse(text = getObjectRCode(dataInput2, stringWrapParagraphWidth = NULL))) expect_equal(dataInput2CodeBased$overallSampleSizes, dataInput2$overallSampleSizes, 
tolerance = 1e-05) expect_equal(dataInput2CodeBased$overallMeans, dataInput2$overallMeans, tolerance = 1e-05) expect_equal(dataInput2CodeBased$overallStDevs, dataInput2$overallStDevs, tolerance = 1e-05) expect_type(names(dataInput2), "character") df <- as.data.frame(dataInput2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design2 <- getDesignFisher( kMax = 3, alpha = 0.02, alpha0Vec = c(0.7, 0.5), method = "fullAlpha", bindingFutility = TRUE, informationRates = c(0.3, 0.7, 1) ) x2 <- getAnalysisResults( design = design2, dataInput = dataInput2, directionUpper = FALSE, normalApproximation = FALSE, varianceOption = "pooledFromFull", intersectionTest = "Bonferroni", stratifiedAnalysis = FALSE, stage = 2, thetaH1 = c(-30, NA), assumedStDevs = c(88, NA), nPlanned = c(30), allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentFisher object 'x2' with expected results expect_equal(x2$conditionalRejectionProbabilities[1, ], c(0.030372979, 0.38266716, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[2, ], c(0.042518986, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower[1, ], c(NA_real_, NA_real_, 0.71962915), tolerance = 1e-07) expect_equal(x2$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[1, ], c(-187.96966, -183.80634, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[2, ], c(-156.27269, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[1, ], c(72.16966, 16.133901, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[2, ], c(32.559163, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[1, ], c(0.19557155, 0.034517266, NA_real_), tolerance = 1e-07) 
expect_equal(x2$repeatedPValues[2, ], c(0.13877083, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design1 <- getDesignInverseNormal( kMax = 3, alpha = 0.02, futilityBounds = c(-0.5, 0), bindingFutility = FALSE, typeOfDesign = "OF", informationRates = c(0.5, 0.7, 1) ) x3 <- getAnalysisResults( design = design1, dataInput = dataInput2, directionUpper = FALSE, normalApproximation = FALSE, intersectionTest = "Sidak", stratifiedAnalysis = TRUE, stage = 2, thetaH1 = c(-30, NA), assumedStDevs = c(88, NA), nPlanned = c(30), allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x3' with expected results expect_equal(x3$conditionalRejectionProbabilities[1, ], c(0.041603465, 0.30059767, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities[2, ], c(0.044887021, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower[1, ], c(NA_real_, 
NA_real_, 0.63965664), tolerance = 1e-07) expect_equal(x3$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[1, ], c(-220.28415, -176.85912, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[2, ], c(-167.67059, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[1, ], c(104.48415, 23.636689, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[2, ], c(57.495741, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[1, ], c(0.25104477, 0.040430988, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[2, ], c(0.24199442, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': select S1 at first IA, gMax = 2, inverse normal design, Sidak and Spiessens & Debois", { .skipTestIfDisabled() # 
@refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedtTestEnrichment} design1 <- getDesignInverseNormal( kMax = 3, alpha = 0.02, futilityBounds = c(-0.5, 0), bindingFutility = FALSE, typeOfDesign = "OF", informationRates = c(0.5, 0.7, 1) ) S1 <- getDataset( sampleSize1 = c(12, 21), sampleSize2 = c(18, 21), mean1 = c(107.7, 84.9), mean2 = c(165.6, 195.9), stDev1 = c(128.5, 139.5), stDev2 = c(120.1, 185.0) ) F <- getDataset( sampleSize1 = c(26, NA), sampleSize2 = c(29, NA), mean1 = c(86.48462, NA), mean2 = c(148.34138, NA), stDev1 = c(129.1485, NA), stDev2 = c(122.888, NA) ) dataInput1 <- getDataset(S1 = S1, F = F) x4 <- getAnalysisResults( design = design1, dataInput = dataInput1, directionUpper = FALSE, normalApproximation = FALSE, varianceOption = "notPooled", intersectionTest = "Sidak", stratifiedAnalysis = FALSE, stage = 2, nPlanned = c(30), allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x4' with expected results expect_equal(x4$thetaH1[1, ], -88.724476, tolerance = 1e-07) expect_equal(x4$thetaH1[2, ], NA_real_) expect_equal(x4$assumedStDevs[1, ], 147.03819, tolerance = 1e-07) expect_equal(x4$assumedStDevs[2, ], NA_real_) expect_equal(x4$conditionalRejectionProbabilities[1, ], c(0.039522227, 0.28885292, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities[2, ], c(0.066220149, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x4$conditionalPower[1, ], c(NA_real_, NA_real_, 0.84164989), tolerance = 1e-07) expect_equal(x4$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x4$repeatedConfidenceIntervalLowerBounds[1, ], c(-226.91549, -179.08628, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds[2, ], c(-176.48166, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds[1, ], c(111.11549, 25.050962, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds[2, ], c(52.768138, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues[1, ], c(0.25721122, 0.042227707, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues[2, ], c(0.1973759, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$thetaH1, x4$thetaH1, tolerance = 1e-05) expect_equal(x4CodeBased$assumedStDevs, x4$assumedStDevs, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalRejectionProbabilities, x4$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPower, x4$conditionalPower, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalLowerBounds, x4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalUpperBounds, x4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedPValues, x4$repeatedPValues, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) 
> 0 && ncol(mtx) > 0) } x5 <- getAnalysisResults( design = design1, dataInput = dataInput1, directionUpper = FALSE, normalApproximation = FALSE, varianceOption = "pooledFromFull", intersectionTest = "SpiessensDebois", stratifiedAnalysis = TRUE, stage = 2, nPlanned = c(30), allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x5' with expected results expect_equal(x5$thetaH1[1, ], -88.724476, tolerance = 1e-07) expect_equal(x5$thetaH1[2, ], NA_real_) expect_equal(x5$assumedStDevs[1, ], 147.03819, tolerance = 1e-07) expect_equal(x5$assumedStDevs[2, ], NA_real_) expect_equal(x5$conditionalRejectionProbabilities[1, ], c(0.039526191, 0.29036799, NA_real_), tolerance = 1e-07) expect_equal(x5$conditionalRejectionProbabilities[2, ], c(0.083354471, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x5$conditionalPower[1, ], c(NA_real_, NA_real_, 0.84271782), tolerance = 1e-07) expect_equal(x5$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x5$repeatedConfidenceIntervalLowerBounds[1, ], c(-213.98234, -174.20657, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalLowerBounds[2, ], c(-174.97059, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds[1, ], c(98.182344, 20.343092, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedConfidenceIntervalUpperBounds[2, ], c(51.257068, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues[1, ], c(0.25719977, 0.041990242, NA_real_), tolerance = 1e-07) expect_equal(x5$repeatedPValues[2, ], c(0.17255753, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$thetaH1, 
x5$thetaH1, tolerance = 1e-05) expect_equal(x5CodeBased$assumedStDevs, x5$assumedStDevs, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalRejectionProbabilities, x5$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPower, x5$conditionalPower, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedConfidenceIntervalLowerBounds, x5$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedConfidenceIntervalUpperBounds, x5$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x5CodeBased$repeatedPValues, x5$repeatedPValues, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x6 <- getAnalysisResults( design = design1, dataInput = dataInput1, directionUpper = FALSE, normalApproximation = TRUE, varianceOption = "notPooled", intersectionTest = "SpiessensDebois", stratifiedAnalysis = FALSE, stage = 2, nPlanned = c(30), allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x6' with expected results expect_equal(x6$thetaH1[1, ], -88.724476, tolerance = 1e-07) expect_equal(x6$thetaH1[2, ], NA_real_) expect_equal(x6$assumedStDevs[1, ], 147.03819, tolerance = 1e-07) expect_equal(x6$assumedStDevs[2, ], NA_real_) expect_equal(x6$conditionalRejectionProbabilities[1, ], c(0.042609088, 0.32732548, NA_real_), tolerance = 1e-07) expect_equal(x6$conditionalRejectionProbabilities[2, ], c(0.088609047, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$conditionalPower[1, ], c(NA_real_, NA_real_, 0.86664918), tolerance = 1e-07) expect_equal(x6$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x6$repeatedConfidenceIntervalLowerBounds[1, ], c(-205.0678, -171.09289, NA_real_), tolerance = 1e-07) 
expect_equal(x6$repeatedConfidenceIntervalLowerBounds[2, ], c(-169.37906, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalUpperBounds[1, ], c(89.267801, 17.032571, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedConfidenceIntervalUpperBounds[2, ], c(45.665535, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues[1, ], c(0.24818852, 0.036684963, NA_real_), tolerance = 1e-07) expect_equal(x6$repeatedPValues[2, ], c(0.16619082, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) expect_equal(x6CodeBased$thetaH1, x6$thetaH1, tolerance = 1e-05) expect_equal(x6CodeBased$assumedStDevs, x6$assumedStDevs, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalRejectionProbabilities, x6$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPower, x6$conditionalPower, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedConfidenceIntervalLowerBounds, x6$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedConfidenceIntervalUpperBounds, x6$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x6CodeBased$repeatedPValues, x6$repeatedPValues, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': select S1 at first IA, gMax = 2, Fisher design, Sidak and Bonferroni", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} 
# @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedtTestEnrichment} design2 <- getDesignFisher( kMax = 3, alpha = 0.02, alpha0Vec = c(0.7, 0.5), method = "fullAlpha", bindingFutility = TRUE, informationRates = c(0.3, 0.7, 1) ) S1 <- getDataset( sampleSize1 = c(12, 21), sampleSize2 = c(18, 21), mean1 = c(107.7, 84.9), mean2 = c(165.6, 195.9), stDev1 = c(128.5, 139.5), stDev2 = c(120.1, 185.0) ) F <- getDataset( sampleSize1 = c(26, NA), sampleSize2 = c(29, NA), mean1 = c(86.48462, NA), mean2 = c(148.34138, NA), stDev1 = c(129.1485, NA), stDev2 = c(122.888, NA) ) dataInput1 <- getDataset(S1 = S1, F = F) x7 <- getAnalysisResults( design = design2, dataInput = dataInput1, directionUpper = FALSE, normalApproximation = FALSE, varianceOption = "pooled", intersectionTest = "Sidak", stratifiedAnalysis = FALSE, stage = 2, thetaH1 = c(-30, NA), assumedStDevs = c(88, NA), nPlanned = c(30), allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentFisher object 'x7' with expected results expect_equal(x7$conditionalRejectionProbabilities[1, ], c(0.029419226, 0.36686704, NA_real_), tolerance = 1e-07) expect_equal(x7$conditionalRejectionProbabilities[2, ], c(0.039811318, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$conditionalPower[1, ], c(NA_real_, NA_real_, 0.70542247), tolerance = 1e-07) expect_equal(x7$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x7$repeatedConfidenceIntervalLowerBounds[1, ], c(-194.17913, -187.01693, NA_real_), tolerance = 1e-07) 
expect_equal(x7$repeatedConfidenceIntervalLowerBounds[2, ], c(-158.83149, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalUpperBounds[1, ], c(78.379133, 16.599438, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedConfidenceIntervalUpperBounds[2, ], c(35.117971, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedPValues[1, ], c(0.20187628, 0.035489058, NA_real_), tolerance = 1e-07) expect_equal(x7$repeatedPValues[2, ], c(0.14858412, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$conditionalRejectionProbabilities, x7$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPower, x7$conditionalPower, tolerance = 1e-05) expect_equal(x7CodeBased$repeatedConfidenceIntervalLowerBounds, x7$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x7CodeBased$repeatedConfidenceIntervalUpperBounds, x7$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x7CodeBased$repeatedPValues, x7$repeatedPValues, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x8 <- getAnalysisResults( design = design2, dataInput = dataInput1, directionUpper = FALSE, normalApproximation = FALSE, varianceOption = "notPooled", intersectionTest = "Bonferroni", stratifiedAnalysis = FALSE, stage = 2, nPlanned = c(30), allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentFisher object 'x8' with expected results expect_equal(x8$thetaH1[1, ], 
-88.724476, tolerance = 1e-07) expect_equal(x8$thetaH1[2, ], NA_real_) expect_equal(x8$assumedStDevs[1, ], 147.03819, tolerance = 1e-07) expect_equal(x8$assumedStDevs[2, ], NA_real_) expect_equal(x8$conditionalRejectionProbabilities[1, ], c(0.028559196, 0.34741778, NA_real_), tolerance = 1e-07) expect_equal(x8$conditionalRejectionProbabilities[2, ], c(0.038896649, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$conditionalPower[1, ], c(NA_real_, NA_real_, 0.878132), tolerance = 1e-07) expect_equal(x8$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x8$repeatedConfidenceIntervalLowerBounds[1, ], c(-198.85804, -189.35465, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalLowerBounds[2, ], c(-159.22325, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalUpperBounds[1, ], c(83.058044, 17.838621, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedConfidenceIntervalUpperBounds[2, ], c(35.509728, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedPValues[1, ], c(0.20789586, 0.036783191, NA_real_), tolerance = 1e-07) expect_equal(x8$repeatedPValues[2, ], c(0.15219281, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) expect_equal(x8CodeBased$thetaH1, x8$thetaH1, tolerance = 1e-05) expect_equal(x8CodeBased$assumedStDevs, x8$assumedStDevs, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalRejectionProbabilities, x8$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalPower, x8$conditionalPower, tolerance = 1e-05) expect_equal(x8CodeBased$repeatedConfidenceIntervalLowerBounds, x8$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) 
expect_equal(x8CodeBased$repeatedConfidenceIntervalUpperBounds, x8$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x8CodeBased$repeatedPValues, x8$repeatedPValues, tolerance = 1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_plan_section("Testing Analysis Enrichment Means Function (two sub-populations)") test_that("'getAnalysisResults': stratified analysis, select S1 at first IA, gMax = 3", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedtTestEnrichment} S1 <- getDataset( sampleSize2 = c(12, 33, 21), sampleSize1 = c(18, 17, 23), mean2 = c(107.7, 77.7, 84.9), mean1 = c(125.6, 111.1, 99.9), stDev2 = c(128.5, 133.3, 84.9), stDev1 = c(120.1, 145.6, 74.3) ) S2 <- getDataset( sampleSize2 = c(14, NA, NA), sampleSize1 = c(11, NA, NA), mean2 = c(68.3, NA, NA), mean1 = c(100.1, NA, NA), stDev2 = c(124.0, NA, NA), stDev1 = c(116.8, NA, NA) ) S12 <- getDataset( sampleSize2 = c(21, 12, 33), sampleSize1 = c(21, 17, 31), mean2 = c(84.9, 107.7, 77.7), mean1 = c(135.9, 117.7, 97.7), stDev2 = c(139.5, 107.7, 77.7), stDev1 = c(185.0, 92.3, 87.3) ) R <- getDataset( sampleSize2 = c(33, NA, NA), sampleSize1 = c(19, NA, NA), mean2 = c(77.1, NA, NA), mean1 = c(142.4, NA, NA), stDev2 = c(163.5, NA, NA), stDev1 
= c(120.6, NA, NA) ) dataInput1 <- getDataset(S1 = S1, S2 = S2, S12 = S12, R = R) ## Comparison of the results of DatasetMeans object 'dataInput1' with expected results expect_equal(dataInput1$overallSampleSizes, c(18, 11, 21, 19, 12, 14, 21, 33, 35, NA_real_, 38, NA_real_, 45, NA_real_, 33, NA_real_, 58, NA_real_, 69, NA_real_, 66, NA_real_, 66, NA_real_)) expect_equal(dataInput1$overallMeans, c(125.6, 100.1, 135.9, 142.4, 107.7, 68.3, 84.9, 77.1, 118.55714, NA_real_, 127.75789, NA_real_, 85.7, NA_real_, 93.190909, NA_real_, 111.15862, NA_real_, 114.25362, NA_real_, 85.445455, NA_real_, 85.445455, NA_real_), tolerance = 1e-07) expect_equal(dataInput1$overallStDevs, c(120.1, 116.8, 185, 120.6, 128.5, 124, 139.5, 163.5, 131.30971, NA_real_, 149.22508, NA_real_, 131.26649, NA_real_, 127.56945, NA_real_, 111.80482, NA_real_, 125.32216, NA_real_, 117.82181, NA_real_, 105.0948, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput1), NA))) expect_output(print(dataInput1)$show()) invisible(capture.output(expect_error(summary(dataInput1), NA))) expect_output(summary(dataInput1)$show()) dataInput1CodeBased <- eval(parse(text = getObjectRCode(dataInput1, stringWrapParagraphWidth = NULL))) expect_equal(dataInput1CodeBased$overallSampleSizes, dataInput1$overallSampleSizes, tolerance = 1e-05) expect_equal(dataInput1CodeBased$overallMeans, dataInput1$overallMeans, tolerance = 1e-05) expect_equal(dataInput1CodeBased$overallStDevs, dataInput1$overallStDevs, tolerance = 1e-05) expect_type(names(dataInput1), "character") df <- as.data.frame(dataInput1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults': select S1 and S2 at first IA, select S1 at second, gMax = 3", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # 
@refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedtTestEnrichment} design1 <- getDesignInverseNormal( kMax = 3, alpha = 0.02, futilityBounds = c(-0.5, 0), bindingFutility = TRUE, typeOfDesign = "OF", informationRates = c(0.5, 0.7, 1) ) S1N <- getDataset( sampleSize1 = c(39, 34, NA), sampleSize2 = c(33, 45, NA), stDev1 = c(156.5026, 120.084, NA), stDev2 = c(134.0254, 126.502, NA), mean1 = c(131.146, 114.4, NA), mean2 = c(93.191, 85.7, NA) ) S2N <- getDataset( sampleSize1 = c(32, NA, NA), sampleSize2 = c(35, NA, NA), stDev1 = c(163.645, NA, NA), stDev2 = c(131.888, NA, NA), mean1 = c(123.594, NA, NA), mean2 = c(78.26, NA, NA) ) F <- getDataset( sampleSize1 = c(69, NA, NA), sampleSize2 = c(80, NA, NA), stDev1 = c(165.4682, NA, NA), stDev2 = c(143.9796, NA, NA), mean1 = c(129.2957, NA, NA), mean2 = c(82.1875, NA, NA) ) dataInput2 <- getDataset(S1 = S1N, S2 = S2N, F = F) ## Comparison of the results of DatasetMeans object 'dataInput2' with expected results expect_equal(dataInput2$overallSampleSizes, c(39, 32, 69, 33, 35, 80, 73, NA_real_, NA_real_, 78, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(dataInput2$overallMeans, c(131.146, 123.594, 129.2957, 93.191, 78.26, 82.1875, 123.34649, NA_real_, NA_real_, 88.869269, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(dataInput2$overallStDevs, c(156.5026, 163.645, 165.4682, 134.0254, 131.888, 143.9796, 140.02459, NA_real_, NA_real_, 
128.93165, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput2), NA))) expect_output(print(dataInput2)$show()) invisible(capture.output(expect_error(summary(dataInput2), NA))) expect_output(summary(dataInput2)$show()) dataInput2CodeBased <- eval(parse(text = getObjectRCode(dataInput2, stringWrapParagraphWidth = NULL))) expect_equal(dataInput2CodeBased$overallSampleSizes, dataInput2$overallSampleSizes, tolerance = 1e-05) expect_equal(dataInput2CodeBased$overallMeans, dataInput2$overallMeans, tolerance = 1e-05) expect_equal(dataInput2CodeBased$overallStDevs, dataInput2$overallStDevs, tolerance = 1e-05) expect_type(names(dataInput2), "character") df <- as.data.frame(dataInput2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x1 <- getAnalysisResults( design = design1, dataInput = dataInput2, directionUpper = TRUE, normalApproximation = FALSE, varianceOption = "pooled", intersectionTest = "Sidak", stratifiedAnalysis = FALSE, stage = 2, nPlanned = c(80), allocationRatioPlanned = 2 ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x1' with expected results expect_equal(x1$thetaH1[1, ], 34.477224, tolerance = 1e-07) expect_equal(x1$thetaH1[2, ], NA_real_) expect_equal(x1$thetaH1[3, ], NA_real_) expect_equal(x1$assumedStDevs[1, ], 134.40636, tolerance = 1e-07) expect_equal(x1$assumedStDevs[2, ], NA_real_) expect_equal(x1$assumedStDevs[3, ], NA_real_) expect_equal(x1$conditionalRejectionProbabilities[1, ], c(0.016142454, 0.02613542, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities[2, ], c(0.016142454, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalRejectionProbabilities[3, ], c(0.050007377, NA_real_, NA_real_), 
tolerance = 1e-07) expect_equal(x1$conditionalPower[1, ], c(NA_real_, NA_real_, 0.19507788), tolerance = 1e-07) expect_equal(x1$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[1, ], c(-81.45856, -34.885408, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[2, ], c(-79.606691, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalLowerBounds[3, ], c(-38.192738, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[1, ], c(157.36856, 103.57092, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[2, ], c(170.27469, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds[3, ], c(132.40914, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[1, ], c(0.34605439, 0.18712011, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[2, ], c(0.34605439, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x1$repeatedPValues[3, ], c(0.22233542, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$thetaH1, x1$thetaH1, tolerance = 1e-05) expect_equal(x1CodeBased$assumedStDevs, x1$assumedStDevs, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) 
expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design3 <- getDesignInverseNormal( kMax = 3, alpha = 0.02, futilityBounds = c(-0.5, 0), bindingFutility = TRUE, typeOfDesign = "OF", informationRates = c(0.5, 0.7, 1) ) design2 <- getDesignFisher( kMax = 3, alpha = 0.02, alpha0Vec = c(0.7, 0.5), method = "equalAlpha", bindingFutility = TRUE, informationRates = c(0.3, 0.7, 1) ) x2 <- getAnalysisResults( design = design3, dataInput = dataInput2, directionUpper = TRUE, normalApproximation = FALSE, varianceOption = "notPooled", intersectionTest = "Simes", stratifiedAnalysis = FALSE, stage = 2, thetaH1 = c(50, 30, NA), assumedStDevs = c(122, 88, NA), nPlanned = 80, allocationRatioPlanned = 0.5 ) ## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x2' with expected results expect_equal(x2$conditionalRejectionProbabilities[1, ], c(0.03098783, 0.056162964, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[2, ], c(0.03098783, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalRejectionProbabilities[3, ], c(0.045486533, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower[1, ], c(NA_real_, NA_real_, 0.55574729), tolerance = 1e-07) expect_equal(x2$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[1, ], c(-79.922689, -34.33441, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds[2, ], c(-81.369964, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x2$repeatedConfidenceIntervalLowerBounds[3, ], c(-39.221831, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[1, ], c(155.83269, 103.18642, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[2, ], c(172.03796, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds[3, ], c(133.43823, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[1, ], c(0.27466247, 0.13478543, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[2, ], c(0.27466247, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues[3, ], c(0.23257404, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x3 <- getAnalysisResults( design = design2, dataInput = dataInput2, directionUpper = TRUE, normalApproximation = FALSE, varianceOption = "pooled", intersectionTest = "Sidak", stratifiedAnalysis 
= FALSE, stage = 2, nPlanned = 80, allocationRatioPlanned = 0.5 ) ## Comparison of the results of AnalysisResultsEnrichmentFisher object 'x3' with expected results expect_equal(x3$thetaH1[1, ], 34.477224, tolerance = 1e-07) expect_equal(x3$thetaH1[2, ], NA_real_) expect_equal(x3$thetaH1[3, ], NA_real_) expect_equal(x3$assumedStDevs[1, ], 134.40636, tolerance = 1e-07) expect_equal(x3$assumedStDevs[2, ], NA_real_) expect_equal(x3$assumedStDevs[3, ], NA_real_) expect_equal(x3$conditionalRejectionProbabilities[1, ], c(0.01300837, 0.0063168592, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities[2, ], c(0.01300837, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalRejectionProbabilities[3, ], c(0.024114983, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower[1, ], c(NA_real_, NA_real_, 0.078920631), tolerance = 1e-07) expect_equal(x3$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[1, ], c(-58.494162, -30.46834, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[2, ], c(-55.474155, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds[3, ], c(-22.271868, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[1, ], c(134.40416, 94.713072, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[2, ], c(146.14216, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds[3, ], c(116.48827, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[1, ], c(0.29239601, 0.21229229, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[2, ], c(0.29239601, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues[3, ], c(0.15217469, NA_real_, NA_real_), tolerance = 1e-07) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$thetaH1, x3$thetaH1, tolerance = 1e-05) expect_equal(x3CodeBased$assumedStDevs, x3$assumedStDevs, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x4 <- getAnalysisResults( design = design2, dataInput = dataInput2, directionUpper = TRUE, normalApproximation = FALSE, varianceOption = "notPooled", intersectionTest = "Simes", stratifiedAnalysis = FALSE, stage = 2, thetaH1 = c(50, NA, NA), assumedStDevs = c(122, NA, NA), nPlanned = 80, allocationRatioPlanned = 0.5 ) ## Comparison of the results of AnalysisResultsEnrichmentFisher object 'x4' with expected results expect_equal(x4$conditionalRejectionProbabilities[1, ], c(0.018024059, 0.0095704388, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities[2, ], c(0.018024059, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$conditionalRejectionProbabilities[3, ], c(0.022674244, NA_real_, NA_real_), tolerance = 1e-07) 
expect_equal(x4$conditionalPower[1, ], c(NA_real_, NA_real_, 0.26935817), tolerance = 1e-07) expect_equal(x4$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x4$repeatedConfidenceIntervalLowerBounds[1, ], c(-57.292213, -30.050759, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds[2, ], c(-56.802775, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalLowerBounds[3, ], c(-23.100932, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds[1, ], c(133.20221, 94.521132, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds[2, ], c(147.47078, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedConfidenceIntervalUpperBounds[3, ], c(117.31733, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues[1, ], c(0.20840036, 0.16345568, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues[2, ], c(0.20840036, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(x4$repeatedPValues[3, ], c(0.16277762, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$conditionalRejectionProbabilities, x4$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPower, x4$conditionalPower, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalLowerBounds, x4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x4CodeBased$repeatedConfidenceIntervalUpperBounds, x4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) 
expect_equal(x4CodeBased$repeatedPValues, x4$repeatedPValues, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_plan_section("Testing Analysis Enrichment Means Function (more sub-populations)") test_that("'getAnalysisResults': select S1 and S3 at first IA, select S1 at second, gMax = 4", { .skipTestIfDisabled() # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment} # @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment} # @refFS[Formula]{fs:computeRCIsEnrichment} # @refFS[Formula]{fs:conditionalPowerEnrichment} # @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment} # @refFS[Formula]{fs:stratifiedtTestEnrichment} S1 <- getDataset( sampleSize1 = c(14, 22, 24), sampleSize2 = c(11, 18, 21), mean1 = c(68.3, 107.4, 101.2), mean2 = c(100.1, 140.9, 133.8), stDev1 = c(124.0, 134.7, 124.2), stDev2 = c(116.8, 133.7, 131.2) ) S2 <- getDataset( sampleSize1 = c(12, NA, NA), sampleSize2 = c(18, NA, NA), mean1 = c(107.7, NA, NA), mean2 = c(125.6, NA, NA), stDev1 = c(128.5, NA, NA), stDev2 = c(120.1, NA, NA) ) S3 <- getDataset( sampleSize1 = c(17, 24, NA), sampleSize2 = c(14, 19, NA), mean1 = c(64.3, 101.4, NA), mean2 = c(103.1, 170.4, NA), stDev1 = c(128.0, 125.3, NA), stDev2 = c(111.8, 143.6, NA) ) F <- getDataset( sampleSize1 = c(83, NA, NA), sampleSize2 = c(79, NA, NA), mean1 = c(77.1, NA, NA), mean2 = c(142.4, NA, NA), stDev1 = c(163.5, NA, NA), stDev2 = c(120.6, NA, NA) ) dataInput3 <- getDataset(S1 = S1, S2 = S2, S3 = S3, F = F) ## Comparison of the results of 
DatasetMeans object 'dataInput3' with expected results expect_equal(dataInput3$overallSampleSizes, c(14, 12, 17, 83, 11, 18, 14, 79, 36, NA_real_, 41, NA_real_, 29, NA_real_, 33, NA_real_, 60, NA_real_, NA_real_, NA_real_, 50, NA_real_, NA_real_, NA_real_)) expect_equal(dataInput3$overallMeans, c(68.3, 107.7, 64.3, 77.1, 100.1, 125.6, 103.1, 142.4, 92.194444, NA_real_, 86.017073, NA_real_, 125.42414, NA_real_, 141.84848, NA_real_, 95.796667, NA_real_, NA_real_, NA_real_, 128.942, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) expect_equal(dataInput3$overallStDevs, c(124, 128.5, 128, 163.5, 116.8, 120.1, 111.8, 120.6, 130.27375, NA_real_, 126.18865, NA_real_, 127.0088, NA_real_, 133.48411, NA_real_, 126.8892, NA_real_, NA_real_, NA_real_, 127.51934, NA_real_, NA_real_, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dataInput3), NA))) expect_output(print(dataInput3)$show()) invisible(capture.output(expect_error(summary(dataInput3), NA))) expect_output(summary(dataInput3)$show()) dataInput3CodeBased <- eval(parse(text = getObjectRCode(dataInput3, stringWrapParagraphWidth = NULL))) expect_equal(dataInput3CodeBased$overallSampleSizes, dataInput3$overallSampleSizes, tolerance = 1e-05) expect_equal(dataInput3CodeBased$overallMeans, dataInput3$overallMeans, tolerance = 1e-05) expect_equal(dataInput3CodeBased$overallStDevs, dataInput3$overallStDevs, tolerance = 1e-05) expect_type(names(dataInput3), "character") df <- as.data.frame(dataInput3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dataInput3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } design1 <- getDesignInverseNormal( kMax = 3, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.28, informationRates = c(0.5, 0.7, 1) ) x1 <- getAnalysisResults( design = design1, dataInput = dataInput3, directionUpper = FALSE, normalApproximation = FALSE, varianceOption = "notPooled", 
intersectionTest = "Simes", stratifiedAnalysis = FALSE
	)

	## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x1' with expected results
	expect_equal(x1$thetaH1[1, ], -33.145333, tolerance = 1e-07)
	expect_equal(x1$thetaH1[2, ], NA_real_)
	expect_equal(x1$thetaH1[3, ], NA_real_)
	expect_equal(x1$thetaH1[4, ], NA_real_)
	expect_equal(x1$assumedStDevs[1, ], 127.17548, tolerance = 1e-07)
	expect_equal(x1$assumedStDevs[2, ], NA_real_)
	expect_equal(x1$assumedStDevs[3, ], NA_real_)
	expect_equal(x1$assumedStDevs[4, ], NA_real_)
	expect_equal(x1$conditionalRejectionProbabilities[1, ], c(0.0046188669, 0.003141658, NA_real_), tolerance = 1e-07)
	expect_equal(x1$conditionalRejectionProbabilities[2, ], c(0.0046188669, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x1$conditionalRejectionProbabilities[3, ], c(0.0046188669, 0.0093523023, NA_real_), tolerance = 1e-07)
	expect_equal(x1$conditionalRejectionProbabilities[4, ], c(0.41158519, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x1$conditionalPower[1, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x1$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x1$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x1$conditionalPower[4, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x1$repeatedConfidenceIntervalLowerBounds[1, ], c(-189.95235, -137.25075, -108.04127), tolerance = 1e-07)
	expect_equal(x1$repeatedConfidenceIntervalLowerBounds[2, ], c(-170.18127, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x1$repeatedConfidenceIntervalLowerBounds[3, ], c(-175.96326, -146.15913, NA_real_), tolerance = 1e-07)
	expect_equal(x1$repeatedConfidenceIntervalLowerBounds[4, ], c(-132.10549, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x1$repeatedConfidenceIntervalUpperBounds[1, ], c(126.35235, 72.344345, 43.127962), tolerance = 1e-07)
	expect_equal(x1$repeatedConfidenceIntervalUpperBounds[2, ], c(134.38127, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x1$repeatedConfidenceIntervalUpperBounds[3, ], c(98.363257, 46.507217, NA_real_), tolerance = 1e-07)
	expect_equal(x1$repeatedConfidenceIntervalUpperBounds[4, ], c(1.5054896, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x1$repeatedPValues[1, ], c(0.5, 0.35403281, 0.20618784), tolerance = 1e-07)
	expect_equal(x1$repeatedPValues[2, ], c(0.5, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x1$repeatedPValues[3, ], c(0.5, 0.26324129, NA_real_), tolerance = 1e-07)
	expect_equal(x1$repeatedPValues[4, ], c(0.029329288, NA_real_, NA_real_), tolerance = 1e-07)
	if (isTRUE(.isCompleteUnitTestSetEnabled())) {
		# Extended checks (full test set only): print/summary must not error,
		# and the object must round-trip through its generated R code
		# (getObjectRCode -> eval) to an equivalent object.
		invisible(capture.output(expect_error(print(x1), NA)))
		expect_output(print(x1)$show())
		invisible(capture.output(expect_error(summary(x1), NA)))
		expect_output(summary(x1)$show())
		x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL)))
		expect_equal(x1CodeBased$thetaH1, x1$thetaH1, tolerance = 1e-05)
		expect_equal(x1CodeBased$assumedStDevs, x1$assumedStDevs, tolerance = 1e-05)
		expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05)
		expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05)
		expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
		expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
		expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05)
		expect_type(names(x1), "character")
		df <- as.data.frame(x1)
		expect_s3_class(df, "data.frame")
		expect_true(nrow(df) > 0 && ncol(df) > 0)
		mtx <- as.matrix(x1)
		expect_true(is.matrix(mtx))
		expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
	}
})

# Enrichment analysis with gMax = 4: a dataset is assembled from seven
# stratum datasets (S1, S2, S3, S12, S13, S23, S123) plus the remainder R,
# and the stratified inverse normal analysis is checked at stage 2 against
# fixed regression values.
test_that("'getAnalysisResults': stratified analysis, gMax = 4", {
	.skipTestIfDisabled()

	# @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment}
	# @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment}
	# @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment}
	# @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSidakEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSimesEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment}
	# @refFS[Formula]{fs:computeRCIsEnrichment}
	# @refFS[Formula]{fs:conditionalPowerEnrichment}
	# @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment}
	# @refFS[Formula]{fs:stratifiedtTestEnrichment}
	S1 <- getDataset(
		sampleSize1 = c(14, 22, NA),
		sampleSize2 = c(11, 18, NA),
		mean1 = c(68.3, 107.4, NA),
		mean2 = c(100.1, 140.9, NA),
		stDev1 = c(124.0, 134.7, NA),
		stDev2 = c(116.8, 133.7, NA)
	)
	S2 <- getDataset(
		sampleSize1 = c(12, NA, NA),
		sampleSize2 = c(18, NA, NA),
		mean1 = c(107.7, NA, NA),
		mean2 = c(125.6, NA, NA),
		stDev1 = c(128.5, NA, NA),
		stDev2 = c(120.1, NA, NA)
	)
	S3 <- getDataset(
		sampleSize1 = c(17, 24, NA),
		sampleSize2 = c(14, 19, NA),
		mean1 = c(64.3, 101.4, NA),
		mean2 = c(103.1, 170.4, NA),
		stDev1 = c(128.0, 125.3, NA),
		stDev2 = c(111.8, 143.6, NA)
	)
	# NOTE(review): S12, S13, S23, and S123 below use identical data;
	# presumably intentional test input -- verify against the reference results.
	S12 <- getDataset(
		sampleSize1 = c(21, 12, 33),
		sampleSize2 = c(21, 17, 31),
		mean1 = c(84.9, 107.7, 77.7),
		mean2 = c(135.9, 117.7, 97.7),
		stDev1 = c(139.5, 107.7, 77.7),
		stDev2 = c(185.0, 92.3, 87.3)
	)
	S13 <- getDataset(
		sampleSize1 = c(21, 12, 33),
		sampleSize2 = c(21, 17, 31),
		mean1 = c(84.9, 107.7, 77.7),
		mean2 = c(135.9, 117.7, 97.7),
		stDev1 = c(139.5, 107.7, 77.7),
		stDev2 = c(185.0, 92.3, 87.3)
	)
	S23 <- getDataset(
		sampleSize1 = c(21, 12, 33),
		sampleSize2 = c(21, 17, 31),
		mean1 = c(84.9, 107.7, 77.7),
		mean2 = c(135.9, 117.7, 97.7),
		stDev1 = c(139.5, 107.7, 77.7),
		stDev2 = c(185.0, 92.3, 87.3)
	)
	S123 <- getDataset(
		sampleSize1 = c(21, 12, 33),
		sampleSize2 = c(21, 17, 31),
		mean1 = c(84.9, 107.7, 77.7),
		mean2 = c(135.9, 117.7, 97.7),
		stDev1 = c(139.5, 107.7, 77.7),
		stDev2 = c(185.0, 92.3, 87.3)
	)
	R <- getDataset(
		sampleSize1 = c(33, NA, NA),
		sampleSize2 = c(19, NA, NA),
		mean1 = c(77.1, NA, NA),
		mean2 = c(142.4, NA, NA),
		stDev1 = c(163.5, NA, NA),
		stDev2 = c(120.6, NA, NA)
	)
	dataInput4 <- getDataset(S1 = S1, S2 = S2, S3 = S3, S12 = S12, S23 = S23, S13 = S13, S123 = S123, R = R)

	## Comparison of the results of DatasetMeans object 'dataInput4' with expected results
	expect_equal(dataInput4$overallSampleSizes, c(14, 12, 17, 21, 21, 21, 21, 33, 11, 18, 14, 21, 21, 21, 21, 19, 36, NA_real_, 41, 33, 33, 33, 33, NA_real_, 29, NA_real_, 33, 38, 38, 38, 38, NA_real_, NA_real_, NA_real_, NA_real_, 66, 66, 66, 66, NA_real_, NA_real_, NA_real_, NA_real_, 69, 69, 69, 69, NA_real_))
	expect_equal(dataInput4$overallMeans, c(68.3, 107.7, 64.3, 84.9, 84.9, 84.9, 84.9, 77.1, 100.1, 125.6, 103.1, 135.9, 135.9, 135.9, 135.9, 142.4, 92.194444, NA_real_, 86.017073, 93.190909, 93.190909, 93.190909, 93.190909, NA_real_, 125.42414, NA_real_, 141.84848, 127.75789, 127.75789, 127.75789, 127.75789, NA_real_, NA_real_, NA_real_, NA_real_, 85.445455, 85.445455, 85.445455, 85.445455, NA_real_, NA_real_, NA_real_, NA_real_, 114.25362, 114.25362, 114.25362, 114.25362, NA_real_), tolerance = 1e-07)
	expect_equal(dataInput4$overallStDevs, c(124, 128.5, 128, 139.5, 139.5, 139.5, 139.5, 163.5, 116.8, 120.1, 111.8, 185, 185, 185, 185, 120.6, 130.27375, NA_real_, 126.18865, 127.56945, 127.56945, 127.56945, 127.56945, NA_real_, 127.0088, NA_real_, 133.48411, 149.22508, 149.22508, 149.22508, 149.22508, NA_real_, NA_real_, NA_real_, NA_real_, 105.0948, 105.0948, 105.0948, 105.0948, NA_real_, NA_real_, NA_real_, NA_real_, 125.32216, 125.32216, 125.32216, 125.32216, NA_real_), tolerance = 1e-07)
	if (isTRUE(.isCompleteUnitTestSetEnabled())) {
		# Extended checks: print/summary must not error, and the dataset must
		# round-trip through its generated R code representation.
		invisible(capture.output(expect_error(print(dataInput4), NA)))
		expect_output(print(dataInput4)$show())
		invisible(capture.output(expect_error(summary(dataInput4), NA)))
		expect_output(summary(dataInput4)$show())
		dataInput4CodeBased <- eval(parse(text = getObjectRCode(dataInput4, stringWrapParagraphWidth = NULL)))
		expect_equal(dataInput4CodeBased$overallSampleSizes, dataInput4$overallSampleSizes, tolerance = 1e-05)
		expect_equal(dataInput4CodeBased$overallMeans, dataInput4$overallMeans, tolerance = 1e-05)
		expect_equal(dataInput4CodeBased$overallStDevs, dataInput4$overallStDevs, tolerance = 1e-05)
		expect_type(names(dataInput4), "character")
		df <- as.data.frame(dataInput4)
		expect_s3_class(df, "data.frame")
		expect_true(nrow(df) > 0 && ncol(df) > 0)
		mtx <- as.matrix(dataInput4)
		expect_true(is.matrix(mtx))
		expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
	}

	# Wang & Tsiatis design (deltaWT = 0.28); analysis is performed at
	# interim stage 2 with a stratified, not-pooled-variance t test.
	design1 <- getDesignInverseNormal(
		kMax = 3, alpha = 0.025,
		typeOfDesign = "WT", deltaWT = 0.28, informationRates = c(0.5, 0.7, 1)
	)

	x2 <- getAnalysisResults(
		design = design1, dataInput = dataInput4,
		directionUpper = FALSE, normalApproximation = FALSE, varianceOption = "notPooled",
		intersectionTest = "Simes", stratifiedAnalysis = TRUE, stage = 2
	)

	## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x2' with expected results
	expect_equal(x2$thetaH1[1, ], -34.35943, tolerance = 1e-07)
	expect_equal(x2$thetaH1[2, ], NA_real_)
	expect_equal(x2$thetaH1[3, ], -39.831088, tolerance = 1e-07)
	expect_equal(x2$thetaH1[4, ], NA_real_)
	expect_equal(x2$assumedStDevs[1, ], 135.6664, tolerance = 1e-07)
	expect_equal(x2$assumedStDevs[2, ], NA_real_)
	expect_equal(x2$assumedStDevs[3, ], 135.69515, tolerance = 1e-07)
	expect_equal(x2$assumedStDevs[4, ], NA_real_)
	expect_equal(x2$conditionalRejectionProbabilities[1, ], c(0.14436944, 0.18888867, NA_real_), tolerance = 1e-07)
	expect_equal(x2$conditionalRejectionProbabilities[2, ], c(0.14436944, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x2$conditionalRejectionProbabilities[3, ], c(0.14436944, 0.23567728, NA_real_), tolerance = 1e-07)
	expect_equal(x2$conditionalRejectionProbabilities[4, ], c(0.33356756, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x2$conditionalPower[1, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x2$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x2$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x2$conditionalPower[4, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x2$repeatedConfidenceIntervalLowerBounds[1, ], c(-124.13667, -87.790806, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedConfidenceIntervalLowerBounds[2, ], c(-119.97906, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedConfidenceIntervalLowerBounds[3, ], c(-122.68924, -91.731817, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedConfidenceIntervalLowerBounds[4, ], c(-97.969856, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedConfidenceIntervalUpperBounds[1, ], c(28.41771, 15.834301, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedConfidenceIntervalUpperBounds[2, ], c(30.295343, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedConfidenceIntervalUpperBounds[3, ], c(25.470801, 9.1408918, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedConfidenceIntervalUpperBounds[4, ], c(3.369313, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedPValues[1, ], c(0.096549841, 0.052699984, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedPValues[2, ], c(0.096549841, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedPValues[3, ], c(0.096549841, 0.042135201, NA_real_), tolerance = 1e-07)
	expect_equal(x2$repeatedPValues[4, ], c(0.039953198, NA_real_, NA_real_), tolerance = 1e-07)
	if (isTRUE(.isCompleteUnitTestSetEnabled())) {
		# Extended checks: print/summary must not error, and the result must
		# round-trip through its generated R code representation.
		invisible(capture.output(expect_error(print(x2), NA)))
		expect_output(print(x2)$show())
		invisible(capture.output(expect_error(summary(x2), NA)))
		expect_output(summary(x2)$show())
		x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL)))
		expect_equal(x2CodeBased$thetaH1, x2$thetaH1, tolerance = 1e-05)
		expect_equal(x2CodeBased$assumedStDevs, x2$assumedStDevs, tolerance = 1e-05)
		expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05)
		expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05)
		expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
		expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
		expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05)
		expect_type(names(x2), "character")
		df <- as.data.frame(x2)
		expect_s3_class(df, "data.frame")
		expect_true(nrow(df) > 0 && ncol(df) > 0)
		mtx <- as.matrix(x2)
		expect_true(is.matrix(mtx))
		expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
	}
})

test_plan_section("Testing Analysis Enrichment Means Function (more sub-populations)")

# Enrichment analysis with gMax = 3 and a 'noEarlyEfficacy' design:
# only S1 is continued after the first interim analysis, so rows 2 and 3
# (S2 and full population F) are expected to be NA from stage 2 onwards.
test_that("'getAnalysisResults': select S1 at first IA, gMax = 3, no early efficacy stop", {
	.skipTestIfDisabled()

	# @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment}
	# @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimesEnrichment}
	# @refFS[Formula]{fs:adjustedPValueForRCISidakEnrichment}
	# @refFS[Formula]{fs:adjustedPValueForRCISpiessensEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSidakEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSimesEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment}
	# @refFS[Formula]{fs:computeRCIsEnrichment}
	# @refFS[Formula]{fs:conditionalPowerEnrichment}
	# @refFS[Formula]{fs:conditionalRejectionProbabilityEnrichment}
	# @refFS[Formula]{fs:stratifiedtTestEnrichment}
	S1 <- getDataset(
		sampleSize1 = c(14, 22, 24),
		sampleSize2 = c(11, 18, 21),
		mean1 = c(68.3, 107.4, 101.2),
		mean2 = c(100.1, 140.9, 133.8),
		stDev1 = c(124.0, 134.7, 124.2),
		stDev2 = c(116.8, 133.7, 131.2)
	)
	S2 <- getDataset(
		sampleSize1 = c(12, NA, NA),
		sampleSize2 = c(18, NA, NA),
		mean1 = c(107.7, NA, NA),
		mean2 = c(125.6, NA, NA),
		stDev1 = c(128.5, NA, NA),
		stDev2 = c(120.1, NA, NA)
	)
	# NOTE(review): 'F' here is the full-population dataset; the name shadows
	# R's FALSE shorthand within this test scope -- kept as-is to preserve the
	# recorded regression behavior.
	F <- getDataset(
		sampleSize1 = c(83, NA, NA),
		sampleSize2 = c(79, NA, NA),
		mean1 = c(77.1, NA, NA),
		mean2 = c(142.4, NA, NA),
		stDev1 = c(163.5, NA, NA),
		stDev2 = c(120.6, NA, NA)
	)
	dataInput3 <- getDataset(S1 = S1, S2 = S2, F = F)

	## Comparison of the results of DatasetMeans object 'dataInput3' with expected results
	expect_equal(dataInput3$overallSampleSizes, c(14, 12, 83, 11, 18, 79, 36, NA_real_, NA_real_, 29, NA_real_, NA_real_, 60, NA_real_, NA_real_, 50, NA_real_, NA_real_))
	expect_equal(dataInput3$overallMeans, c(68.3, 107.7, 77.1, 100.1, 125.6, 142.4, 92.194444, NA_real_, NA_real_, 125.42414, NA_real_, NA_real_, 95.796667, NA_real_, NA_real_, 128.942, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(dataInput3$overallStDevs, c(124, 128.5, 163.5, 116.8, 120.1, 120.6, 130.27375, NA_real_, NA_real_, 127.0088, NA_real_, NA_real_, 126.8892, NA_real_, NA_real_, 127.51934, NA_real_, NA_real_), tolerance = 1e-07)
	if (isTRUE(.isCompleteUnitTestSetEnabled())) {
		# Extended checks: print/summary must not error, and the dataset must
		# round-trip through its generated R code representation.
		invisible(capture.output(expect_error(print(dataInput3), NA)))
		expect_output(print(dataInput3)$show())
		invisible(capture.output(expect_error(summary(dataInput3), NA)))
		expect_output(summary(dataInput3)$show())
		dataInput3CodeBased <- eval(parse(text = getObjectRCode(dataInput3, stringWrapParagraphWidth = NULL)))
		expect_equal(dataInput3CodeBased$overallSampleSizes, dataInput3$overallSampleSizes, tolerance = 1e-05)
		expect_equal(dataInput3CodeBased$overallMeans, dataInput3$overallMeans, tolerance = 1e-05)
		expect_equal(dataInput3CodeBased$overallStDevs, dataInput3$overallStDevs, tolerance = 1e-05)
		expect_type(names(dataInput3), "character")
		df <- as.data.frame(dataInput3)
		expect_s3_class(df, "data.frame")
		expect_true(nrow(df) > 0 && ncol(df) > 0)
		mtx <- as.matrix(dataInput3)
		expect_true(is.matrix(mtx))
		expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
	}

	design3 <- getDesignInverseNormal(
		kMax = 3, alpha = 0.025,
		typeOfDesign = "noEarlyEfficacy", informationRates = c(0.4, 0.7, 1)
	)

	# Non-inferiority-type shifted hypothesis (thetaH0 = 30), direction lower.
	x3 <- getAnalysisResults(
		design = design3, dataInput = dataInput3,
		thetaH0 = 30, directionUpper = FALSE, normalApproximation = FALSE,
		varianceOption = "notPooled", intersectionTest = "Simes", stratifiedAnalysis = FALSE
	)

	## Comparison of the results of AnalysisResultsEnrichmentInverseNormal object 'x3' with expected results
	expect_equal(x3$thetaH1[1, ], -33.145333, tolerance = 1e-07)
	expect_equal(x3$thetaH1[2, ], NA_real_)
	expect_equal(x3$thetaH1[3, ], NA_real_)
	expect_equal(x3$assumedStDevs[1, ], 127.17548, tolerance = 1e-07)
	expect_equal(x3$assumedStDevs[2, ], NA_real_)
	expect_equal(x3$assumedStDevs[3, ], NA_real_)
	expect_equal(x3$conditionalRejectionProbabilities[1, ], c(0.043562209, 0.16805804, NA_real_), tolerance = 1e-07)
	expect_equal(x3$conditionalRejectionProbabilities[2, ], c(0.043562209, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x3$conditionalRejectionProbabilities[3, ], c(0.72997271, NA_real_, NA_real_), tolerance = 1e-07)
	expect_equal(x3$conditionalPower[1, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x3$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x3$conditionalPower[3, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x3$repeatedConfidenceIntervalLowerBounds[1, ], c(NA_real_, NA_real_, -94.8291), tolerance = 1e-07)
	expect_equal(x3$repeatedConfidenceIntervalLowerBounds[2, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x3$repeatedConfidenceIntervalLowerBounds[3, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x3$repeatedConfidenceIntervalUpperBounds[1, ], c(NA_real_, NA_real_, 29.811159), tolerance = 1e-07)
	expect_equal(x3$repeatedConfidenceIntervalUpperBounds[2, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x3$repeatedConfidenceIntervalUpperBounds[3, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x3$repeatedPValues[1, ], c(NA_real_, NA_real_, 0.010432269), tolerance = 1e-07)
	expect_equal(x3$repeatedPValues[2, ], c(NA_real_, NA_real_, NA_real_))
	expect_equal(x3$repeatedPValues[3, ], c(NA_real_, NA_real_, NA_real_))
	if (isTRUE(.isCompleteUnitTestSetEnabled())) {
		# Extended checks: print/summary must not error, and the result must
		# round-trip through its generated R code representation.
		invisible(capture.output(expect_error(print(x3), NA)))
		expect_output(print(x3)$show())
		invisible(capture.output(expect_error(summary(x3), NA)))
		expect_output(summary(x3)$show())
		x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL)))
		expect_equal(x3CodeBased$thetaH1, x3$thetaH1, tolerance = 1e-05)
		expect_equal(x3CodeBased$assumedStDevs, x3$assumedStDevs, tolerance = 1e-05)
		expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05)
		expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05)
		expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
		expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
		expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05)
		expect_type(names(x3), "character")
		df <- as.data.frame(x3)
		expect_s3_class(df, "data.frame")
		expect_true(nrow(df) > 0 && ncol(df) > 0)
		mtx <- as.matrix(x3)
		expect_true(is.matrix(mtx))
		expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
	}
})
rpact/tests/testthat/test-f_simulation_enrichment_means.R0000644000176200001440000014206114372422771023572 0ustar  liggesusers## |
## |  *Unit tests*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | RPACT package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File name: test-f_simulation_enrichment_means.R
## | Creation date: 06 February 2023, 12:13:59
## | File version: $Revision: 6810 $
## | Last changed: $Date: 2023-02-13 12:58:47 +0100 (Mo, 13 Feb 2023) $
## | Last changed by: $Author: pahlke $
## |
test_plan_section("Testing Simulation Enrichment Means Function")

# Simulation of enrichment designs with two populations (S and R) and a
# fixed seed: the four simulation runs below vary only the population
# selection rule ('epsilon', 'rBest', 'all'), the success criterion, and
# the intersection test, and are compared against recorded regression values.
test_that("'getSimulationEnrichmentMeans': gMax = 2", {
	.skipTestIfDisabled()

	# @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
	# @refFS[Sec.]{fs:sec:enrichmentDesigns}
	# @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment}
	# @refFS[Sec.]{fs:subsec:adaptiveClosedTestProcedureEnrichment}
	# @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment}
	# @refFS[Sec.]{fs:sec:simulationFunctions}
	# @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns}
	# @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification}
	# @refFS[Formula]{fs:stratifiedtTestEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment}
	# @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSidakEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSimesEnrichment}
	# @refFS[Formula]{fs:simulationEnrichmentMeansGenerate}
	# @refFS[Formula]{fs:simulationEnrichmentSelections}
	# @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification}
	# @refFS[Formula]{fs:enrichmentRejectionRule}

	# Generator of the 'effects' grid below -- do not remove
	# m <- c()
	# for (effect1 in seq(0, 0.5, 0.25)) {
	#     for (effect2 in seq(0, 0.5, 0.25)) {
	#         m <- c(m, effect1, effect2)
	#     }
	# }
	# effects <- matrix(m, byrow = TRUE, ncol = 2)

	effects <- matrix(c(
		0, 0, 0, 0.25, 0.25, 0.25, 0.5, 0.5, 0.5,
		0, 0.25, 0.5, 0, 0.25, 0.5, 0, 0.25, 0.5
	), ncol = 2)
	effectList <- list(subGroups = c("S", "R"), prevalences = c(0.2, 0.8), stDevs = 0.8, effects = effects)

	design <- getDesignInverseNormal(informationRates = c(0.3, 1), typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.025))

	# Run 1: epsilon selection, 'atLeastOne' success, Spiessens-Debois test.
	suppressWarnings(simResult1 <- getSimulationEnrichmentMeans(design,
		plannedSubjects = c(60, 160),
		effectList = effectList, maxNumberOfIterations = 100,
		allocationRatioPlanned = 0.5, typeOfSelection = "epsilon", epsilonValue = 0.1,
		successCriterion = "atLeastOne", intersectionTest = "SpiessensDebois", seed = 123
	))

	## Comparison of the results of SimulationResultsEnrichmentMeans object 'simResult1' with expected results
	expect_equal(simResult1$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100))
	expect_equal(simResult1$iterations[2, ], c(98, 95, 78, 99, 86, 71, 94, 85, 50))
	expect_equal(simResult1$rejectAtLeastOne, c(0.03, 0.16, 0.67, 0.18, 0.42, 0.74, 0.75, 0.75, 0.92), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult1$rejectedPopulationsPerStage)), c(0.01, 0.01, 0, 0, 0.02, 0.01, 0.01, 0.16, 0.02, 0.15, 0, 0.06, 0.05, 0.67, 0.05, 0.49, 0.08, 0.21, 0.01, 0, 0.05, 0.11, 0.22, 0.44, 0.01, 0.01, 0.13, 0.14, 0.29, 0.39, 0.01, 0.02, 0.13, 0.13, 0.49, 0.24), tolerance = 1e-07)
	expect_equal(simResult1$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0))
	expect_equal(simResult1$earlyStop[1, ], c(0.02, 0.05, 0.22, 0.01, 0.14, 0.29, 0.06, 0.15, 0.5), tolerance = 1e-07)
	expect_equal(simResult1$successPerStage[1, ], c(0.02, 0.05, 0.22, 0.01, 0.14, 0.29, 0.06, 0.15, 0.5), tolerance = 1e-07)
	expect_equal(simResult1$successPerStage[2, ], c(0.01, 0.11, 0.45, 0.17, 0.28, 0.45, 0.69, 0.6, 0.42), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult1$selectedPopulations)), c(1, 0.52, 1, 0.37, 1, 0.23, 1, 0.7, 1, 0.5, 1, 0.34, 1, 0.82, 1, 0.66, 1, 0.31, 1, 0.65, 1, 0.7, 1, 0.67, 1, 0.41, 1, 0.52, 1, 0.51, 1, 0.18, 1, 0.37, 1, 0.29), tolerance = 1e-07)
	expect_equal(simResult1$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2))
	expect_equal(simResult1$numberOfPopulations[2, ], c(1.1938776, 1.1263158, 1.1538462, 1.1212121, 1.1860465, 1.1971831, 1.0638298, 1.2117647, 1.2), tolerance = 1e-07)
	expect_equal(simResult1$expectedNumberOfSubjects, c(158, 155, 138, 159, 146, 131, 154, 145, 110))
	expect_equal(unlist(as.list(simResult1$sampleSizes)), c(12, 46.938776, 12, 41.052632, 12, 31.282051, 12, 66.868687, 12, 51.627907, 12, 42.535211, 12, 84.680851, 12, 65.176471, 12, 53.6, 48, 53.061224, 48, 58.947368, 48, 68.717949, 48, 33.131313, 48, 48.372093, 48, 57.464789, 48, 15.319149, 48, 34.823529, 48, 46.4), tolerance = 1e-07)
	expect_equal(simResult1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
	expect_equal(simResult1$conditionalPowerAchieved[2, ], c(0.047488291, 0.14634991, 0.18288786, 0.12148547, 0.21896362, 0.33298102, 0.17634955, 0.32251361, 0.45476897), tolerance = 1e-07)
	if (isTRUE(.isCompleteUnitTestSetEnabled())) {
		# Extended checks: print/summary must not error, and the simulation
		# result must round-trip through its generated R code representation.
		invisible(capture.output(expect_error(print(simResult1), NA)))
		expect_output(print(simResult1)$show())
		invisible(capture.output(expect_error(summary(simResult1), NA)))
		expect_output(summary(simResult1)$show())
		suppressWarnings(simResult1CodeBased <- eval(parse(text = getObjectRCode(simResult1, stringWrapParagraphWidth = NULL))))
		expect_equal(simResult1CodeBased$iterations, simResult1$iterations, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$rejectAtLeastOne, simResult1$rejectAtLeastOne, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$rejectedPopulationsPerStage, simResult1$rejectedPopulationsPerStage, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$futilityPerStage, simResult1$futilityPerStage, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$earlyStop, simResult1$earlyStop, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$successPerStage, simResult1$successPerStage, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$selectedPopulations, simResult1$selectedPopulations, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$numberOfPopulations, simResult1$numberOfPopulations, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$expectedNumberOfSubjects, simResult1$expectedNumberOfSubjects, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$sampleSizes, simResult1$sampleSizes, tolerance = 1e-05)
		expect_equal(simResult1CodeBased$conditionalPowerAchieved, simResult1$conditionalPowerAchieved, tolerance = 1e-05)
		expect_type(names(simResult1), "character")
		df <- as.data.frame(simResult1)
		expect_s3_class(df, "data.frame")
		expect_true(nrow(df) > 0 && ncol(df) > 0)
		mtx <- as.matrix(simResult1)
		expect_true(is.matrix(mtx))
		expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
	}

	# Run 2: r-best selection (r = 2), 'atLeastOne' success, Simes test.
	suppressWarnings(simResult2 <- getSimulationEnrichmentMeans(design,
		plannedSubjects = c(60, 160),
		effectList = effectList, maxNumberOfIterations = 100,
		allocationRatioPlanned = 0.5, typeOfSelection = "rBest", rValue = 2,
		successCriterion = "atLeastOne", intersectionTest = "Simes", seed = 123
	))

	## Comparison of the results of SimulationResultsEnrichmentMeans object 'simResult2' with expected results
	expect_equal(simResult2$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100))
	expect_equal(simResult2$iterations[2, ], c(100, 100, 77, 98, 89, 75, 92, 87, 56))
	expect_equal(simResult2$rejectAtLeastOne, c(0.01, 0.13, 0.7, 0.05, 0.41, 0.78, 0.24, 0.49, 0.94), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult2$rejectedPopulationsPerStage)), c(0, 0, 0, 0.03, 0.01, 0.04, 0.01, 0.02, 0.04, 0.03, 0.02, 0.05, 0.07, 0.15, 0.07, 0.22, 0.04, 0.17, 0, 0.01, 0, 0.13, 0.23, 0.47, 0.01, 0.01, 0.08, 0.29, 0.25, 0.53, 0.01, 0.04, 0.09, 0.33, 0.44, 0.5), tolerance = 1e-07)
	expect_equal(simResult2$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0))
	expect_equal(simResult2$earlyStop[1, ], c(0, 0, 0.23, 0.02, 0.11, 0.25, 0.08, 0.13, 0.44), tolerance = 1e-07)
	expect_equal(simResult2$successPerStage[1, ], c(0, 0, 0.23, 0.02, 0.11, 0.25, 0.08, 0.13, 0.44), tolerance = 1e-07)
	expect_equal(simResult2$successPerStage[2, ], c(0.01, 0.13, 0.47, 0.03, 0.3, 0.53, 0.16, 0.36, 0.5), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult2$selectedPopulations)), c(1, 1, 1, 1, 1, 0.77, 1, 0.98, 1, 0.89, 1, 0.75, 1, 0.92, 1, 0.87, 1, 0.56, 1, 1, 1, 1, 1, 0.77, 1, 0.98, 1, 0.89, 1, 0.75, 1, 0.92, 1, 0.87, 1, 0.56), tolerance = 1e-07)
	expect_equal(simResult2$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2))
	expect_equal(simResult2$numberOfPopulations[2, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2))
	expect_equal(simResult2$expectedNumberOfSubjects, c(160, 160, 137, 158, 149, 135, 152, 147, 116), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult2$sampleSizes)), c(12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80))
	expect_equal(simResult2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
	expect_equal(simResult2$conditionalPowerAchieved[2, ], c(0.068305544, 0.20988473, 0.20468607, 0.13306892, 0.26809268, 0.3042488, 0.16765633, 0.35488797, 0.3840908), tolerance = 1e-07)
	if (isTRUE(.isCompleteUnitTestSetEnabled())) {
		# Extended checks: print/summary must not error, and the simulation
		# result must round-trip through its generated R code representation.
		invisible(capture.output(expect_error(print(simResult2), NA)))
		expect_output(print(simResult2)$show())
		invisible(capture.output(expect_error(summary(simResult2), NA)))
		expect_output(summary(simResult2)$show())
		suppressWarnings(simResult2CodeBased <- eval(parse(text = getObjectRCode(simResult2, stringWrapParagraphWidth = NULL))))
		expect_equal(simResult2CodeBased$iterations, simResult2$iterations, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$rejectAtLeastOne, simResult2$rejectAtLeastOne, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$rejectedPopulationsPerStage, simResult2$rejectedPopulationsPerStage, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$futilityPerStage, simResult2$futilityPerStage, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$earlyStop, simResult2$earlyStop, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$successPerStage, simResult2$successPerStage, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$selectedPopulations, simResult2$selectedPopulations, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$numberOfPopulations, simResult2$numberOfPopulations, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$expectedNumberOfSubjects, simResult2$expectedNumberOfSubjects, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$sampleSizes, simResult2$sampleSizes, tolerance = 1e-05)
		expect_equal(simResult2CodeBased$conditionalPowerAchieved, simResult2$conditionalPowerAchieved, tolerance = 1e-05)
		expect_type(names(simResult2), "character")
		df <- as.data.frame(simResult2)
		expect_s3_class(df, "data.frame")
		expect_true(nrow(df) > 0 && ncol(df) > 0)
		mtx <- as.matrix(simResult2)
		expect_true(is.matrix(mtx))
		expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
	}

	# Run 3: continue all populations, 'atLeastOne' success, Sidak test.
	suppressWarnings(simResult3 <- getSimulationEnrichmentMeans(design,
		plannedSubjects = c(60, 160),
		effectList = effectList, maxNumberOfIterations = 100,
		allocationRatioPlanned = 0.5, typeOfSelection = "all",
		successCriterion = "atLeastOne", intersectionTest = "Sidak", seed = 123
	))

	## Comparison of the results of SimulationResultsEnrichmentMeans object 'simResult3' with expected results
	expect_equal(simResult3$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100))
	expect_equal(simResult3$iterations[2, ], c(100, 100, 76, 98, 90, 76, 92, 88, 56))
	expect_equal(simResult3$rejectAtLeastOne, c(0, 0.13, 0.7, 0.05, 0.41, 0.79, 0.24, 0.48, 0.94), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult3$rejectedPopulationsPerStage)), c(0, 0, 0, 0.03, 0.01, 0.04, 0.01, 0.02, 0.03, 0.04, 0.01, 0.06, 0.07, 0.15, 0.06, 0.23, 0.04, 0.17, 0, 0, 0, 0.13, 0.24, 0.46, 0.01, 0.01, 0.07, 0.3, 0.24, 0.55, 0.01, 0.04, 0.08, 0.33, 0.44, 0.5), tolerance = 1e-07)
	expect_equal(simResult3$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0, 0, 0))
	expect_equal(simResult3$earlyStop[1, ], c(0, 0, 0.24, 0.02, 0.1, 0.24, 0.08, 0.12, 0.44), tolerance = 1e-07)
	expect_equal(simResult3$successPerStage[1, ], c(0, 0, 0.24, 0.02, 0.1, 0.24, 0.08, 0.12, 0.44), tolerance = 1e-07)
	expect_equal(simResult3$successPerStage[2, ], c(0, 0.13, 0.46, 0.03, 0.31, 0.55, 0.16, 0.36, 0.5), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult3$selectedPopulations)), c(1, 1, 1, 1, 1, 0.76, 1, 0.98, 1, 0.9, 1, 0.76, 1, 0.92, 1, 0.88, 1, 0.56, 1, 1, 1, 1, 1, 0.76, 1, 0.98, 1, 0.9, 1, 0.76, 1, 0.92, 1, 0.88, 1, 0.56), tolerance = 1e-07)
	expect_equal(simResult3$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2))
	expect_equal(simResult3$numberOfPopulations[2, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2))
	expect_equal(simResult3$expectedNumberOfSubjects, c(160, 160, 136, 158, 150, 136, 152, 148, 116), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult3$sampleSizes)), c(12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80, 48, 80))
	expect_equal(simResult3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
	expect_equal(simResult3$conditionalPowerAchieved[2, ], c(0.068305544, 0.20988473, 0.2073793, 0.13306892, 0.27600384, 0.31320424, 0.16765633, 0.36196259, 0.3840908), tolerance = 1e-07)
	if (isTRUE(.isCompleteUnitTestSetEnabled())) {
		# Extended checks: print/summary must not error, and the simulation
		# result must round-trip through its generated R code representation.
		invisible(capture.output(expect_error(print(simResult3), NA)))
		expect_output(print(simResult3)$show())
		invisible(capture.output(expect_error(summary(simResult3), NA)))
		expect_output(summary(simResult3)$show())
		suppressWarnings(simResult3CodeBased <- eval(parse(text = getObjectRCode(simResult3, stringWrapParagraphWidth = NULL))))
		expect_equal(simResult3CodeBased$iterations, simResult3$iterations, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$rejectAtLeastOne, simResult3$rejectAtLeastOne, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$rejectedPopulationsPerStage, simResult3$rejectedPopulationsPerStage, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$futilityPerStage, simResult3$futilityPerStage, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$earlyStop, simResult3$earlyStop, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$successPerStage, simResult3$successPerStage, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$selectedPopulations, simResult3$selectedPopulations, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$numberOfPopulations, simResult3$numberOfPopulations, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$expectedNumberOfSubjects, simResult3$expectedNumberOfSubjects, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$sampleSizes, simResult3$sampleSizes, tolerance = 1e-05)
		expect_equal(simResult3CodeBased$conditionalPowerAchieved, simResult3$conditionalPowerAchieved, tolerance = 1e-05)
		expect_type(names(simResult3), "character")
		df <- as.data.frame(simResult3)
		expect_s3_class(df, "data.frame")
		expect_true(nrow(df) > 0 && ncol(df) > 0)
		mtx <- as.matrix(simResult3)
		expect_true(is.matrix(mtx))
		expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
	}

	# Run 4: epsilon selection with 'all' success criterion and Bonferroni
	# test; this is the only run with non-zero first-stage futility.
	suppressWarnings(simResult4 <- getSimulationEnrichmentMeans(design,
		plannedSubjects = c(60, 160),
		effectList = effectList, maxNumberOfIterations = 100,
		allocationRatioPlanned = 0.5, typeOfSelection = "epsilon", epsilonValue = 0.1,
		successCriterion = "all", intersectionTest = "Bonferroni", seed = 123
	))

	## Comparison of the results of SimulationResultsEnrichmentMeans object 'simResult4' with expected results
	expect_equal(simResult4$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100, 100, 100))
	expect_equal(simResult4$iterations[2, ], c(62, 86, 95, 80, 91, 98, 95, 93, 93))
	expect_equal(simResult4$rejectAtLeastOne, c(0.01, 0.15, 0.63, 0.17, 0.39, 0.71, 0.69, 0.73, 0.9), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult4$rejectedPopulationsPerStage)), c(0, 0, 0, 0, 0.02, 0, 0.01, 0.16, 0.02, 0.19, 0, 0.07, 0.05, 0.62, 0.05, 0.52, 0.08, 0.36, 0.01, 0, 0.05, 0.1, 0.22, 0.41, 0.01, 0, 0.12, 0.11, 0.29, 0.37, 0.01, 0.02, 0.13, 0.12, 0.49, 0.23), tolerance = 1e-07)
	expect_equal(simResult4$futilityPerStage[1, ], c(0.38, 0.14, 0.03, 0.19, 0.08, 0.02, 0.05, 0.04, 0), tolerance = 1e-07)
	expect_equal(simResult4$earlyStop[1, ], c(0.38, 0.14, 0.05, 0.2, 0.09, 0.02, 0.05, 0.07, 0.07), tolerance = 1e-07)
	expect_equal(simResult4$successPerStage[1, ], c(0, 0, 0.02, 0.01, 0.01, 0, 0, 0.03, 0.07), tolerance = 1e-07)
	expect_equal(simResult4$successPerStage[2, ], c(0, 0.13, 0.53, 0.16, 0.3, 0.57, 0.68, 0.61, 0.76), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult4$selectedPopulations)), c(1, 0.43, 1, 0.34, 1, 0.26, 1, 0.65, 1, 0.55, 1, 0.41, 1, 0.84, 1, 0.75, 1, 0.51, 1, 0.3, 1, 0.62, 1, 0.8, 1, 0.23, 1, 0.54, 1, 0.75, 1, 0.15, 1, 0.4, 1, 0.59), tolerance = 1e-07)
	expect_equal(simResult4$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2, 2, 2))
	expect_equal(simResult4$numberOfPopulations[2, ], c(1.1774194, 1.1162791, 1.1157895, 1.1, 1.1978022, 1.1836735, 1.0421053, 1.2365591, 1.1827957), tolerance = 1e-07)
	expect_equal(simResult4$expectedNumberOfSubjects, c(122, 146, 155, 140, 151, 158, 155, 153, 153))
	expect_equal(unlist(as.list(simResult4$sampleSizes)), c(12, 61.290323, 12, 42.325581, 12, 32.631579, 12, 77, 12, 52.527473, 12, 38.77551, 12, 87.368421, 12, 65.591398, 12, 49.247312, 48, 38.709677, 48, 57.674419, 48, 67.368421, 48, 23, 48, 47.472527, 48, 61.22449, 48, 12.631579, 48, 34.408602, 48, 50.752688), tolerance = 1e-07)
	expect_equal(simResult4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
	expect_equal(simResult4$conditionalPowerAchieved[2, ], c(0.10066083, 0.19572583, 0.27485551, 0.15033827, 0.32882422, 0.47317914, 0.22494724, 0.41529639, 0.62724251), tolerance = 1e-07)
	if (isTRUE(.isCompleteUnitTestSetEnabled())) {
		# Extended checks: print/summary must not error, and the simulation
		# result must round-trip through its generated R code representation.
		invisible(capture.output(expect_error(print(simResult4), NA)))
		expect_output(print(simResult4)$show())
		invisible(capture.output(expect_error(summary(simResult4), NA)))
		expect_output(summary(simResult4)$show())
		suppressWarnings(simResult4CodeBased <- eval(parse(text = getObjectRCode(simResult4, stringWrapParagraphWidth = NULL))))
		expect_equal(simResult4CodeBased$iterations, simResult4$iterations, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$rejectAtLeastOne, simResult4$rejectAtLeastOne, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$rejectedPopulationsPerStage, simResult4$rejectedPopulationsPerStage, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$futilityPerStage, simResult4$futilityPerStage, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$earlyStop, simResult4$earlyStop, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$successPerStage, simResult4$successPerStage, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$selectedPopulations, simResult4$selectedPopulations, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$numberOfPopulations, simResult4$numberOfPopulations, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$expectedNumberOfSubjects, simResult4$expectedNumberOfSubjects, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$sampleSizes, simResult4$sampleSizes, tolerance = 1e-05)
		expect_equal(simResult4CodeBased$conditionalPowerAchieved, simResult4$conditionalPowerAchieved, tolerance = 1e-05)
		expect_type(names(simResult4), "character")
		df <- as.data.frame(simResult4)
		expect_s3_class(df, "data.frame")
		expect_true(nrow(df) > 0 && ncol(df) > 0)
		mtx <- as.matrix(simResult4)
		expect_true(is.matrix(mtx))
		expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
	}
})

test_that("'getSimulationEnrichmentMeans': gMax = 3", {
	.skipTestIfDisabled()

	# @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
	# @refFS[Sec.]{fs:sec:enrichmentDesigns}
	# @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment}
	# @refFS[Sec.]{fs:subsec:adaptiveClosedTestProcedureEnrichment}
	# @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment}
	# @refFS[Sec.]{fs:sec:simulationFunctions}
	# @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns}
	# @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification}
	# @refFS[Formula]{fs:stratifiedtTestEnrichment}
	# @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSidakEnrichment}
	# @refFS[Formula]{fs:adjustedPValueSimesEnrichment}
	# @refFS[Formula]{fs:simulationEnrichmentMeansGenerate}
	# @refFS[Formula]{fs:simulationEnrichmentSelections}
	# @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification}
	# @refFS[Formula]{fs:enrichmentRejectionRule}
	effectList <- list(
		subGroups = c("S1", "S2", "S12", "R"),
		prevalences = c(0.05, 0.35, 0.15, 0.45),
		stDevs = c(2.2, 2.2, 2.2, 2.2),
		effects = matrix(c(
			0.3, 1.1, 0.2, 1.2,
			2.3, 3.1, 0.9, 1.2,
			3.1, 3.4, 0.3, 0.2,
			1.2, 2.4, 3.7, 2.1
		), byrow = TRUE, ncol = 4)
	)

	design <- getDesignInverseNormal(informationRates = c(0.4, 0.8, 1), typeOfDesign = "noEarlyEfficacy")

	suppressWarnings(simResult1 <- getSimulationEnrichmentMeans(design,
		plannedSubjects = c(20, 40, 50),
		effectList = effectList, maxNumberOfIterations = 100,
		allocationRatioPlanned = 2, typeOfSelection = "rBest", rValue = 2,
		adaptations = c(TRUE, FALSE), intersectionTest = "Sidak", seed = 123
	))

	## Comparison of the results of SimulationResultsEnrichmentMeans object 'simResult1' with expected results
	expect_equal(simResult1$iterations[1, ], c(100, 100, 100, 100))
	expect_equal(simResult1$iterations[2, ], c(100, 100, 100, 100))
	expect_equal(simResult1$iterations[3, ], c(100, 100, 100, 100))
	expect_equal(simResult1$rejectAtLeastOne, c(0.13, 0.74, 0.67, 0.93), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult1$rejectedPopulationsPerStage)), c(0, 0, 0.01, 0, 0, 0.09, 0, 0, 0.09, 0, 0, 0.5, 0, 0, 0.04, 0, 0, 0.59, 0, 0, 0.59, 0, 0, 0.66, 0, 0, 0.11, 0, 0, 0.47, 0, 0, 0.31, 0, 0, 0.55), tolerance = 1e-07)
	expect_equal(simResult1$futilityStop, c(0, 0, 0, 0))
	expect_equal(simResult1$futilityPerStage[1, ], c(0, 0, 0, 0))
	expect_equal(simResult1$futilityPerStage[2, ], c(0, 0, 0, 0))
	expect_equal(simResult1$earlyStop[1, ], c(0, 0, 0, 0))
	expect_equal(simResult1$earlyStop[2, ], c(0, 0, 0, 0))
	expect_equal(simResult1$successPerStage[1, ], c(0, 0, 0, 0))
	expect_equal(simResult1$successPerStage[2, ], c(0, 0, 0, 0))
	expect_equal(simResult1$successPerStage[3, ], c(0.03, 0.41, 0.32, 0.78), tolerance = 1e-07)
	expect_equal(unlist(as.list(simResult1$selectedPopulations)), c(1, 0.54, 0.54, 1, 0.41, 0.41, 1, 0.43, 0.43, 1, 0.64, 0.64, 1, 0.68, 0.68, 1, 0.91, 0.91, 1, 0.94, 0.94, 1, 0.74, 0.74, 1, 0.78, 0.78, 1, 0.68, 0.68, 1, 0.63, 0.63, 1, 0.62, 0.62), tolerance = 1e-07)
expect_equal(simResult1$numberOfPopulations[1, ], c(3, 3, 3, 3)) expect_equal(simResult1$numberOfPopulations[2, ], c(2, 2, 2, 2)) expect_equal(simResult1$numberOfPopulations[3, ], c(2, 2, 2, 2)) expect_equal(simResult1$expectedNumberOfSubjects, c(50, 50, 50, 50), tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$sampleSizes)), c(1, 1.18, 0.59, 1, 1.2618182, 0.63090909, 1, 1.3027273, 0.65136364, 1, 1.3109091, 0.65545455, 7, 8.26, 4.13, 7, 8.8327273, 4.4163636, 7, 9.1190909, 4.5595455, 7, 9.1763636, 4.5881818, 3, 3.54, 1.77, 3, 3.7854545, 1.8927273, 3, 3.9081818, 1.9540909, 3, 3.9327273, 1.9663636, 9, 7.02, 3.51, 9, 6.12, 3.06, 9, 5.67, 2.835, 9, 5.58, 2.79), tolerance = 1e-07) expect_equal(simResult1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult1$conditionalPowerAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simResult1$conditionalPowerAchieved[3, ], c(0.141872, 0.6853746, 0.62195245, 0.87969243), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult1), NA))) expect_output(print(simResult1)$show()) invisible(capture.output(expect_error(summary(simResult1), NA))) expect_output(summary(simResult1)$show()) suppressWarnings(simResult1CodeBased <- eval(parse(text = getObjectRCode(simResult1, stringWrapParagraphWidth = NULL)))) expect_equal(simResult1CodeBased$iterations, simResult1$iterations, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectAtLeastOne, simResult1$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectedPopulationsPerStage, simResult1$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$futilityStop, simResult1$futilityStop, tolerance = 1e-05) expect_equal(simResult1CodeBased$futilityPerStage, simResult1$futilityPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$earlyStop, simResult1$earlyStop, tolerance = 1e-05) expect_equal(simResult1CodeBased$successPerStage, 
simResult1$successPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$selectedPopulations, simResult1$selectedPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$numberOfPopulations, simResult1$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$expectedNumberOfSubjects, simResult1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult1CodeBased$sampleSizes, simResult1$sampleSizes, tolerance = 1e-05) expect_equal(simResult1CodeBased$conditionalPowerAchieved, simResult1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult1), "character") df <- as.data.frame(simResult1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult2 <- getSimulationEnrichmentMeans(design, plannedSubjects = c(20, 40, 50), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 2, typeOfSelection = "best", intersectionTest = "Simes", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentMeans object 'simResult2' with expected results expect_equal(simResult2$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simResult2$iterations[2, ], c(100, 100, 100, 100)) expect_equal(simResult2$iterations[3, ], c(100, 100, 100, 100)) expect_equal(simResult2$rejectAtLeastOne, c(0.1, 0.86, 0.64, 0.95), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$rejectedPopulationsPerStage)), c(0, 0, 0, 0, 0, 0.11, 0, 0, 0.09, 0, 0, 0.45, 0, 0, 0.07, 0, 0, 0.52, 0, 0, 0.5, 0, 0, 0.36, 0, 0, 0.03, 0, 0, 0.23, 0, 0, 0.05, 0, 0, 0.14), tolerance = 1e-07) expect_equal(simResult2$futilityStop, c(0, 0, 0, 0)) expect_equal(simResult2$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult2$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult2$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(simResult2$earlyStop[2, ], c(0, 0, 0, 0)) 
expect_equal(simResult2$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult2$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult2$successPerStage[3, ], c(0.1, 0.86, 0.64, 0.95), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$selectedPopulations)), c(1, 0.38, 0.38, 1, 0.15, 0.15, 1, 0.28, 0.28, 1, 0.46, 0.46, 1, 0.32, 0.32, 1, 0.6, 0.6, 1, 0.59, 0.59, 1, 0.37, 0.37, 1, 0.3, 0.3, 1, 0.25, 0.25, 1, 0.13, 0.13, 1, 0.17, 0.17), tolerance = 1e-07) expect_equal(simResult2$numberOfPopulations[1, ], c(3, 3, 3, 3)) expect_equal(simResult2$numberOfPopulations[2, ], c(1, 1, 1, 1)) expect_equal(simResult2$numberOfPopulations[3, ], c(1, 1, 1, 1)) expect_equal(simResult2$expectedNumberOfSubjects, c(50, 50, 50, 50)) expect_equal(unlist(as.list(simResult2$sampleSizes)), c(1, 2.2, 1.1, 1, 1, 0.5, 1, 1.53, 0.765, 1, 2.47, 1.235, 7, 6.58, 3.29, 7, 10.15, 5.075, 7, 9.17, 4.585, 7, 6.37, 3.185, 3, 8.52, 4.26, 3, 6.6, 3.3, 3, 8.13, 4.065, 3, 9.63, 4.815, 9, 2.7, 1.35, 9, 2.25, 1.125, 9, 1.17, 0.585, 9, 1.53, 0.765), tolerance = 1e-07) expect_equal(simResult2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult2$conditionalPowerAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simResult2$conditionalPowerAchieved[3, ], c(0.17206636, 0.78936816, 0.62458725, 0.9248007), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult2), NA))) expect_output(print(simResult2)$show()) invisible(capture.output(expect_error(summary(simResult2), NA))) expect_output(summary(simResult2)$show()) suppressWarnings(simResult2CodeBased <- eval(parse(text = getObjectRCode(simResult2, stringWrapParagraphWidth = NULL)))) expect_equal(simResult2CodeBased$iterations, simResult2$iterations, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectAtLeastOne, simResult2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectedPopulationsPerStage, 
simResult2$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$futilityStop, simResult2$futilityStop, tolerance = 1e-05) expect_equal(simResult2CodeBased$futilityPerStage, simResult2$futilityPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$earlyStop, simResult2$earlyStop, tolerance = 1e-05) expect_equal(simResult2CodeBased$successPerStage, simResult2$successPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$selectedPopulations, simResult2$selectedPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$numberOfPopulations, simResult2$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$expectedNumberOfSubjects, simResult2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult2CodeBased$sampleSizes, simResult2$sampleSizes, tolerance = 1e-05) expect_equal(simResult2CodeBased$conditionalPowerAchieved, simResult2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult2), "character") df <- as.data.frame(simResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult3 <- getSimulationEnrichmentMeans(design, plannedSubjects = c(20, 40, 50), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 2, typeOfSelection = "epsilon", epsilonValue = 0.1, intersectionTest = "Bonferroni", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentMeans object 'simResult3' with expected results expect_equal(simResult3$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simResult3$iterations[2, ], c(75, 96, 91, 99)) expect_equal(simResult3$iterations[3, ], c(74, 96, 91, 99)) expect_equal(simResult3$rejectAtLeastOne, c(0.17, 0.71, 0.67, 0.96), tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$rejectedPopulationsPerStage)), c(0, 0, 0.04, 0, 0, 0.07, 0, 0, 0.06, 0, 0, 0.49, 0, 0, 
0.07, 0, 0, 0.44, 0, 0, 0.57, 0, 0, 0.32, 0, 0, 0.06, 0, 0, 0.2, 0, 0, 0.04, 0, 0, 0.16), tolerance = 1e-07) expect_equal(simResult3$futilityStop, c(0.26, 0.04, 0.09, 0.01), tolerance = 1e-07) expect_equal(simResult3$futilityPerStage[1, ], c(0.25, 0.04, 0.09, 0.01), tolerance = 1e-07) expect_equal(simResult3$futilityPerStage[2, ], c(0.01, 0, 0, 0), tolerance = 1e-07) expect_equal(simResult3$earlyStop[1, ], c(0.25, 0.04, 0.09, 0.01), tolerance = 1e-07) expect_equal(simResult3$earlyStop[2, ], c(0.01, 0, 0, 0), tolerance = 1e-07) expect_equal(simResult3$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult3$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult3$successPerStage[3, ], c(0.17, 0.71, 0.67, 0.96), tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$selectedPopulations)), c(1, 0.26, 0.25, 1, 0.2, 0.19, 1, 0.21, 0.17, 1, 0.51, 0.5, 1, 0.27, 0.26, 1, 0.54, 0.51, 1, 0.65, 0.65, 1, 0.33, 0.33, 1, 0.28, 0.23, 1, 0.28, 0.26, 1, 0.13, 0.09, 1, 0.19, 0.17), tolerance = 1e-07) expect_equal(simResult3$numberOfPopulations[1, ], c(3, 3, 3, 3)) expect_equal(simResult3$numberOfPopulations[2, ], c(1.08, 1.0625, 1.0879121, 1.040404), tolerance = 1e-07) expect_equal(simResult3$numberOfPopulations[3, ], c(1, 1, 1, 1.010101), tolerance = 1e-07) expect_equal(simResult3$expectedNumberOfSubjects, c(42.4, 48.8, 47.3, 49.7), tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$sampleSizes)), c(1, 2.0642424, 1, 1, 1.2481061, 0.63020833, 1, 1.1368631, 0.51648352, 1, 2.6023875, 1.3324151, 7, 6.7030303, 3.5472973, 7, 9.3200758, 4.6666667, 7, 10.342657, 5.3461538, 7, 5.9843893, 2.9279155, 3, 7.8727273, 4.0540541, 3, 6.8068182, 3.484375, 3, 7.2347652, 3.6923077, 3, 9.6859504, 4.9669421, 9, 3.36, 1.3986486, 9, 2.625, 1.21875, 9, 1.2857143, 0.44505495, 9, 1.7272727, 0.77272727), tolerance = 1e-07) expect_equal(simResult3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult3$conditionalPowerAchieved[2, ], c(0, 0, 0, 0)) 
expect_equal(simResult3$conditionalPowerAchieved[3, ], c(0.31528596, 0.78554895, 0.74702653, 0.96322984), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult3), NA))) expect_output(print(simResult3)$show()) invisible(capture.output(expect_error(summary(simResult3), NA))) expect_output(summary(simResult3)$show()) suppressWarnings(simResult3CodeBased <- eval(parse(text = getObjectRCode(simResult3, stringWrapParagraphWidth = NULL)))) expect_equal(simResult3CodeBased$iterations, simResult3$iterations, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectAtLeastOne, simResult3$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectedPopulationsPerStage, simResult3$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$futilityStop, simResult3$futilityStop, tolerance = 1e-05) expect_equal(simResult3CodeBased$futilityPerStage, simResult3$futilityPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$earlyStop, simResult3$earlyStop, tolerance = 1e-05) expect_equal(simResult3CodeBased$successPerStage, simResult3$successPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$selectedPopulations, simResult3$selectedPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$numberOfPopulations, simResult3$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$expectedNumberOfSubjects, simResult3$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult3CodeBased$sampleSizes, simResult3$sampleSizes, tolerance = 1e-05) expect_equal(simResult3CodeBased$conditionalPowerAchieved, simResult3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult3), "character") df <- as.data.frame(simResult3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) 
test_that("'getSimulationEnrichmentMeans': gMax = 4", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:subsec:adaptiveClosedTestProcedureEnrichment} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:stratifiedtTestEnrichment} # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:simulationEnrichmentMeansGenerate} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} effects <- matrix(c(2.3, 3.1, 0.9, 1.2, 2.1, 3.4, 0.9, 0.2), byrow = TRUE, ncol = 8) effectList <- list(subGroups = c("S1", "S2", "S3", "S12", "S13", "S23", "S123", "R"), prevalences = c(0.1, 0.05, 0.1, 0.15, 0.1, 0.15, 0.3, 0.05), effects = effects, stDevs = c(rep(3.5, 4), rep(4.5, 4))) design <- getDesignInverseNormal(informationRates = c(0.4, 1), typeOfDesign = "noEarlyEfficacy") suppressWarnings(simResult1 <- getSimulationEnrichmentMeans(design, plannedSubjects = c(100, 200), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 2, seed = 123, typeOfSelection = "epsilon", epsilonValue = 0.15, adaptations = c(T), intersectionTest = "Bonferroni", stratifiedAnalysis = TRUE, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 50), maxNumberOfSubjectsPerStage = c(NA, 200), thetaH1 = 2, stDevH1 = 3 )) ## Comparison of the results of SimulationResultsEnrichmentMeans object 'simResult1' with expected results expect_equal(simResult1$iterations[1, ], 100) expect_equal(simResult1$iterations[2, ], 97) 
expect_equal(simResult1$rejectAtLeastOne, 0.54, tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$rejectedPopulationsPerStage)), c(0, 0.12, 0, 0.21, 0, 0.19, 0, 0.08), tolerance = 1e-07) expect_equal(simResult1$futilityPerStage[1, ], 0.03, tolerance = 1e-07) expect_equal(simResult1$earlyStop[1, ], 0.03, tolerance = 1e-07) expect_equal(simResult1$successPerStage[1, ], 0) expect_equal(simResult1$successPerStage[2, ], 0.5, tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$selectedPopulations)), c(1, 0.28, 1, 0.37, 1, 0.37, 1, 0.22), tolerance = 1e-07) expect_equal(simResult1$numberOfPopulations[1, ], 4) expect_equal(simResult1$numberOfPopulations[2, ], 1.2783505, tolerance = 1e-07) expect_equal(simResult1$expectedNumberOfSubjects, 165.08824, tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$sampleSizes)), c(10, 3.9329768, 5, 2.3736653, 10, 5.1412409, 15, 9.6615922, 10, 6.744047, 15, 11.418006, 30, 26.843598, 5, 0.98615092), tolerance = 1e-07) expect_equal(simResult1$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult1$conditionalPowerAchieved[2, ], 0.87059965, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult1), NA))) expect_output(print(simResult1)$show()) invisible(capture.output(expect_error(summary(simResult1), NA))) expect_output(summary(simResult1)$show()) suppressWarnings(simResult1CodeBased <- eval(parse(text = getObjectRCode(simResult1, stringWrapParagraphWidth = NULL)))) expect_equal(simResult1CodeBased$iterations, simResult1$iterations, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectAtLeastOne, simResult1$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectedPopulationsPerStage, simResult1$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$futilityPerStage, simResult1$futilityPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$earlyStop, simResult1$earlyStop, tolerance = 1e-05) 
expect_equal(simResult1CodeBased$successPerStage, simResult1$successPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$selectedPopulations, simResult1$selectedPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$numberOfPopulations, simResult1$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$expectedNumberOfSubjects, simResult1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult1CodeBased$sampleSizes, simResult1$sampleSizes, tolerance = 1e-05) expect_equal(simResult1CodeBased$conditionalPowerAchieved, simResult1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult1), "character") df <- as.data.frame(simResult1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult2 <- getSimulationEnrichmentMeans(design, plannedSubjects = c(100, 200), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 2, seed = 123, typeOfSelection = "rbest", rValue = 2, adaptations = c(T), intersectionTest = "Sidak", stratifiedAnalysis = TRUE, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 50), maxNumberOfSubjectsPerStage = c(NA, 200), thetaH1 = 2, stDevH1 = 3 )) ## Comparison of the results of SimulationResultsEnrichmentMeans object 'simResult2' with expected results expect_equal(simResult2$iterations[1, ], 100) expect_equal(simResult2$iterations[2, ], 100) expect_equal(simResult2$rejectAtLeastOne, 0.55, tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$rejectedPopulationsPerStage)), c(0, 0.19, 0, 0.2, 0, 0.25, 0, 0.28), tolerance = 1e-07) expect_equal(simResult2$futilityPerStage[1, ], 0) expect_equal(simResult2$earlyStop[1, ], 0) expect_equal(simResult2$successPerStage[1, ], 0) expect_equal(simResult2$successPerStage[2, ], 0.37, tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$selectedPopulations)), c(1, 
0.41, 1, 0.49, 1, 0.54, 1, 0.56), tolerance = 1e-07) expect_equal(simResult2$numberOfPopulations[1, ], 4) expect_equal(simResult2$numberOfPopulations[2, ], 2) expect_equal(simResult2$expectedNumberOfSubjects, 174.9744, tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$sampleSizes)), c(10, 6.8133223, 5, 3.3795954, 10, 6.9887063, 15, 11.878041, 10, 7.9186938, 15, 11.878041, 30, 23.756082, 5, 2.3619159), tolerance = 1e-07) expect_equal(simResult2$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult2$conditionalPowerAchieved[2, ], 0.81461286, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult2), NA))) expect_output(print(simResult2)$show()) invisible(capture.output(expect_error(summary(simResult2), NA))) expect_output(summary(simResult2)$show()) suppressWarnings(simResult2CodeBased <- eval(parse(text = getObjectRCode(simResult2, stringWrapParagraphWidth = NULL)))) expect_equal(simResult2CodeBased$iterations, simResult2$iterations, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectAtLeastOne, simResult2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectedPopulationsPerStage, simResult2$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$futilityPerStage, simResult2$futilityPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$earlyStop, simResult2$earlyStop, tolerance = 1e-05) expect_equal(simResult2CodeBased$successPerStage, simResult2$successPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$selectedPopulations, simResult2$selectedPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$numberOfPopulations, simResult2$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$expectedNumberOfSubjects, simResult2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(simResult2CodeBased$sampleSizes, simResult2$sampleSizes, tolerance = 1e-05) 
expect_equal(simResult2CodeBased$conditionalPowerAchieved, simResult2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult2), "character") df <- as.data.frame(simResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationEnrichmentMeans': comparison of base and enrichment for inverse normal", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:stratifiedtTestEnrichment} # @refFS[Formula]{fs:simulationEnrichmentMeansGenerate} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} effectSeq <- seq(0, 0.7, 0.1) effects <- matrix(effectSeq, byrow = TRUE, ncol = 1) effectList <- list(subGroups = "F", prevalences = 1, stDevs = 1.3, effects = effects) design <- getDesignInverseNormal(informationRates = c(0.3, 1), typeOfDesign = "OF", futilityBounds = c(0.1)) suppressWarnings(x1 <- getSimulationEnrichmentMeans(design, plannedSubjects = c(60, 180), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 2, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10), maxNumberOfSubjectsPerStage = c(NA, 180), thetaH1 = 0.5, seed = 123 )) x2 <- getSimulationMeans(design, plannedSubjects = c(60, 180), alternative = effectSeq, maxNumberOfIterations = 100, allocationRatioPlanned = 2, stDev = 1.3, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10), maxNumberOfSubjectsPerStage = c(NA, 180), thetaH1 = 0.5, seed = 123 ) comp1 <- x2$overallReject - x1$rejectAtLeastOne ## Comparison of the results of numeric object 'comp1' 
with expected results expect_equal(comp1, c(0.02, -0.01, 0.01, -0.03, 0.08, -0.05, 0.04, -0.03), tolerance = 1e-07) comp2 <- x2$conditionalPowerAchieved - x1$conditionalPowerAchieved ## Comparison of the results of matrixarray object 'comp2' with expected results expect_equal(comp2[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(comp2[2, ], c(0.0070281375, -0.0046190664, -0.020739941, -0.011327634, -0.0046695544, 0.0025709653, 0.0032941476, 0.0045055727), tolerance = 1e-07) comp3 <- x2$expectedNumberOfSubjects - x1$expectedNumberOfSubjects ## Comparison of the results of numeric object 'comp3' with expected results expect_equal(comp3, c(-5.9383973, -5.0998562, -5.4120322, 1.2304065, -6.6264122, -15.289639, -4.6069346, -0.41855064), tolerance = 1e-07) }) test_that("'getSimulationEnrichmentMeans': comparison of base and enrichment for Fisher combination", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:stratifiedtTestEnrichment} # @refFS[Formula]{fs:simulationEnrichmentMeansGenerate} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} effectSeq <- seq(0, 0.7, 0.1) effects <- matrix(effectSeq, byrow = TRUE, ncol = 1) effectList <- list(subGroups = "F", prevalences = 1, stDevs = 1.3, effects = effects) design <- getDesignFisher(informationRates = c(0.3, 1), kMax = 2) suppressWarnings(x1 <- getSimulationEnrichmentMeans(design, plannedSubjects = c(60, 180), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 2, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10), maxNumberOfSubjectsPerStage = c(NA, 180), thetaH1 = 0.5, seed 
= 123 )) x2 <- getSimulationMeans(design, plannedSubjects = c(60, 180), alternative = effectSeq, maxNumberOfIterations = 100, allocationRatioPlanned = 2, stDev = 1.3, conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(NA, 10), maxNumberOfSubjectsPerStage = c(NA, 180), thetaH1 = 0.5, seed = 123 ) comp4 <- x2$overallReject - x1$rejectAtLeastOne ## Comparison of the results of numeric object 'comp4' with expected results expect_equal(comp4, c(0, 0, 0.02, -0.01, 0.04, 0.03, 0.07, -0.01), tolerance = 1e-07) comp5 <- x2$conditionalPowerAchieved - x1$conditionalPowerAchieved ## Comparison of the results of matrixarray object 'comp5' with expected results expect_equal(comp5[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(comp5[2, ], c(0, -0.0026724666, -0.020506475, -0.0095136176, -0.01871572, -0.0085381669, -0.0011844682, -0.023030147), tolerance = 1e-07) comp6 <- x2$expectedNumberOfSubjects - x1$expectedNumberOfSubjects ## Comparison of the results of numeric object 'comp6' with expected results expect_equal(comp6, c(0, 3.5569071, 9.4761962, -1.6191689, -3.0007806, -12.622314, 2.072784, -19.12106), tolerance = 1e-07) }) rpact/tests/testthat/test-class_time.R0000644000176200001440000053710014370207346017624 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-class_time.R ## | Creation date: 06 February 2023, 12:04:17 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 
(Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Class 'PiecewiseSurvivalTime'") test_that("Testing 'getPiecewiseSurvivalTime': isPiecewiseSurvivalEnabled()", { # @refFS[Tab.]{fs:tab:output:getPiecewiseSurvivalTime} expect_false(getPiecewiseSurvivalTime()$isPiecewiseSurvivalEnabled()) expect_false(getPiecewiseSurvivalTime(piecewiseSurvivalTime = NA)$isPiecewiseSurvivalEnabled()) }) test_that("Testing 'getPiecewiseSurvivalTime': simple vector based definition", { # @refFS[Tab.]{fs:tab:output:getPiecewiseSurvivalTime} pwSurvivalTime1 <- getPiecewiseSurvivalTime(lambda2 = 0.5, hazardRatio = 0.8) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime1' with expected results expect_equal(pwSurvivalTime1$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime1$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime1$lambda2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime1$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime1$pi1, NA_real_) expect_equal(pwSurvivalTime1$pi2, NA_real_) expect_equal(pwSurvivalTime1$median1, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime1$median2, 1.3862944, tolerance = 1e-07) expect_equal(pwSurvivalTime1$eventTime, NA_real_) expect_equal(pwSurvivalTime1$kappa, 1) expect_equal(pwSurvivalTime1$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime1$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime1$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime1), NA))) expect_output(print(pwSurvivalTime1)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime1), NA))) expect_output(summary(pwSurvivalTime1)$show()) pwSurvivalTime1CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime1, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime1CodeBased$piecewiseSurvivalTime, pwSurvivalTime1$piecewiseSurvivalTime, tolerance = 1e-05) 
expect_equal(pwSurvivalTime1CodeBased$lambda1, pwSurvivalTime1$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$lambda2, pwSurvivalTime1$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$hazardRatio, pwSurvivalTime1$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$pi1, pwSurvivalTime1$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$pi2, pwSurvivalTime1$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$median1, pwSurvivalTime1$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$median2, pwSurvivalTime1$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$eventTime, pwSurvivalTime1$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$kappa, pwSurvivalTime1$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime1$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$delayedResponseAllowed, pwSurvivalTime1$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime1CodeBased$delayedResponseEnabled, pwSurvivalTime1$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime1), "character") df <- as.data.frame(pwSurvivalTime1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime2 <- getPiecewiseSurvivalTime(lambda2 = 0.5, lambda1 = 0.4) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime2$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, NA_real_) expect_equal(pwSurvivalTime2$pi2, NA_real_) expect_equal(pwSurvivalTime2$median1, 
1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median2, 1.3862944, tolerance = 1e-07) expect_equal(pwSurvivalTime2$eventTime, NA_real_) expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime2), NA))) expect_output(print(pwSurvivalTime2)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime2), NA))) expect_output(summary(pwSurvivalTime2)$show()) pwSurvivalTime2CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime2, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime2CodeBased$piecewiseSurvivalTime, pwSurvivalTime2$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$lambda1, pwSurvivalTime2$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$lambda2, pwSurvivalTime2$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$hazardRatio, pwSurvivalTime2$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$pi1, pwSurvivalTime2$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$pi2, pwSurvivalTime2$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$median1, pwSurvivalTime2$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$median2, pwSurvivalTime2$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$eventTime, pwSurvivalTime2$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$kappa, pwSurvivalTime2$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime2$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$delayedResponseAllowed, pwSurvivalTime2$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$delayedResponseEnabled, 
pwSurvivalTime2$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime2), "character") df <- as.data.frame(pwSurvivalTime2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() pwSurvivalTime2 <- getPiecewiseSurvivalTime(pi2 = 0.5, hazardRatio = 0.8) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime2$lambda1, 0.046209812, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.057762265, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, 0.42565082, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median1, 15) expect_equal(pwSurvivalTime2$median2, 12) expect_equal(pwSurvivalTime2$eventTime, 12) expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime2), NA))) expect_output(print(pwSurvivalTime2)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime2), NA))) expect_output(summary(pwSurvivalTime2)$show()) pwSurvivalTime2CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime2, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime2CodeBased$piecewiseSurvivalTime, pwSurvivalTime2$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$lambda1, pwSurvivalTime2$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$lambda2, pwSurvivalTime2$lambda2, tolerance = 1e-05) 
expect_equal(pwSurvivalTime2CodeBased$hazardRatio, pwSurvivalTime2$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$pi1, pwSurvivalTime2$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$pi2, pwSurvivalTime2$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$median1, pwSurvivalTime2$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$median2, pwSurvivalTime2$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$eventTime, pwSurvivalTime2$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$kappa, pwSurvivalTime2$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime2$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$delayedResponseAllowed, pwSurvivalTime2$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$delayedResponseEnabled, pwSurvivalTime2$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime2), "character") df <- as.data.frame(pwSurvivalTime2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime2 <- getPiecewiseSurvivalTime(pi2 = 0.5, pi1 = 0.4) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime2$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.057762265, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.73696559, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median1, 16.282985, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median2, 12) expect_equal(pwSurvivalTime2$eventTime, 12) 
expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime2), NA))) expect_output(print(pwSurvivalTime2)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime2), NA))) expect_output(summary(pwSurvivalTime2)$show()) pwSurvivalTime2CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime2, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime2CodeBased$piecewiseSurvivalTime, pwSurvivalTime2$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$lambda1, pwSurvivalTime2$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$lambda2, pwSurvivalTime2$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$hazardRatio, pwSurvivalTime2$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$pi1, pwSurvivalTime2$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$pi2, pwSurvivalTime2$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$median1, pwSurvivalTime2$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$median2, pwSurvivalTime2$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$eventTime, pwSurvivalTime2$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$kappa, pwSurvivalTime2$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime2$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$delayedResponseAllowed, pwSurvivalTime2$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$delayedResponseEnabled, pwSurvivalTime2$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime2), "character") df <- as.data.frame(pwSurvivalTime2) 
expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime3 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime3' with expected results expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime3$lambda1, c(0.24, 0.32), tolerance = 1e-07) expect_equal(pwSurvivalTime3$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime3$hazardRatio, c(0.6, 0.8), tolerance = 1e-07) expect_equal(pwSurvivalTime3$pi1, NA_real_) expect_equal(pwSurvivalTime3$pi2, NA_real_) expect_equal(pwSurvivalTime3$median1, c(2.8881133, 2.1660849), tolerance = 1e-07) expect_equal(pwSurvivalTime3$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime3$eventTime, NA_real_) expect_equal(pwSurvivalTime3$kappa, 1) expect_equal(pwSurvivalTime3$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime3$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime3$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime3), NA))) expect_output(print(pwSurvivalTime3)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime3), NA))) expect_output(summary(pwSurvivalTime3)$show()) pwSurvivalTime3CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime3, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime3CodeBased$piecewiseSurvivalTime, pwSurvivalTime3$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$lambda1, pwSurvivalTime3$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$lambda2, pwSurvivalTime3$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$hazardRatio, pwSurvivalTime3$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$pi1, pwSurvivalTime3$pi1, 
tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$pi2, pwSurvivalTime3$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$median1, pwSurvivalTime3$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$median2, pwSurvivalTime3$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$eventTime, pwSurvivalTime3$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$kappa, pwSurvivalTime3$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime3$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$delayedResponseAllowed, pwSurvivalTime3$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$delayedResponseEnabled, pwSurvivalTime3$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime3), "character") df <- as.data.frame(pwSurvivalTime3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime8 <- getPiecewiseSurvivalTime(pi2 = 0.4, pi1 = 0.3) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime8' with expected results expect_equal(pwSurvivalTime8$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime8$lambda1, 0.029722912, tolerance = 1e-07) expect_equal(pwSurvivalTime8$lambda2, 0.042568802, tolerance = 1e-07) expect_equal(pwSurvivalTime8$hazardRatio, 0.69823229, tolerance = 1e-07) expect_equal(pwSurvivalTime8$pi1, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime8$pi2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime8$median1, 23.320299, tolerance = 1e-07) expect_equal(pwSurvivalTime8$median2, 16.282985, tolerance = 1e-07) expect_equal(pwSurvivalTime8$eventTime, 12) expect_equal(pwSurvivalTime8$kappa, 1) expect_equal(pwSurvivalTime8$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime8$delayedResponseAllowed, 
FALSE) expect_equal(pwSurvivalTime8$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime8), NA))) expect_output(print(pwSurvivalTime8)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime8), NA))) expect_output(summary(pwSurvivalTime8)$show()) pwSurvivalTime8CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime8, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime8CodeBased$piecewiseSurvivalTime, pwSurvivalTime8$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$lambda1, pwSurvivalTime8$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$lambda2, pwSurvivalTime8$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$hazardRatio, pwSurvivalTime8$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$pi1, pwSurvivalTime8$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$pi2, pwSurvivalTime8$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$median1, pwSurvivalTime8$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$median2, pwSurvivalTime8$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$eventTime, pwSurvivalTime8$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$kappa, pwSurvivalTime8$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime8$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$delayedResponseAllowed, pwSurvivalTime8$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime8CodeBased$delayedResponseEnabled, pwSurvivalTime8$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime8), "character") df <- as.data.frame(pwSurvivalTime8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) 
> 0 && ncol(mtx) > 0) } pwSurvivalTime9 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi2 = 0.3) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime9' with expected results expect_equal(pwSurvivalTime9$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime9$lambda1, c(0.017833747, 0.02377833), tolerance = 1e-07) expect_equal(pwSurvivalTime9$lambda2, 0.029722912, tolerance = 1e-07) expect_equal(pwSurvivalTime9$hazardRatio, c(0.6, 0.8), tolerance = 1e-07) expect_equal(pwSurvivalTime9$pi1, c(0.19265562, 0.24824135), tolerance = 1e-07) expect_equal(pwSurvivalTime9$pi2, 0.3, tolerance = 1e-07) expect_equal(pwSurvivalTime9$median1, c(38.867164, 29.150373), tolerance = 1e-07) expect_equal(pwSurvivalTime9$median2, 23.320299, tolerance = 1e-07) expect_equal(pwSurvivalTime9$eventTime, 12) expect_equal(pwSurvivalTime9$kappa, 1) expect_equal(pwSurvivalTime9$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime9$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime9$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime9), NA))) expect_output(print(pwSurvivalTime9)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime9), NA))) expect_output(summary(pwSurvivalTime9)$show()) pwSurvivalTime9CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime9, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime9CodeBased$piecewiseSurvivalTime, pwSurvivalTime9$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$lambda1, pwSurvivalTime9$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$lambda2, pwSurvivalTime9$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$hazardRatio, pwSurvivalTime9$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$pi1, pwSurvivalTime9$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$pi2, pwSurvivalTime9$pi2, tolerance = 
1e-05) expect_equal(pwSurvivalTime9CodeBased$median1, pwSurvivalTime9$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$median2, pwSurvivalTime9$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$eventTime, pwSurvivalTime9$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$kappa, pwSurvivalTime9$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime9$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$delayedResponseAllowed, pwSurvivalTime9$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime9CodeBased$delayedResponseEnabled, pwSurvivalTime9$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime9), "character") df <- as.data.frame(pwSurvivalTime9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime10 <- getPiecewiseSurvivalTime(median2 = 1.386294, hazardRatio = 0.8) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime10' with expected results expect_equal(pwSurvivalTime10$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime10$lambda1, 0.4000001, tolerance = 1e-07) expect_equal(pwSurvivalTime10$lambda2, 0.50000013, tolerance = 1e-07) expect_equal(pwSurvivalTime10$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime10$pi1, NA_real_) expect_equal(pwSurvivalTime10$pi2, NA_real_) expect_equal(pwSurvivalTime10$median1, 1.7328675, tolerance = 1e-07) expect_equal(pwSurvivalTime10$median2, 1.386294, tolerance = 1e-07) expect_equal(pwSurvivalTime10$eventTime, NA_real_) expect_equal(pwSurvivalTime10$kappa, 1) expect_equal(pwSurvivalTime10$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime10$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime10$delayedResponseEnabled, FALSE) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime10), NA))) expect_output(print(pwSurvivalTime10)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime10), NA))) expect_output(summary(pwSurvivalTime10)$show()) pwSurvivalTime10CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime10, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime10CodeBased$piecewiseSurvivalTime, pwSurvivalTime10$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$lambda1, pwSurvivalTime10$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$lambda2, pwSurvivalTime10$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$hazardRatio, pwSurvivalTime10$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$pi1, pwSurvivalTime10$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$pi2, pwSurvivalTime10$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$median1, pwSurvivalTime10$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$median2, pwSurvivalTime10$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$eventTime, pwSurvivalTime10$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$kappa, pwSurvivalTime10$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime10$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$delayedResponseAllowed, pwSurvivalTime10$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$delayedResponseEnabled, pwSurvivalTime10$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime10), "character") df <- as.data.frame(pwSurvivalTime10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } 
pwSurvivalTime11 <- getPiecewiseSurvivalTime(median2 = 1.386294, lambda1 = 0.4) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime11' with expected results expect_equal(pwSurvivalTime11$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime11$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime11$lambda2, 0.50000013, tolerance = 1e-07) expect_equal(pwSurvivalTime11$hazardRatio, 0.79999979, tolerance = 1e-07) expect_equal(pwSurvivalTime11$pi1, NA_real_) expect_equal(pwSurvivalTime11$pi2, NA_real_) expect_equal(pwSurvivalTime11$median1, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime11$median2, 1.386294, tolerance = 1e-07) expect_equal(pwSurvivalTime11$eventTime, NA_real_) expect_equal(pwSurvivalTime11$kappa, 1) expect_equal(pwSurvivalTime11$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime11$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime11$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime11), NA))) expect_output(print(pwSurvivalTime11)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime11), NA))) expect_output(summary(pwSurvivalTime11)$show()) pwSurvivalTime11CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime11, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime11CodeBased$piecewiseSurvivalTime, pwSurvivalTime11$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$lambda1, pwSurvivalTime11$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$lambda2, pwSurvivalTime11$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$hazardRatio, pwSurvivalTime11$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$pi1, pwSurvivalTime11$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$pi2, pwSurvivalTime11$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$median1, pwSurvivalTime11$median1, 
tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$median2, pwSurvivalTime11$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$eventTime, pwSurvivalTime11$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$kappa, pwSurvivalTime11$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime11$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$delayedResponseAllowed, pwSurvivalTime11$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$delayedResponseEnabled, pwSurvivalTime11$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime11), "character") df <- as.data.frame(pwSurvivalTime11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime12 <- getPiecewiseSurvivalTime(median2 = 5, median1 = 6) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime12' with expected results expect_equal(pwSurvivalTime12$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime12$lambda1, 0.11552453, tolerance = 1e-07) expect_equal(pwSurvivalTime12$lambda2, 0.13862944, tolerance = 1e-07) expect_equal(pwSurvivalTime12$hazardRatio, 0.83333333, tolerance = 1e-07) expect_equal(pwSurvivalTime12$pi1, NA_real_) expect_equal(pwSurvivalTime12$pi2, NA_real_) expect_equal(pwSurvivalTime12$median1, 6) expect_equal(pwSurvivalTime12$median2, 5) expect_equal(pwSurvivalTime12$eventTime, NA_real_) expect_equal(pwSurvivalTime12$kappa, 1) expect_equal(pwSurvivalTime12$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime12$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime12$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime12), NA))) 
expect_output(print(pwSurvivalTime12)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime12), NA))) expect_output(summary(pwSurvivalTime12)$show()) pwSurvivalTime12CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime12, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime12CodeBased$piecewiseSurvivalTime, pwSurvivalTime12$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$lambda1, pwSurvivalTime12$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$lambda2, pwSurvivalTime12$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$hazardRatio, pwSurvivalTime12$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$pi1, pwSurvivalTime12$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$pi2, pwSurvivalTime12$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$median1, pwSurvivalTime12$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$median2, pwSurvivalTime12$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$eventTime, pwSurvivalTime12$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$kappa, pwSurvivalTime12$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime12$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$delayedResponseAllowed, pwSurvivalTime12$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$delayedResponseEnabled, pwSurvivalTime12$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime12), "character") df <- as.data.frame(pwSurvivalTime12) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime12) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime13 <- getPiecewiseSurvivalTime(median2 = 1.386294, lambda1 = c(0.3, 0.4)) ## Comparison of the results of 
PiecewiseSurvivalTime object 'pwSurvivalTime13' with expected results expect_equal(pwSurvivalTime13$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime13$lambda1, c(0.3, 0.4), tolerance = 1e-07) expect_equal(pwSurvivalTime13$lambda2, 0.50000013, tolerance = 1e-07) expect_equal(pwSurvivalTime13$hazardRatio, c(0.59999984, 0.79999979), tolerance = 1e-07) expect_equal(pwSurvivalTime13$pi1, NA_real_) expect_equal(pwSurvivalTime13$pi2, NA_real_) expect_equal(pwSurvivalTime13$median1, c(2.3104906, 1.732868), tolerance = 1e-07) expect_equal(pwSurvivalTime13$median2, 1.386294, tolerance = 1e-07) expect_equal(pwSurvivalTime13$eventTime, NA_real_) expect_equal(pwSurvivalTime13$kappa, 1) expect_equal(pwSurvivalTime13$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime13$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime13$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime13), NA))) expect_output(print(pwSurvivalTime13)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime13), NA))) expect_output(summary(pwSurvivalTime13)$show()) pwSurvivalTime13CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime13, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime13CodeBased$piecewiseSurvivalTime, pwSurvivalTime13$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$lambda1, pwSurvivalTime13$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$lambda2, pwSurvivalTime13$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$hazardRatio, pwSurvivalTime13$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$pi1, pwSurvivalTime13$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$pi2, pwSurvivalTime13$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$median1, pwSurvivalTime13$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$median2, 
pwSurvivalTime13$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$eventTime, pwSurvivalTime13$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$kappa, pwSurvivalTime13$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime13$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$delayedResponseAllowed, pwSurvivalTime13$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$delayedResponseEnabled, pwSurvivalTime13$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime13), "character") df <- as.data.frame(pwSurvivalTime13) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime13) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime14 <- getPiecewiseSurvivalTime(median2 = 5, median1 = c(6:8)) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime14' with expected results expect_equal(pwSurvivalTime14$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime14$lambda1, c(0.11552453, 0.099021026, 0.086643398), tolerance = 1e-07) expect_equal(pwSurvivalTime14$lambda2, 0.13862944, tolerance = 1e-07) expect_equal(pwSurvivalTime14$hazardRatio, c(0.83333333, 0.71428571, 0.625), tolerance = 1e-07) expect_equal(pwSurvivalTime14$pi1, NA_real_) expect_equal(pwSurvivalTime14$pi2, NA_real_) expect_equal(pwSurvivalTime14$median1, c(6, 7, 8)) expect_equal(pwSurvivalTime14$median2, 5) expect_equal(pwSurvivalTime14$eventTime, NA_real_) expect_equal(pwSurvivalTime14$kappa, 1) expect_equal(pwSurvivalTime14$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime14$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime14$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime14), NA))) 
expect_output(print(pwSurvivalTime14)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime14), NA))) expect_output(summary(pwSurvivalTime14)$show()) pwSurvivalTime14CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime14, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime14CodeBased$piecewiseSurvivalTime, pwSurvivalTime14$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$lambda1, pwSurvivalTime14$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$lambda2, pwSurvivalTime14$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$hazardRatio, pwSurvivalTime14$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$pi1, pwSurvivalTime14$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$pi2, pwSurvivalTime14$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$median1, pwSurvivalTime14$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$median2, pwSurvivalTime14$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$eventTime, pwSurvivalTime14$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$kappa, pwSurvivalTime14$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime14$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$delayedResponseAllowed, pwSurvivalTime14$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime14CodeBased$delayedResponseEnabled, pwSurvivalTime14$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime14), "character") df <- as.data.frame(pwSurvivalTime14) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime14) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime15 <- getPiecewiseSurvivalTime(median2 = 2, hazardRatio = 0.8) ## Comparison of the results of 
PiecewiseSurvivalTime object 'pwSurvivalTime15' with expected results expect_equal(pwSurvivalTime15$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime15$lambda1, 0.27725887, tolerance = 1e-07) expect_equal(pwSurvivalTime15$lambda2, 0.34657359, tolerance = 1e-07) expect_equal(pwSurvivalTime15$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime15$pi1, NA_real_) expect_equal(pwSurvivalTime15$pi2, NA_real_) expect_equal(pwSurvivalTime15$median1, 2.5, tolerance = 1e-07) expect_equal(pwSurvivalTime15$median2, 2) expect_equal(pwSurvivalTime15$eventTime, NA_real_) expect_equal(pwSurvivalTime15$kappa, 1) expect_equal(pwSurvivalTime15$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime15$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime15$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime15), NA))) expect_output(print(pwSurvivalTime15)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime15), NA))) expect_output(summary(pwSurvivalTime15)$show()) pwSurvivalTime15CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime15, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime15CodeBased$piecewiseSurvivalTime, pwSurvivalTime15$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$lambda1, pwSurvivalTime15$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$lambda2, pwSurvivalTime15$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$hazardRatio, pwSurvivalTime15$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$pi1, pwSurvivalTime15$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$pi2, pwSurvivalTime15$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$median1, pwSurvivalTime15$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$median2, pwSurvivalTime15$median2, tolerance = 1e-05) 
expect_equal(pwSurvivalTime15CodeBased$eventTime, pwSurvivalTime15$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$kappa, pwSurvivalTime15$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime15$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$delayedResponseAllowed, pwSurvivalTime15$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime15CodeBased$delayedResponseEnabled, pwSurvivalTime15$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime15), "character") df <- as.data.frame(pwSurvivalTime15) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime15) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime16 <- getPiecewiseSurvivalTime(median1 = c(2, 2), hazardRatio = c(1.4, 1.4)) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime16' with expected results expect_equal(pwSurvivalTime16$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime16$lambda1, c(0.34657359, 0.34657359), tolerance = 1e-07) expect_equal(pwSurvivalTime16$lambda2, c(0.24755256, 0.24755256), tolerance = 1e-07) expect_equal(pwSurvivalTime16$hazardRatio, c(1.4, 1.4), tolerance = 1e-07) expect_equal(pwSurvivalTime16$pi1, NA_real_) expect_equal(pwSurvivalTime16$pi2, NA_real_) expect_equal(pwSurvivalTime16$median1, c(2, 2)) expect_equal(pwSurvivalTime16$median2, c(2.8, 2.8), tolerance = 1e-07) expect_equal(pwSurvivalTime16$eventTime, NA_real_) expect_equal(pwSurvivalTime16$kappa, 1) expect_equal(pwSurvivalTime16$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime16$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime16$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime16), NA))) expect_output(print(pwSurvivalTime16)$show()) 
invisible(capture.output(expect_error(summary(pwSurvivalTime16), NA))) expect_output(summary(pwSurvivalTime16)$show()) pwSurvivalTime16CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime16, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime16CodeBased$piecewiseSurvivalTime, pwSurvivalTime16$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$lambda1, pwSurvivalTime16$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$lambda2, pwSurvivalTime16$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$hazardRatio, pwSurvivalTime16$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$pi1, pwSurvivalTime16$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$pi2, pwSurvivalTime16$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$median1, pwSurvivalTime16$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$median2, pwSurvivalTime16$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$eventTime, pwSurvivalTime16$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$kappa, pwSurvivalTime16$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime16$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$delayedResponseAllowed, pwSurvivalTime16$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime16CodeBased$delayedResponseEnabled, pwSurvivalTime16$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime16), "character") df <- as.data.frame(pwSurvivalTime16) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime16) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime17 <- getPiecewiseSurvivalTime(median1 = c(2, 3), median2 = 4) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime17' with 
expected results expect_equal(pwSurvivalTime17$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime17$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime17$lambda2, 0.1732868, tolerance = 1e-07) expect_equal(pwSurvivalTime17$hazardRatio, c(2, 1.3333333), tolerance = 1e-07) expect_equal(pwSurvivalTime17$pi1, NA_real_) expect_equal(pwSurvivalTime17$pi2, NA_real_) expect_equal(pwSurvivalTime17$median1, c(2, 3)) expect_equal(pwSurvivalTime17$median2, 4) expect_equal(pwSurvivalTime17$eventTime, NA_real_) expect_equal(pwSurvivalTime17$kappa, 1) expect_equal(pwSurvivalTime17$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime17$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime17$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime17), NA))) expect_output(print(pwSurvivalTime17)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime17), NA))) expect_output(summary(pwSurvivalTime17)$show()) pwSurvivalTime17CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime17, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime17CodeBased$piecewiseSurvivalTime, pwSurvivalTime17$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$lambda1, pwSurvivalTime17$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$lambda2, pwSurvivalTime17$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$hazardRatio, pwSurvivalTime17$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$pi1, pwSurvivalTime17$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$pi2, pwSurvivalTime17$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$median1, pwSurvivalTime17$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$median2, pwSurvivalTime17$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$eventTime, 
pwSurvivalTime17$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$kappa, pwSurvivalTime17$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime17$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$delayedResponseAllowed, pwSurvivalTime17$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime17CodeBased$delayedResponseEnabled, pwSurvivalTime17$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime17), "character") df <- as.data.frame(pwSurvivalTime17) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime17) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime18 <- getPiecewiseSurvivalTime(median1 = c(2, 3), lambda2 = 0.4) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime18' with expected results expect_equal(pwSurvivalTime18$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime18$lambda1, c(0.34657359, 0.23104906), tolerance = 1e-07) expect_equal(pwSurvivalTime18$lambda2, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime18$hazardRatio, c(0.86643398, 0.57762265), tolerance = 1e-07) expect_equal(pwSurvivalTime18$pi1, NA_real_) expect_equal(pwSurvivalTime18$pi2, NA_real_) expect_equal(pwSurvivalTime18$median1, c(2, 3)) expect_equal(pwSurvivalTime18$median2, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime18$eventTime, NA_real_) expect_equal(pwSurvivalTime18$kappa, 1) expect_equal(pwSurvivalTime18$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime18$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime18$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime18), NA))) expect_output(print(pwSurvivalTime18)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime18), NA))) 
expect_output(summary(pwSurvivalTime18)$show()) pwSurvivalTime18CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime18, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime18CodeBased$piecewiseSurvivalTime, pwSurvivalTime18$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$lambda1, pwSurvivalTime18$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$lambda2, pwSurvivalTime18$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$hazardRatio, pwSurvivalTime18$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$pi1, pwSurvivalTime18$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$pi2, pwSurvivalTime18$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$median1, pwSurvivalTime18$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$median2, pwSurvivalTime18$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$eventTime, pwSurvivalTime18$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$kappa, pwSurvivalTime18$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime18$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$delayedResponseAllowed, pwSurvivalTime18$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime18CodeBased$delayedResponseEnabled, pwSurvivalTime18$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime18), "character") df <- as.data.frame(pwSurvivalTime18) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime18) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime19 <- getPiecewiseSurvivalTime(pi1 = 0.45) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime19' with expected results expect_equal(pwSurvivalTime19$piecewiseSurvivalTime, NA_real_) 
expect_equal(pwSurvivalTime19$lambda1, 0.04981975, tolerance = 1e-07) expect_equal(pwSurvivalTime19$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(pwSurvivalTime19$hazardRatio, 2.6791588, tolerance = 1e-07) expect_equal(pwSurvivalTime19$pi1, 0.45, tolerance = 1e-07) expect_equal(pwSurvivalTime19$pi2, 0.2, tolerance = 1e-07) expect_equal(pwSurvivalTime19$median1, 13.9131, tolerance = 1e-07) expect_equal(pwSurvivalTime19$median2, 37.275405, tolerance = 1e-07) expect_equal(pwSurvivalTime19$eventTime, 12) expect_equal(pwSurvivalTime19$kappa, 1) expect_equal(pwSurvivalTime19$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime19$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime19$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime19), NA))) expect_output(print(pwSurvivalTime19)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime19), NA))) expect_output(summary(pwSurvivalTime19)$show()) pwSurvivalTime19CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime19, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime19CodeBased$piecewiseSurvivalTime, pwSurvivalTime19$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$lambda1, pwSurvivalTime19$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$lambda2, pwSurvivalTime19$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$hazardRatio, pwSurvivalTime19$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$pi1, pwSurvivalTime19$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$pi2, pwSurvivalTime19$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$median1, pwSurvivalTime19$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$median2, pwSurvivalTime19$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$eventTime, pwSurvivalTime19$eventTime, tolerance = 1e-05) 
expect_equal(pwSurvivalTime19CodeBased$kappa, pwSurvivalTime19$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime19$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$delayedResponseAllowed, pwSurvivalTime19$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime19CodeBased$delayedResponseEnabled, pwSurvivalTime19$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime19), "character") df <- as.data.frame(pwSurvivalTime19) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime19) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime20 <- getPiecewiseSurvivalTime(median1 = c(2, 4), hazardRatio = c(1.4, 0.7)) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime20' with expected results expect_equal(pwSurvivalTime20$piecewiseSurvivalTime, NA_real_) expect_equal(pwSurvivalTime20$lambda1, c(0.34657359, 0.1732868), tolerance = 1e-07) expect_equal(pwSurvivalTime20$lambda2, c(0.24755256, 0.24755256), tolerance = 1e-07) expect_equal(pwSurvivalTime20$hazardRatio, c(1.4, 0.7), tolerance = 1e-07) expect_equal(pwSurvivalTime20$pi1, NA_real_) expect_equal(pwSurvivalTime20$pi2, NA_real_) expect_equal(pwSurvivalTime20$median1, c(2, 4)) expect_equal(pwSurvivalTime20$median2, c(2.8, 2.8), tolerance = 1e-07) expect_equal(pwSurvivalTime20$eventTime, NA_real_) expect_equal(pwSurvivalTime20$kappa, 1) expect_equal(pwSurvivalTime20$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime20$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime20$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime20), NA))) expect_output(print(pwSurvivalTime20)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime20), NA))) 
expect_output(summary(pwSurvivalTime20)$show()) pwSurvivalTime20CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime20, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime20CodeBased$piecewiseSurvivalTime, pwSurvivalTime20$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$lambda1, pwSurvivalTime20$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$lambda2, pwSurvivalTime20$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$hazardRatio, pwSurvivalTime20$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$pi1, pwSurvivalTime20$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$pi2, pwSurvivalTime20$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$median1, pwSurvivalTime20$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$median2, pwSurvivalTime20$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$eventTime, pwSurvivalTime20$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$kappa, pwSurvivalTime20$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime20$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$delayedResponseAllowed, pwSurvivalTime20$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime20CodeBased$delayedResponseEnabled, pwSurvivalTime20$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime20), "character") df <- as.data.frame(pwSurvivalTime20) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime20) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime21 <- getPiecewiseSurvivalTime(median1 = 3, hazardRatio = 0.8) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime21' with expected results expect_equal(pwSurvivalTime21$piecewiseSurvivalTime, 
NA_real_) expect_equal(pwSurvivalTime21$lambda1, 0.23104906, tolerance = 1e-07) expect_equal(pwSurvivalTime21$lambda2, 0.28881133, tolerance = 1e-07) expect_equal(pwSurvivalTime21$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime21$pi1, NA_real_) expect_equal(pwSurvivalTime21$pi2, NA_real_) expect_equal(pwSurvivalTime21$median1, 3) expect_equal(pwSurvivalTime21$median2, 2.4, tolerance = 1e-07) expect_equal(pwSurvivalTime21$eventTime, NA_real_) expect_equal(pwSurvivalTime21$kappa, 1) expect_equal(pwSurvivalTime21$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime21$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime21$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime21), NA))) expect_output(print(pwSurvivalTime21)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime21), NA))) expect_output(summary(pwSurvivalTime21)$show()) pwSurvivalTime21CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime21, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime21CodeBased$piecewiseSurvivalTime, pwSurvivalTime21$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$lambda1, pwSurvivalTime21$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$lambda2, pwSurvivalTime21$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$hazardRatio, pwSurvivalTime21$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$pi1, pwSurvivalTime21$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$pi2, pwSurvivalTime21$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$median1, pwSurvivalTime21$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$median2, pwSurvivalTime21$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$eventTime, pwSurvivalTime21$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$kappa, 
pwSurvivalTime21$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime21$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$delayedResponseAllowed, pwSurvivalTime21$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime21CodeBased$delayedResponseEnabled, pwSurvivalTime21$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime21), "character") df <- as.data.frame(pwSurvivalTime21) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime21) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } expect_error(getPiecewiseSurvivalTime(median2 = 1.386294, lambda2 = 0.4, hazardRatio = 0.8)) expect_error(getPiecewiseSurvivalTime(median2 = c(1.5, 1.7), lambda1 = c(0.3, 0.4))) expect_error(getPiecewiseSurvivalTime(median1 = c(2, 4), hazardRatio = c(1, 0.7))) expect_error(getPiecewiseSurvivalTime(median1 = c(2, 4), hazardRatio = 0.7)) }) test_that("Testing 'getPiecewiseSurvivalTime': vector based definition", { # @refFS[Tab.]{fs:tab:output:getPiecewiseSurvivalTime} pwSurvivalTime1 <- getPiecewiseSurvivalTime( piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), hazardRatio = 0.8 ) expect_equal(pwSurvivalTime1$hazardRatio, 0.8) expect_equal(pwSurvivalTime1$lambda1, c(0.025, 0.04, 0.015) * 0.8) expect_false(pwSurvivalTime1$isDelayedResponseEnabled()) .skipTestIfDisabled() pwSurvivalTime2 <- getPiecewiseSurvivalTime( piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.1, 0.2, 0.8), hazardRatio = 0.8 ) expect_true(pwSurvivalTime2$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime2$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime2$hazardRatio, 0.8) expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, c(0, 5, 10)) expect_equal(pwSurvivalTime2$lambda2, c(0.1, 0.2, 0.8)) pwSurvivalTime3 <- getPiecewiseSurvivalTime(c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = 0.8) 
expect_true(pwSurvivalTime3$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime3$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime3$hazardRatio, 0.8) expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, c(0, 6)) expect_equal(pwSurvivalTime3$lambda2, c(0.01, 0.03)) pwSurvivalTime4 <- getPiecewiseSurvivalTime(0, lambda2 = 0.01, hazardRatio = 0.8) expect_true(pwSurvivalTime4$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime4$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime4$hazardRatio, 0.8) expect_equal(pwSurvivalTime4$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime4$lambda2, 0.01) expect_equal(pwSurvivalTime4$lambda1, 0.01 * 0.8) pwSurvivalTime5 <- getPiecewiseSurvivalTime(NA_real_, lambda2 = 0.01, hazardRatio = 0.8) expect_true(pwSurvivalTime5$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime5$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime5$hazardRatio, 0.8) expect_equal(pwSurvivalTime5$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime5$lambda2, 0.01) expect_equal(pwSurvivalTime5$lambda1, 0.01 * 0.8) pwSurvivalTime6 <- getPiecewiseSurvivalTime(0, lambda2 = 0.01, lambda1 = 0.008) expect_true(pwSurvivalTime6$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime6$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime6$hazardRatio, 0.8) expect_equal(pwSurvivalTime6$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime6$lambda2, 0.01) expect_equal(pwSurvivalTime6$lambda1, 0.008) pwSurvivalTime7 <- getPiecewiseSurvivalTime(NA_real_, lambda2 = 0.01, lambda1 = 0.008) expect_true(pwSurvivalTime7$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime7$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime7$hazardRatio, 0.8) expect_equal(pwSurvivalTime7$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime7$lambda2, 0.01) expect_equal(pwSurvivalTime7$lambda1, 0.008) # case 2.2 pwSurvivalTime9 <- getPiecewiseSurvivalTime( piecewiseSurvivalTime = c(0, 6, 9), lambda2 = c(0.025, 0.04, 0.015), lambda1 = c(0.025, 0.04, 
0.015) * 0.8 ) expect_true(pwSurvivalTime9$isPiecewiseSurvivalEnabled()) expect_false(pwSurvivalTime9$isDelayedResponseEnabled()) expect_equal(pwSurvivalTime9$hazardRatio, 0.8) pwSurvivalTime10 <- getPiecewiseSurvivalTime(lambda2 = 0.025, hazardRatio = 0.8) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime10' with expected results expect_equal(pwSurvivalTime10$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime10$lambda1, 0.02, tolerance = 1e-07) expect_equal(pwSurvivalTime10$lambda2, 0.025, tolerance = 1e-07) expect_equal(pwSurvivalTime10$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime10$pi1, NA_real_) expect_equal(pwSurvivalTime10$pi2, NA_real_) expect_equal(pwSurvivalTime10$median1, 34.657359, tolerance = 1e-07) expect_equal(pwSurvivalTime10$median2, 27.725887, tolerance = 1e-07) expect_equal(pwSurvivalTime10$eventTime, NA_real_) expect_equal(pwSurvivalTime10$kappa, 1) expect_equal(pwSurvivalTime10$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime10$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime10$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime10), NA))) expect_output(print(pwSurvivalTime10)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime10), NA))) expect_output(summary(pwSurvivalTime10)$show()) pwSurvivalTime10CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime10, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime10CodeBased$piecewiseSurvivalTime, pwSurvivalTime10$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$lambda1, pwSurvivalTime10$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$lambda2, pwSurvivalTime10$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$hazardRatio, pwSurvivalTime10$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$pi1, pwSurvivalTime10$pi1, tolerance = 
1e-05) expect_equal(pwSurvivalTime10CodeBased$pi2, pwSurvivalTime10$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$median1, pwSurvivalTime10$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$median2, pwSurvivalTime10$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$eventTime, pwSurvivalTime10$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$kappa, pwSurvivalTime10$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime10$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$delayedResponseAllowed, pwSurvivalTime10$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime10CodeBased$delayedResponseEnabled, pwSurvivalTime10$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime10), "character") df <- as.data.frame(pwSurvivalTime10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime11 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = 0, lambda2 = 0.025, hazardRatio = 0.8) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime11' with expected results expect_equal(pwSurvivalTime11$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime11$lambda1, 0.02, tolerance = 1e-07) expect_equal(pwSurvivalTime11$lambda2, 0.025, tolerance = 1e-07) expect_equal(pwSurvivalTime11$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime11$pi1, NA_real_) expect_equal(pwSurvivalTime11$pi2, NA_real_) expect_equal(pwSurvivalTime11$median1, 34.657359, tolerance = 1e-07) expect_equal(pwSurvivalTime11$median2, 27.725887, tolerance = 1e-07) expect_equal(pwSurvivalTime11$eventTime, NA_real_) expect_equal(pwSurvivalTime11$kappa, 1) expect_equal(pwSurvivalTime11$piecewiseSurvivalEnabled, FALSE) 
expect_equal(pwSurvivalTime11$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime11$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime11), NA))) expect_output(print(pwSurvivalTime11)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime11), NA))) expect_output(summary(pwSurvivalTime11)$show()) pwSurvivalTime11CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime11, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime11CodeBased$piecewiseSurvivalTime, pwSurvivalTime11$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$lambda1, pwSurvivalTime11$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$lambda2, pwSurvivalTime11$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$hazardRatio, pwSurvivalTime11$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$pi1, pwSurvivalTime11$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$pi2, pwSurvivalTime11$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$median1, pwSurvivalTime11$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$median2, pwSurvivalTime11$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$eventTime, pwSurvivalTime11$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$kappa, pwSurvivalTime11$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime11$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$delayedResponseAllowed, pwSurvivalTime11$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime11CodeBased$delayedResponseEnabled, pwSurvivalTime11$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime11), "character") df <- as.data.frame(pwSurvivalTime11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) 
> 0) mtx <- as.matrix(pwSurvivalTime11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime12 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6), lambda2 = c(0.025, 0.01), hazardRatio = c(0.8, 0.9)) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime12' with expected results expect_equal(pwSurvivalTime12$piecewiseSurvivalTime, c(0, 6)) expect_equal(pwSurvivalTime12$lambda1, NA_real_) expect_equal(pwSurvivalTime12$lambda2, c(0.025, 0.01), tolerance = 1e-07) expect_equal(pwSurvivalTime12$hazardRatio, c(0.8, 0.9), tolerance = 1e-07) expect_equal(pwSurvivalTime12$pi1, NA_real_) expect_equal(pwSurvivalTime12$pi2, NA_real_) expect_equal(pwSurvivalTime12$median1, NA_real_) expect_equal(pwSurvivalTime12$median2, NA_real_) expect_equal(pwSurvivalTime12$eventTime, NA_real_) expect_equal(pwSurvivalTime12$kappa, 1) expect_equal(pwSurvivalTime12$piecewiseSurvivalEnabled, TRUE) expect_equal(pwSurvivalTime12$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime12$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime12), NA))) expect_output(print(pwSurvivalTime12)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime12), NA))) expect_output(summary(pwSurvivalTime12)$show()) pwSurvivalTime12CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime12, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime12CodeBased$piecewiseSurvivalTime, pwSurvivalTime12$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$lambda1, pwSurvivalTime12$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$lambda2, pwSurvivalTime12$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$hazardRatio, pwSurvivalTime12$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$pi1, pwSurvivalTime12$pi1, tolerance = 1e-05) 
expect_equal(pwSurvivalTime12CodeBased$pi2, pwSurvivalTime12$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$median1, pwSurvivalTime12$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$median2, pwSurvivalTime12$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$eventTime, pwSurvivalTime12$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$kappa, pwSurvivalTime12$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime12$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$delayedResponseAllowed, pwSurvivalTime12$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime12CodeBased$delayedResponseEnabled, pwSurvivalTime12$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime12), "character") df <- as.data.frame(pwSurvivalTime12) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime12) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime13 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6), lambda2 = c(0.025, 0.01), hazardRatio = c(0.8, 0.9), delayedResponseAllowed = TRUE) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime13' with expected results expect_equal(pwSurvivalTime13$piecewiseSurvivalTime, c(0, 6)) expect_equal(pwSurvivalTime13$lambda1, c(0.02, 0.009), tolerance = 1e-07) expect_equal(pwSurvivalTime13$lambda2, c(0.025, 0.01), tolerance = 1e-07) expect_equal(pwSurvivalTime13$hazardRatio, c(0.8, 0.9), tolerance = 1e-07) expect_equal(pwSurvivalTime13$pi1, NA_real_) expect_equal(pwSurvivalTime13$pi2, NA_real_) expect_equal(pwSurvivalTime13$median1, NA_real_) expect_equal(pwSurvivalTime13$median2, NA_real_) expect_equal(pwSurvivalTime13$eventTime, NA_real_) expect_equal(pwSurvivalTime13$kappa, 1) expect_equal(pwSurvivalTime13$piecewiseSurvivalEnabled, TRUE) 
expect_equal(pwSurvivalTime13$delayedResponseAllowed, TRUE) expect_equal(pwSurvivalTime13$delayedResponseEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime13), NA))) expect_output(print(pwSurvivalTime13)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime13), NA))) expect_output(summary(pwSurvivalTime13)$show()) pwSurvivalTime13CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime13, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime13CodeBased$piecewiseSurvivalTime, pwSurvivalTime13$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$lambda1, pwSurvivalTime13$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$lambda2, pwSurvivalTime13$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$hazardRatio, pwSurvivalTime13$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$pi1, pwSurvivalTime13$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$pi2, pwSurvivalTime13$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$median1, pwSurvivalTime13$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$median2, pwSurvivalTime13$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$eventTime, pwSurvivalTime13$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$kappa, pwSurvivalTime13$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime13$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$delayedResponseAllowed, pwSurvivalTime13$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime13CodeBased$delayedResponseEnabled, pwSurvivalTime13$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime13), "character") df <- as.data.frame(pwSurvivalTime13) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 
0) mtx <- as.matrix(pwSurvivalTime13)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # case 2.2: error expected
    expect_error(getPiecewiseSurvivalTime(
        piecewiseSurvivalTime = c(0, 6, 9),
        lambda2 = c(0.025, 0.04, 0.015),
        lambda1 = c(0.03, 0.04, 0.025)
    ), paste0(
        "Illegal argument: 'hazardRatio' can only be calculated if ",
        "'unique(lambda1 / lambda2)' result in a single value; ",
        "current result = c(1.2, 1, 1.667) (e.g., delayed response is not allowed)"
    ), fixed = TRUE)

    # case 3
    expect_false(getPiecewiseSurvivalTime(delayedResponseAllowed = TRUE)$isPiecewiseSurvivalEnabled())
    expect_false(getPiecewiseSurvivalTime(
        piecewiseSurvivalTime = NA,
        delayedResponseAllowed = TRUE
    )$isPiecewiseSurvivalEnabled())

    # case 3.1
    pwSurvivalTimeSim1 <- getPiecewiseSurvivalTime(
        piecewiseSurvivalTime = c(0, 6, 9),
        lambda2 = c(0.025, 0.04, 0.015),
        hazardRatio = 0.8,
        delayedResponseAllowed = TRUE
    )
    expect_equal(pwSurvivalTimeSim1$hazardRatio, 0.8)
    expect_equal(pwSurvivalTimeSim1$lambda1, c(0.025, 0.04, 0.015) * 0.8)
    expect_false(pwSurvivalTimeSim1$isDelayedResponseEnabled())

    # case 3.2
    pwSurvivalTimeSim2 <- getPiecewiseSurvivalTime(
        piecewiseSurvivalTime = c(0, 6, 9),
        lambda2 = c(0.025, 0.04, 0.015),
        lambda1 = c(0.03, 0.04, 0.025),
        delayedResponseAllowed = TRUE
    )
    expect_true(pwSurvivalTimeSim2$isPiecewiseSurvivalEnabled())
    expect_true(pwSurvivalTimeSim2$isDelayedResponseEnabled())
    expect_equal(pwSurvivalTimeSim2$hazardRatio, c(1.2, 1, 5 / 3))

    pwsTime1 <- getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4)
    expect_equal(pwsTime1$.isLambdaBased(minNumberOfLambdas = 1), TRUE)
})

# Verifies that conflicting or inconsistent survival-time arguments are rejected
# with informative error messages, that a redundant 'hazardRatio' specification
# only raises a warning, and that length mismatches between
# 'piecewiseSurvivalTime' and 'lambda2' are reported.
test_that("Testing 'getPiecewiseSurvivalTime': check error and warnings", {
    # @refFS[Tab.]{fs:tab:output:getPiecewiseSurvivalTime}

    # pi-based and lambda-based definitions must not be mixed
    expect_error(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4),
        "Conflicting arguments: it is not allowed to specify 'pi2' (0.4) and 'lambda2' (0.4) concurrently",
        fixed = TRUE
    )
    # NOTE(review): each of the following two assertions appeared twice,
    # literally identical, in the original file; the duplicates were removed.
    expect_error(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), lambda2 = 0.4, pi2 = 0.4, pi1 = 0.3),
        "Conflicting arguments: it is not allowed to specify 'pi1' (0.3) and 'lambda2' (0.4) concurrently",
        fixed = TRUE
    )
    expect_error(getPiecewiseSurvivalTime(lambda2 = 0.4, lambda1 = 0.3, pi2 = 0.4, pi1 = 0.3),
        "Conflicting arguments: it is not allowed to specify 'pi1' (0.3) and 'lambda1' (0.3) concurrently",
        fixed = TRUE
    )

    # a pi-based definition is detected as such
    expect_equal(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi2 = 0.4)$.isPiBased(), TRUE)

    # 'hazardRatio' is derivable from the pi values and therefore ignored with a warning
    expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi2 = 0.4, pi1 = 0.3),
        "'hazardRatio' (0.6, 0.8) will be ignored because it will be calculated",
        fixed = TRUE
    )
    expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi1 = 0.3),
        "'hazardRatio' (0.6, 0.8) will be ignored because it will be calculated",
        fixed = TRUE
    )

    # 'piecewiseSurvivalTime' and 'lambda2' must have equal lengths
    expect_error(getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6), lambda2 = 0.025, hazardRatio = 0.8, delayedResponseAllowed = TRUE),
        "Illegal argument: length of 'piecewiseSurvivalTime' (2) and length of 'lambda2' (1) must be equal",
        fixed = TRUE
    )
    expect_error(getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6, 12), lambda2 = 0.025, hazardRatio = 0.8, delayedResponseAllowed = TRUE),
        "Illegal argument: length of 'piecewiseSurvivalTime' (3) and length of 'lambda2' (1) must be equal",
        fixed = TRUE
    )
    expect_error(getPiecewiseSurvivalTime(piecewiseSurvivalTime = c(0, 6), lambda2 = 0.025, hazardRatio = 0.8),
        "Illegal argument: length of 'piecewiseSurvivalTime' (2) and length of 'lambda2' (1) must be equal",
        fixed = TRUE
    )
})

test_that("Testing 'getPiecewiseSurvivalTime': list-wise definition", {
    # @refFS[Tab.]{fs:tab:output:getPiecewiseSurvivalTime}
    pwSurvivalTime8 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = list(
        "<6" = 0.025,
        "6 - <9" = 0.04,
        "9 - <15" = 0.015,
        "15 - <21" = 0.01,
        ">=21" = 0.007
    ), hazardRatio = 0.6)
    expect_true(pwSurvivalTime8$isPiecewiseSurvivalEnabled())
    expect_false(pwSurvivalTime8$isDelayedResponseEnabled())
    expect_equal(pwSurvivalTime8$hazardRatio, 0.6)
    expect_equal(pwSurvivalTime8$piecewiseSurvivalTime, c(0, 6, 9, 15, 21))
    expect_equal(pwSurvivalTime8$lambda2, c(0.025, 0.040, 0.015, 0.010, 0.007))
    expect_equal(pwSurvivalTime8$lambda1, c(0.0150, 0.0240, 0.0090, 0.0060, 0.0042))

    .skipTestIfDisabled()

    result1 <- getPiecewiseSurvivalTime(list(
        "<5" = 0.1,
        "5 - <10" = 0.2,
        ">=10" = 0.8
    ), hazardRatio = 0.8)
    expect_equal(result1$piecewiseSurvivalTime, c(0, 5, 10))
    expect_equal(result1$lambda2, c(0.1, 0.2, 0.8))

    result2 <- getPiecewiseSurvivalTime(list(
        "0 - <5" = 0.1,
        "5 - <10" = 0.2,
        "10 - Inf" = 0.8
    ), hazardRatio = 0.8)
    expect_equal(result2$piecewiseSurvivalTime, c(0, 5, 10))
    expect_equal(result2$lambda2, c(0.1, 0.2, 0.8))

    pwSurvivalTime2 <- getPiecewiseSurvivalTime(
        piecewiseSurvivalTime = c(0, 5, 10),
        lambda2 = c(0.1, 0.2, 0.8),
        hazardRatio = 0.8
    )
    expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, c(0, 5, 10))
    expect_equal(pwSurvivalTime2$lambda2, c(0.1, 0.2, 0.8))

    pwSurvivalTime3 <- getPiecewiseSurvivalTime(c(0, 6), lambda2 = c(0.01, 0.03), hazardRatio = 0.8)
    expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, c(0, 6))
    expect_equal(pwSurvivalTime3$lambda2, c(0.01, 0.03))

    pwSurvivalTime4 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = list("0 - ?"
= 0.025), hazardRatio = 0.8, delayedResponseAllowed = TRUE)

    ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime4' with expected results
    # Single-entry list: the object falls back to a non-piecewise definition
    # (piecewiseSurvivalEnabled is expected to be FALSE below).
    expect_equal(pwSurvivalTime4$piecewiseSurvivalTime, 0)
    expect_equal(pwSurvivalTime4$lambda1, 0.02, tolerance = 1e-07)
    expect_equal(pwSurvivalTime4$lambda2, 0.025, tolerance = 1e-07)
    expect_equal(pwSurvivalTime4$hazardRatio, 0.8, tolerance = 1e-07)
    expect_equal(pwSurvivalTime4$pi1, NA_real_)
    expect_equal(pwSurvivalTime4$pi2, NA_real_)
    expect_equal(pwSurvivalTime4$median1, 34.657359, tolerance = 1e-07)
    expect_equal(pwSurvivalTime4$median2, 27.725887, tolerance = 1e-07)
    expect_equal(pwSurvivalTime4$eventTime, NA_real_)
    expect_equal(pwSurvivalTime4$kappa, 1)
    expect_equal(pwSurvivalTime4$piecewiseSurvivalEnabled, FALSE)
    expect_equal(pwSurvivalTime4$delayedResponseAllowed, TRUE)
    expect_equal(pwSurvivalTime4$delayedResponseEnabled, FALSE)
    # Round-trip check (complete unit test set only): print/summary must not
    # error, and an object rebuilt from its own generated R code must match
    # the original field by field.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(pwSurvivalTime4), NA)))
        expect_output(print(pwSurvivalTime4)$show())
        invisible(capture.output(expect_error(summary(pwSurvivalTime4), NA)))
        expect_output(summary(pwSurvivalTime4)$show())
        pwSurvivalTime4CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime4, stringWrapParagraphWidth = NULL)))
        expect_equal(pwSurvivalTime4CodeBased$piecewiseSurvivalTime, pwSurvivalTime4$piecewiseSurvivalTime, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$lambda1, pwSurvivalTime4$lambda1, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$lambda2, pwSurvivalTime4$lambda2, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$hazardRatio, pwSurvivalTime4$hazardRatio, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$pi1, pwSurvivalTime4$pi1, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$pi2, pwSurvivalTime4$pi2, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$median1, pwSurvivalTime4$median1, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$median2, pwSurvivalTime4$median2, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$eventTime, pwSurvivalTime4$eventTime, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$kappa, pwSurvivalTime4$kappa, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime4$piecewiseSurvivalEnabled, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$delayedResponseAllowed, pwSurvivalTime4$delayedResponseAllowed, tolerance = 1e-05)
        expect_equal(pwSurvivalTime4CodeBased$delayedResponseEnabled, pwSurvivalTime4$delayedResponseEnabled, tolerance = 1e-05)
        expect_type(names(pwSurvivalTime4), "character")
        df <- as.data.frame(pwSurvivalTime4)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(pwSurvivalTime4)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Same scenario with a non-range label "x": expected results below are
    # identical to 'pwSurvivalTime4' (treated as a single lambda value).
    pwSurvivalTime5 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = list("x" = 0.025), hazardRatio = 0.8, delayedResponseAllowed = TRUE)

    ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime5' with expected results
    expect_equal(pwSurvivalTime5$piecewiseSurvivalTime, 0)
    expect_equal(pwSurvivalTime5$lambda1, 0.02, tolerance = 1e-07)
    expect_equal(pwSurvivalTime5$lambda2, 0.025, tolerance = 1e-07)
    expect_equal(pwSurvivalTime5$hazardRatio, 0.8, tolerance = 1e-07)
    expect_equal(pwSurvivalTime5$pi1, NA_real_)
    expect_equal(pwSurvivalTime5$pi2, NA_real_)
    expect_equal(pwSurvivalTime5$median1, 34.657359, tolerance = 1e-07)
    expect_equal(pwSurvivalTime5$median2, 27.725887, tolerance = 1e-07)
    expect_equal(pwSurvivalTime5$eventTime, NA_real_)
    expect_equal(pwSurvivalTime5$kappa, 1)
    expect_equal(pwSurvivalTime5$piecewiseSurvivalEnabled, FALSE)
    expect_equal(pwSurvivalTime5$delayedResponseAllowed, TRUE)
    expect_equal(pwSurvivalTime5$delayedResponseEnabled, FALSE)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(pwSurvivalTime5), NA)))
expect_output(print(pwSurvivalTime5)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime5), NA))) expect_output(summary(pwSurvivalTime5)$show()) pwSurvivalTime5CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime5, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime5CodeBased$piecewiseSurvivalTime, pwSurvivalTime5$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$lambda1, pwSurvivalTime5$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$lambda2, pwSurvivalTime5$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$hazardRatio, pwSurvivalTime5$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$pi1, pwSurvivalTime5$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$pi2, pwSurvivalTime5$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$median1, pwSurvivalTime5$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$median2, pwSurvivalTime5$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$eventTime, pwSurvivalTime5$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$kappa, pwSurvivalTime5$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime5$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$delayedResponseAllowed, pwSurvivalTime5$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime5CodeBased$delayedResponseEnabled, pwSurvivalTime5$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime5), "character") df <- as.data.frame(pwSurvivalTime5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime6 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = list("0 - 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime6) 
expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime7 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = list("x" = 0.025), hazardRatio = 0.8, delayedResponseAllowed = FALSE) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime7' with expected results expect_equal(pwSurvivalTime7$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime7$lambda1, 0.02, tolerance = 1e-07) expect_equal(pwSurvivalTime7$lambda2, 0.025, tolerance = 1e-07) expect_equal(pwSurvivalTime7$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime7$pi1, NA_real_) expect_equal(pwSurvivalTime7$pi2, NA_real_) expect_equal(pwSurvivalTime7$median1, 34.657359, tolerance = 1e-07) expect_equal(pwSurvivalTime7$median2, 27.725887, tolerance = 1e-07) expect_equal(pwSurvivalTime7$eventTime, NA_real_) expect_equal(pwSurvivalTime7$kappa, 1) expect_equal(pwSurvivalTime7$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime7$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime7$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime7), NA))) expect_output(print(pwSurvivalTime7)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime7), NA))) expect_output(summary(pwSurvivalTime7)$show()) pwSurvivalTime7CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime7, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime7CodeBased$piecewiseSurvivalTime, pwSurvivalTime7$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$lambda1, pwSurvivalTime7$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$lambda2, pwSurvivalTime7$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$hazardRatio, pwSurvivalTime7$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$pi1, pwSurvivalTime7$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$pi2, pwSurvivalTime7$pi2, 
tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$median1, pwSurvivalTime7$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$median2, pwSurvivalTime7$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$eventTime, pwSurvivalTime7$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$kappa, pwSurvivalTime7$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime7$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$delayedResponseAllowed, pwSurvivalTime7$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime7CodeBased$delayedResponseEnabled, pwSurvivalTime7$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime7), "character") df <- as.data.frame(pwSurvivalTime7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime8 <- getPiecewiseSurvivalTime(piecewiseSurvivalTime = list("0 - 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } expect_warning(getPiecewiseSurvivalTime(piecewiseSurvivalTime = list("<6" = 0.025), hazardRatio = 0.8), "Defined time period \"0 - <6\" will be ignored because 'piecewiseSurvivalTime' list has only 1 entry", fixed = TRUE ) }) test_plan_section("Testing Class 'AccrualTime'") test_that("Testing 'getAccrualTime': isAccrualTimeEnabled()", { expect_true(getAccrualTime()$isAccrualTimeEnabled()) expect_true(getAccrualTime(maxNumberOfSubjects = 100)$isAccrualTimeEnabled()) }) test_that("Testing 'getAccrualTime': vector based definition", { accrualTime1 <- getAccrualTime( accrualTime = c(0, 6, 9, 15), accrualIntensity = c(15, 21, 27), maxNumberOfSubjects = 315 ) expect_equal(accrualTime1$accrualTime, c(0, 6, 9, 15)) expect_equal(accrualTime1$accrualIntensity, c(15, 21, 
27)) expect_equal(accrualTime1$remainingTime, NA_real_) accrualTime2 <- getAccrualTime( accrualTime = c(0, 6, 9), accrualIntensity = c(15, 21, 27), maxNumberOfSubjects = 1000 ) expect_equal(accrualTime2$accrualTime, c(0, 6, 9, 40.37037)) expect_equal(accrualTime2$accrualIntensity, c(15, 21, 27)) expect_equal(accrualTime2$remainingTime, 31.37037) .skipTestIfDisabled() accrualTime3 <- getAccrualTime( accrualTime = c(0, 12, 13, 14, 15, 16), accrualIntensity = c(15, 21, 27, 33, 39, 45), maxNumberOfSubjects = 1405 ) expect_equal(accrualTime3$accrualTime, c(0, 12, 13, 14, 15, 16, 40.55555556)) expect_equal(accrualTime3$accrualIntensity, c(15, 21, 27, 33, 39, 45)) expect_equal(accrualTime3$remainingTime, 24.55555556) accrualTime4 <- getAccrualTime( accrualTime = c(0, 24), accrualIntensity = c(30), maxNumberOfSubjects = 720 ) ## Comparison of the results of AccrualTime object 'accrualTime4' with expected results expect_equal(accrualTime4$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime4$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime4$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime4$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime4$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime4$accrualTime, c(0, 24)) expect_equal(accrualTime4$accrualIntensity, 30) expect_equal(accrualTime4$accrualIntensityRelative, NA_real_) expect_equal(accrualTime4$maxNumberOfSubjects, 720) expect_equal(accrualTime4$remainingTime, NA_real_) expect_equal(accrualTime4$piecewiseAccrualEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime4), NA))) expect_output(print(accrualTime4)$show()) invisible(capture.output(expect_error(summary(accrualTime4), NA))) expect_output(summary(accrualTime4)$show()) accrualTime4CodeBased <- eval(parse(text = getObjectRCode(accrualTime4, stringWrapParagraphWidth = NULL))) 
expect_equal(accrualTime4CodeBased$endOfAccrualIsUserDefined, accrualTime4$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$followUpTimeMustBeUserDefined, accrualTime4$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime4$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime4$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$absoluteAccrualIntensityEnabled, accrualTime4$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$accrualTime, accrualTime4$accrualTime, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$accrualIntensity, accrualTime4$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$accrualIntensityRelative, accrualTime4$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$maxNumberOfSubjects, accrualTime4$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$remainingTime, accrualTime4$remainingTime, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$piecewiseAccrualEnabled, accrualTime4$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime4), "character") df <- as.data.frame(accrualTime4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime5 <- getAccrualTime( accrualTime = c(0, 24, 30), accrualIntensity = c(30, 45) ) ## Comparison of the results of AccrualTime object 'accrualTime5' with expected results expect_equal(accrualTime5$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime5$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsIsUserDefined, FALSE) 
expect_equal(accrualTime5$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime5$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime5$accrualTime, c(0, 24, 30)) expect_equal(accrualTime5$accrualIntensity, c(30, 45)) expect_equal(accrualTime5$accrualIntensityRelative, NA_real_) expect_equal(accrualTime5$maxNumberOfSubjects, 990) expect_equal(accrualTime5$remainingTime, 6) expect_equal(accrualTime5$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime5), NA))) expect_output(print(accrualTime5)$show()) invisible(capture.output(expect_error(summary(accrualTime5), NA))) expect_output(summary(accrualTime5)$show()) accrualTime5CodeBased <- eval(parse(text = getObjectRCode(accrualTime5, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime5CodeBased$endOfAccrualIsUserDefined, accrualTime5$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$followUpTimeMustBeUserDefined, accrualTime5$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime5$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime5$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$absoluteAccrualIntensityEnabled, accrualTime5$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$accrualTime, accrualTime5$accrualTime, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$accrualIntensity, accrualTime5$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$accrualIntensityRelative, accrualTime5$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$maxNumberOfSubjects, accrualTime5$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$remainingTime, 
accrualTime5$remainingTime, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$piecewiseAccrualEnabled, accrualTime5$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime5), "character") df <- as.data.frame(accrualTime5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime6 <- getAccrualTime( accrualTime = c(0, 24, 30), accrualIntensity = c(20, 25, 45), maxNumberOfSubjects = 720 ) ## Comparison of the results of AccrualTime object 'accrualTime6' with expected results expect_equal(accrualTime6$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime6$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime6$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime6$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime6$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime6$accrualTime, c(0, 24, 30, 32)) expect_equal(accrualTime6$accrualIntensity, c(20, 25, 45)) expect_equal(accrualTime6$accrualIntensityRelative, NA_real_) expect_equal(accrualTime6$maxNumberOfSubjects, 720) expect_equal(accrualTime6$remainingTime, 2) expect_equal(accrualTime6$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime6), NA))) expect_output(print(accrualTime6)$show()) invisible(capture.output(expect_error(summary(accrualTime6), NA))) expect_output(summary(accrualTime6)$show()) accrualTime6CodeBased <- eval(parse(text = getObjectRCode(accrualTime6, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime6CodeBased$endOfAccrualIsUserDefined, accrualTime6$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$followUpTimeMustBeUserDefined, accrualTime6$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$maxNumberOfSubjectsIsUserDefined, 
accrualTime6$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime6$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$absoluteAccrualIntensityEnabled, accrualTime6$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$accrualTime, accrualTime6$accrualTime, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$accrualIntensity, accrualTime6$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$accrualIntensityRelative, accrualTime6$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$maxNumberOfSubjects, accrualTime6$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$remainingTime, accrualTime6$remainingTime, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$piecewiseAccrualEnabled, accrualTime6$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime6), "character") df <- as.data.frame(accrualTime6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime8 <- getAccrualTime(accrualTime = 0, accrualIntensity = 15, maxNumberOfSubjects = 1000) ## Comparison of the results of AccrualTime object 'accrualTime8' with expected results expect_equal(accrualTime8$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime8$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime8$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime8$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime8$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime8$accrualTime, c(0, 66.666667), tolerance = 1e-07) expect_equal(accrualTime8$accrualIntensity, 15) expect_equal(accrualTime8$accrualIntensityRelative, NA_real_) 
expect_equal(accrualTime8$maxNumberOfSubjects, 1000) expect_equal(accrualTime8$remainingTime, 66.666667, tolerance = 1e-07) expect_equal(accrualTime8$piecewiseAccrualEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime8), NA))) expect_output(print(accrualTime8)$show()) invisible(capture.output(expect_error(summary(accrualTime8), NA))) expect_output(summary(accrualTime8)$show()) accrualTime8CodeBased <- eval(parse(text = getObjectRCode(accrualTime8, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime8CodeBased$endOfAccrualIsUserDefined, accrualTime8$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$followUpTimeMustBeUserDefined, accrualTime8$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime8$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime8$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$absoluteAccrualIntensityEnabled, accrualTime8$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$accrualTime, accrualTime8$accrualTime, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$accrualIntensity, accrualTime8$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$accrualIntensityRelative, accrualTime8$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$maxNumberOfSubjects, accrualTime8$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$remainingTime, accrualTime8$remainingTime, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$piecewiseAccrualEnabled, accrualTime8$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime8), "character") df <- as.data.frame(accrualTime8) expect_s3_class(df, "data.frame") expect_true(nrow(df) 
> 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime9 <- getAccrualTime(accrualTime = c(0, 5), accrualIntensity = 15) ## Comparison of the results of AccrualTime object 'accrualTime9' with expected results expect_equal(accrualTime9$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime9$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime9$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime9$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime9$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime9$accrualTime, c(0, 5)) expect_equal(accrualTime9$accrualIntensity, 15) expect_equal(accrualTime9$accrualIntensityRelative, NA_real_) expect_equal(accrualTime9$maxNumberOfSubjects, 75) expect_equal(accrualTime9$remainingTime, 5) expect_equal(accrualTime9$piecewiseAccrualEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime9), NA))) expect_output(print(accrualTime9)$show()) invisible(capture.output(expect_error(summary(accrualTime9), NA))) expect_output(summary(accrualTime9)$show()) accrualTime9CodeBased <- eval(parse(text = getObjectRCode(accrualTime9, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime9CodeBased$endOfAccrualIsUserDefined, accrualTime9$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$followUpTimeMustBeUserDefined, accrualTime9$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime9$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime9$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$absoluteAccrualIntensityEnabled, accrualTime9$absoluteAccrualIntensityEnabled, tolerance = 1e-05) 
expect_equal(accrualTime9CodeBased$accrualTime, accrualTime9$accrualTime, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$accrualIntensity, accrualTime9$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$accrualIntensityRelative, accrualTime9$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$maxNumberOfSubjects, accrualTime9$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$remainingTime, accrualTime9$remainingTime, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$piecewiseAccrualEnabled, accrualTime9$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime9), "character") df <- as.data.frame(accrualTime9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime10 <- getAccrualTime(accrualTime = 0, accrualIntensity = 15, maxNumberOfSubjects = 10) ## Comparison of the results of AccrualTime object 'accrualTime10' with expected results expect_equal(accrualTime10$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime10$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime10$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime10$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime10$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime10$accrualTime, c(0, 0.66666667), tolerance = 1e-07) expect_equal(accrualTime10$accrualIntensity, 15) expect_equal(accrualTime10$accrualIntensityRelative, NA_real_) expect_equal(accrualTime10$maxNumberOfSubjects, 10) expect_equal(accrualTime10$remainingTime, 0.66666667, tolerance = 1e-07) expect_equal(accrualTime10$piecewiseAccrualEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime10), NA))) expect_output(print(accrualTime10)$show()) 
invisible(capture.output(expect_error(summary(accrualTime10), NA))) expect_output(summary(accrualTime10)$show()) accrualTime10CodeBased <- eval(parse(text = getObjectRCode(accrualTime10, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime10CodeBased$endOfAccrualIsUserDefined, accrualTime10$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$followUpTimeMustBeUserDefined, accrualTime10$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime10$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime10$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$absoluteAccrualIntensityEnabled, accrualTime10$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$accrualTime, accrualTime10$accrualTime, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$accrualIntensity, accrualTime10$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$accrualIntensityRelative, accrualTime10$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$maxNumberOfSubjects, accrualTime10$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$remainingTime, accrualTime10$remainingTime, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$piecewiseAccrualEnabled, accrualTime10$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime10), "character") df <- as.data.frame(accrualTime10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime11 <- getAccrualTime(accrualTime = c(0, 5), accrualIntensity = 15, maxNumberOfSubjects = 75) ## Comparison of the results of AccrualTime object 'accrualTime11' with 
expected results expect_equal(accrualTime11$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime11$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime11$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime11$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime11$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime11$accrualTime, c(0, 5)) expect_equal(accrualTime11$accrualIntensity, 15) expect_equal(accrualTime11$accrualIntensityRelative, NA_real_) expect_equal(accrualTime11$maxNumberOfSubjects, 75) expect_equal(accrualTime11$remainingTime, NA_real_) expect_equal(accrualTime11$piecewiseAccrualEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime11), NA))) expect_output(print(accrualTime11)$show()) invisible(capture.output(expect_error(summary(accrualTime11), NA))) expect_output(summary(accrualTime11)$show()) accrualTime11CodeBased <- eval(parse(text = getObjectRCode(accrualTime11, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime11CodeBased$endOfAccrualIsUserDefined, accrualTime11$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$followUpTimeMustBeUserDefined, accrualTime11$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime11$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime11$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$absoluteAccrualIntensityEnabled, accrualTime11$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$accrualTime, accrualTime11$accrualTime, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$accrualIntensity, accrualTime11$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$accrualIntensityRelative, 
accrualTime11$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$maxNumberOfSubjects, accrualTime11$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$remainingTime, accrualTime11$remainingTime, tolerance = 1e-05) expect_equal(accrualTime11CodeBased$piecewiseAccrualEnabled, accrualTime11$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime11), "character") df <- as.data.frame(accrualTime11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime12 <- getAccrualTime(accrualTime = c(0, 6, 15, 25), accrualIntensity = c(22, 0, 33)) ## Comparison of the results of AccrualTime object 'accrualTime12' with expected results expect_equal(accrualTime12$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime12$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime12$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime12$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime12$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime12$accrualTime, c(0, 6, 15, 25)) expect_equal(accrualTime12$accrualIntensity, c(22, 0, 33)) expect_equal(accrualTime12$accrualIntensityRelative, NA_real_) expect_equal(accrualTime12$maxNumberOfSubjects, 462) expect_equal(accrualTime12$remainingTime, 10) expect_equal(accrualTime12$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime12), NA))) expect_output(print(accrualTime12)$show()) invisible(capture.output(expect_error(summary(accrualTime12), NA))) expect_output(summary(accrualTime12)$show()) accrualTime12CodeBased <- eval(parse(text = getObjectRCode(accrualTime12, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime12CodeBased$endOfAccrualIsUserDefined, accrualTime12$endOfAccrualIsUserDefined, 
tolerance = 1e-05) expect_equal(accrualTime12CodeBased$followUpTimeMustBeUserDefined, accrualTime12$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime12$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime12$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$absoluteAccrualIntensityEnabled, accrualTime12$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$accrualTime, accrualTime12$accrualTime, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$accrualIntensity, accrualTime12$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$accrualIntensityRelative, accrualTime12$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$maxNumberOfSubjects, accrualTime12$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$remainingTime, accrualTime12$remainingTime, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$piecewiseAccrualEnabled, accrualTime12$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime12), "character") df <- as.data.frame(accrualTime12) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime12) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime13 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000) ## Comparison of the results of AccrualTime object 'accrualTime13' with expected results expect_equal(accrualTime13$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime13$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime13$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime13$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) 
expect_equal(accrualTime13$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime13$accrualTime, c(0, 6, 32.30303), tolerance = 1e-07) expect_equal(accrualTime13$accrualIntensity, c(22, 33)) expect_equal(accrualTime13$accrualIntensityRelative, NA_real_) expect_equal(accrualTime13$maxNumberOfSubjects, 1000) expect_equal(accrualTime13$remainingTime, 26.30303, tolerance = 1e-07) expect_equal(accrualTime13$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime13), NA))) expect_output(print(accrualTime13)$show()) invisible(capture.output(expect_error(summary(accrualTime13), NA))) expect_output(summary(accrualTime13)$show()) accrualTime13CodeBased <- eval(parse(text = getObjectRCode(accrualTime13, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime13CodeBased$endOfAccrualIsUserDefined, accrualTime13$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$followUpTimeMustBeUserDefined, accrualTime13$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime13$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime13$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$absoluteAccrualIntensityEnabled, accrualTime13$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$accrualTime, accrualTime13$accrualTime, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$accrualIntensity, accrualTime13$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$accrualIntensityRelative, accrualTime13$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$maxNumberOfSubjects, accrualTime13$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$remainingTime, 
accrualTime13$remainingTime, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$piecewiseAccrualEnabled, accrualTime13$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime13), "character") df <- as.data.frame(accrualTime13) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime13) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("Testing 'getAccrualTime': test absolute and relative definition", { # @refFS[Tab.]{fs:tab:output:getAccrualTime} accrualTime1 <- getAccrualTime( accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33), maxNumberOfSubjects = 924 ) ## Comparison of the results of AccrualTime object 'accrualTime1' with expected results expect_equal(accrualTime1$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime1$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime1$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime1$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime1$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime1$accrualTime, c(0, 6, 30)) expect_equal(accrualTime1$accrualIntensity, c(22, 33)) expect_equal(accrualTime1$accrualIntensityRelative, NA_real_) expect_equal(accrualTime1$maxNumberOfSubjects, 924) expect_equal(accrualTime1$remainingTime, NA_real_) expect_equal(accrualTime1$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime1), NA))) expect_output(print(accrualTime1)$show()) invisible(capture.output(expect_error(summary(accrualTime1), NA))) expect_output(summary(accrualTime1)$show()) accrualTime1CodeBased <- eval(parse(text = getObjectRCode(accrualTime1, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime1CodeBased$endOfAccrualIsUserDefined, accrualTime1$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$followUpTimeMustBeUserDefined, 
accrualTime1$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime1$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime1$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$absoluteAccrualIntensityEnabled, accrualTime1$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$accrualTime, accrualTime1$accrualTime, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$accrualIntensity, accrualTime1$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$accrualIntensityRelative, accrualTime1$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$maxNumberOfSubjects, accrualTime1$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$remainingTime, accrualTime1$remainingTime, tolerance = 1e-05) expect_equal(accrualTime1CodeBased$piecewiseAccrualEnabled, accrualTime1$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime1), "character") df <- as.data.frame(accrualTime1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime2 <- getAccrualTime(list( "0 - <6" = 22, "6 - <=30" = 33 ), maxNumberOfSubjects = 924 ) ## Comparison of the results of AccrualTime object 'accrualTime2' with expected results expect_equal(accrualTime2$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime2$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime2$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime2$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime2$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime2$accrualTime, c(0, 6, 30)) expect_equal(accrualTime2$accrualIntensity, 
c(22, 33)) expect_equal(accrualTime2$accrualIntensityRelative, NA_real_) expect_equal(accrualTime2$maxNumberOfSubjects, 924) expect_equal(accrualTime2$remainingTime, NA_real_) expect_equal(accrualTime2$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime2), NA))) expect_output(print(accrualTime2)$show()) invisible(capture.output(expect_error(summary(accrualTime2), NA))) expect_output(summary(accrualTime2)$show()) accrualTime2CodeBased <- eval(parse(text = getObjectRCode(accrualTime2, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime2CodeBased$endOfAccrualIsUserDefined, accrualTime2$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$followUpTimeMustBeUserDefined, accrualTime2$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime2$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime2$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$absoluteAccrualIntensityEnabled, accrualTime2$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$accrualTime, accrualTime2$accrualTime, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$accrualIntensity, accrualTime2$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$accrualIntensityRelative, accrualTime2$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$maxNumberOfSubjects, accrualTime2$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$remainingTime, accrualTime2$remainingTime, tolerance = 1e-05) expect_equal(accrualTime2CodeBased$piecewiseAccrualEnabled, accrualTime2$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime2), "character") df <- as.data.frame(accrualTime2) 
expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() accrualTime3 <- getAccrualTime( accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000 ) ## Comparison of the results of AccrualTime object 'accrualTime3' with expected results expect_equal(accrualTime3$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime3$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime3$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime3$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime3$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime3$accrualTime, c(0, 6, 30)) expect_equal(accrualTime3$accrualIntensity, c(23.809524, 35.714286), tolerance = 1e-07) expect_equal(accrualTime3$accrualIntensityRelative, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime3$maxNumberOfSubjects, 1000) expect_equal(accrualTime3$remainingTime, 24) expect_equal(accrualTime3$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime3), NA))) expect_output(print(accrualTime3)$show()) invisible(capture.output(expect_error(summary(accrualTime3), NA))) expect_output(summary(accrualTime3)$show()) accrualTime3CodeBased <- eval(parse(text = getObjectRCode(accrualTime3, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime3CodeBased$endOfAccrualIsUserDefined, accrualTime3$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$followUpTimeMustBeUserDefined, accrualTime3$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime3$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, 
accrualTime3$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$absoluteAccrualIntensityEnabled, accrualTime3$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$accrualTime, accrualTime3$accrualTime, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$accrualIntensity, accrualTime3$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$accrualIntensityRelative, accrualTime3$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$maxNumberOfSubjects, accrualTime3$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$remainingTime, accrualTime3$remainingTime, tolerance = 1e-05) expect_equal(accrualTime3CodeBased$piecewiseAccrualEnabled, accrualTime3$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime3), "character") df <- as.data.frame(accrualTime3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime4 <- getAccrualTime(list( "0 - <6" = 0.22, "6 - <=30" = 0.33 ), maxNumberOfSubjects = 1000 ) ## Comparison of the results of AccrualTime object 'accrualTime4' with expected results expect_equal(accrualTime4$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime4$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime4$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime4$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime4$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime4$accrualTime, c(0, 6, 30)) expect_equal(accrualTime4$accrualIntensity, c(23.809524, 35.714286), tolerance = 1e-07) expect_equal(accrualTime4$accrualIntensityRelative, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime4$maxNumberOfSubjects, 1000) expect_equal(accrualTime4$remainingTime, 24) 
expect_equal(accrualTime4$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime4), NA))) expect_output(print(accrualTime4)$show()) invisible(capture.output(expect_error(summary(accrualTime4), NA))) expect_output(summary(accrualTime4)$show()) accrualTime4CodeBased <- eval(parse(text = getObjectRCode(accrualTime4, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime4CodeBased$endOfAccrualIsUserDefined, accrualTime4$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$followUpTimeMustBeUserDefined, accrualTime4$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime4$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime4$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$absoluteAccrualIntensityEnabled, accrualTime4$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$accrualTime, accrualTime4$accrualTime, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$accrualIntensity, accrualTime4$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$accrualIntensityRelative, accrualTime4$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$maxNumberOfSubjects, accrualTime4$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$remainingTime, accrualTime4$remainingTime, tolerance = 1e-05) expect_equal(accrualTime4CodeBased$piecewiseAccrualEnabled, accrualTime4$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime4), "character") df <- as.data.frame(accrualTime4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) 
} accrualTime5 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33)) ## Comparison of the results of AccrualTime object 'accrualTime5' with expected results expect_equal(accrualTime5$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime5$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime5$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime5$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime5$accrualTime, c(0, 6, 30)) expect_equal(accrualTime5$accrualIntensity, c(22, 33)) expect_equal(accrualTime5$accrualIntensityRelative, NA_real_) expect_equal(accrualTime5$maxNumberOfSubjects, 924) expect_equal(accrualTime5$remainingTime, 24) expect_equal(accrualTime5$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime5), NA))) expect_output(print(accrualTime5)$show()) invisible(capture.output(expect_error(summary(accrualTime5), NA))) expect_output(summary(accrualTime5)$show()) accrualTime5CodeBased <- eval(parse(text = getObjectRCode(accrualTime5, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime5CodeBased$endOfAccrualIsUserDefined, accrualTime5$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$followUpTimeMustBeUserDefined, accrualTime5$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime5$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime5$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$absoluteAccrualIntensityEnabled, accrualTime5$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$accrualTime, accrualTime5$accrualTime, tolerance = 1e-05) 
expect_equal(accrualTime5CodeBased$accrualIntensity, accrualTime5$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$accrualIntensityRelative, accrualTime5$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$maxNumberOfSubjects, accrualTime5$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$remainingTime, accrualTime5$remainingTime, tolerance = 1e-05) expect_equal(accrualTime5CodeBased$piecewiseAccrualEnabled, accrualTime5$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime5), "character") df <- as.data.frame(accrualTime5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime6 <- getAccrualTime(list( "0 - <6" = 22, "6 - <=30" = 33 )) ## Comparison of the results of AccrualTime object 'accrualTime6' with expected results expect_equal(accrualTime6$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime6$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime6$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime6$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime6$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime6$accrualTime, c(0, 6, 30)) expect_equal(accrualTime6$accrualIntensity, c(22, 33)) expect_equal(accrualTime6$accrualIntensityRelative, NA_real_) expect_equal(accrualTime6$maxNumberOfSubjects, 924) expect_equal(accrualTime6$remainingTime, 24) expect_equal(accrualTime6$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime6), NA))) expect_output(print(accrualTime6)$show()) invisible(capture.output(expect_error(summary(accrualTime6), NA))) expect_output(summary(accrualTime6)$show()) accrualTime6CodeBased <- eval(parse(text = getObjectRCode(accrualTime6, stringWrapParagraphWidth = NULL))) 
expect_equal(accrualTime6CodeBased$endOfAccrualIsUserDefined, accrualTime6$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$followUpTimeMustBeUserDefined, accrualTime6$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime6$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime6$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$absoluteAccrualIntensityEnabled, accrualTime6$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$accrualTime, accrualTime6$accrualTime, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$accrualIntensity, accrualTime6$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$accrualIntensityRelative, accrualTime6$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$maxNumberOfSubjects, accrualTime6$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$remainingTime, accrualTime6$remainingTime, tolerance = 1e-05) expect_equal(accrualTime6CodeBased$piecewiseAccrualEnabled, accrualTime6$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime6), "character") df <- as.data.frame(accrualTime6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime7 <- getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33)) ## Comparison of the results of AccrualTime object 'accrualTime7' with expected results expect_equal(accrualTime7$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime7$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime7$maxNumberOfSubjectsIsUserDefined, FALSE) 
expect_equal(accrualTime7$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime7$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime7$accrualTime, c(0, 6, 30)) expect_equal(accrualTime7$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime7$accrualIntensityRelative, NA_real_) expect_equal(accrualTime7$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime7$remainingTime, NA_real_) expect_equal(accrualTime7$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime7), NA))) expect_output(print(accrualTime7)$show()) invisible(capture.output(expect_error(summary(accrualTime7), NA))) expect_output(summary(accrualTime7)$show()) accrualTime7CodeBased <- eval(parse(text = getObjectRCode(accrualTime7, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime7CodeBased$endOfAccrualIsUserDefined, accrualTime7$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime7CodeBased$followUpTimeMustBeUserDefined, accrualTime7$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime7CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime7$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime7CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime7$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime7CodeBased$absoluteAccrualIntensityEnabled, accrualTime7$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime7CodeBased$accrualTime, accrualTime7$accrualTime, tolerance = 1e-05) expect_equal(accrualTime7CodeBased$accrualIntensity, accrualTime7$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime7CodeBased$accrualIntensityRelative, accrualTime7$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime7CodeBased$maxNumberOfSubjects, accrualTime7$maxNumberOfSubjects, tolerance = 1e-05) 
expect_equal(accrualTime7CodeBased$remainingTime, accrualTime7$remainingTime, tolerance = 1e-05) expect_equal(accrualTime7CodeBased$piecewiseAccrualEnabled, accrualTime7$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime7), "character") df <- as.data.frame(accrualTime7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime8 <- getAccrualTime(list( "0 - <6" = 0.22, "6 - <=30" = 0.33 )) ## Comparison of the results of AccrualTime object 'accrualTime8' with expected results expect_equal(accrualTime8$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime8$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime8$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime8$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime8$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime8$accrualTime, c(0, 6, 30)) expect_equal(accrualTime8$accrualIntensity, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime8$accrualIntensityRelative, NA_real_) expect_equal(accrualTime8$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime8$remainingTime, NA_real_) expect_equal(accrualTime8$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime8), NA))) expect_output(print(accrualTime8)$show()) invisible(capture.output(expect_error(summary(accrualTime8), NA))) expect_output(summary(accrualTime8)$show()) accrualTime8CodeBased <- eval(parse(text = getObjectRCode(accrualTime8, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime8CodeBased$endOfAccrualIsUserDefined, accrualTime8$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$followUpTimeMustBeUserDefined, accrualTime8$followUpTimeMustBeUserDefined, tolerance = 1e-05) 
expect_equal(accrualTime8CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime8$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime8$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$absoluteAccrualIntensityEnabled, accrualTime8$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$accrualTime, accrualTime8$accrualTime, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$accrualIntensity, accrualTime8$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$accrualIntensityRelative, accrualTime8$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$maxNumberOfSubjects, accrualTime8$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$remainingTime, accrualTime8$remainingTime, tolerance = 1e-05) expect_equal(accrualTime8CodeBased$piecewiseAccrualEnabled, accrualTime8$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime8), "character") df <- as.data.frame(accrualTime8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime9 <- getAccrualTime( accrualTime = c(0, 6), accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000 ) ## Comparison of the results of AccrualTime object 'accrualTime9' with expected results expect_equal(accrualTime9$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime9$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime9$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime9$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime9$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime9$accrualTime, c(0, 6, 32.30303), tolerance = 1e-07) expect_equal(accrualTime9$accrualIntensity, c(22, 33)) 
expect_equal(accrualTime9$accrualIntensityRelative, NA_real_) expect_equal(accrualTime9$maxNumberOfSubjects, 1000) expect_equal(accrualTime9$remainingTime, 26.30303, tolerance = 1e-07) expect_equal(accrualTime9$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime9), NA))) expect_output(print(accrualTime9)$show()) invisible(capture.output(expect_error(summary(accrualTime9), NA))) expect_output(summary(accrualTime9)$show()) accrualTime9CodeBased <- eval(parse(text = getObjectRCode(accrualTime9, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime9CodeBased$endOfAccrualIsUserDefined, accrualTime9$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$followUpTimeMustBeUserDefined, accrualTime9$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime9$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime9$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$absoluteAccrualIntensityEnabled, accrualTime9$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$accrualTime, accrualTime9$accrualTime, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$accrualIntensity, accrualTime9$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$accrualIntensityRelative, accrualTime9$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$maxNumberOfSubjects, accrualTime9$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$remainingTime, accrualTime9$remainingTime, tolerance = 1e-05) expect_equal(accrualTime9CodeBased$piecewiseAccrualEnabled, accrualTime9$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime9), "character") df <- 
as.data.frame(accrualTime9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime10 <- getAccrualTime(list( "0 - <6" = 22, "6" = 33 ), maxNumberOfSubjects = 1000 ) ## Comparison of the results of AccrualTime object 'accrualTime10' with expected results expect_equal(accrualTime10$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime10$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime10$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime10$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime10$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime10$accrualTime, c(0, 6, 32.30303), tolerance = 1e-07) expect_equal(accrualTime10$accrualIntensity, c(22, 33)) expect_equal(accrualTime10$accrualIntensityRelative, NA_real_) expect_equal(accrualTime10$maxNumberOfSubjects, 1000) expect_equal(accrualTime10$remainingTime, 26.30303, tolerance = 1e-07) expect_equal(accrualTime10$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime10), NA))) expect_output(print(accrualTime10)$show()) invisible(capture.output(expect_error(summary(accrualTime10), NA))) expect_output(summary(accrualTime10)$show()) accrualTime10CodeBased <- eval(parse(text = getObjectRCode(accrualTime10, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime10CodeBased$endOfAccrualIsUserDefined, accrualTime10$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$followUpTimeMustBeUserDefined, accrualTime10$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime10$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, 
accrualTime10$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$absoluteAccrualIntensityEnabled, accrualTime10$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$accrualTime, accrualTime10$accrualTime, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$accrualIntensity, accrualTime10$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$accrualIntensityRelative, accrualTime10$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$maxNumberOfSubjects, accrualTime10$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$remainingTime, accrualTime10$remainingTime, tolerance = 1e-05) expect_equal(accrualTime10CodeBased$piecewiseAccrualEnabled, accrualTime10$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime10), "character") df <- as.data.frame(accrualTime10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime12 <- getAccrualTime(list( "0 - <6" = 0.22, "6 - <=30" = 0.33 ), maxNumberOfSubjects = 1000 ) ## Comparison of the results of AccrualTime object 'accrualTime12' with expected results expect_equal(accrualTime12$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime12$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime12$maxNumberOfSubjectsIsUserDefined, TRUE) expect_equal(accrualTime12$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime12$absoluteAccrualIntensityEnabled, FALSE) expect_equal(accrualTime12$accrualTime, c(0, 6, 30)) expect_equal(accrualTime12$accrualIntensity, c(23.809524, 35.714286), tolerance = 1e-07) expect_equal(accrualTime12$accrualIntensityRelative, c(0.22, 0.33), tolerance = 1e-07) expect_equal(accrualTime12$maxNumberOfSubjects, 1000) expect_equal(accrualTime12$remainingTime, 24) 
expect_equal(accrualTime12$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime12), NA))) expect_output(print(accrualTime12)$show()) invisible(capture.output(expect_error(summary(accrualTime12), NA))) expect_output(summary(accrualTime12)$show()) accrualTime12CodeBased <- eval(parse(text = getObjectRCode(accrualTime12, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime12CodeBased$endOfAccrualIsUserDefined, accrualTime12$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$followUpTimeMustBeUserDefined, accrualTime12$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime12$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime12$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$absoluteAccrualIntensityEnabled, accrualTime12$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$accrualTime, accrualTime12$accrualTime, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$accrualIntensity, accrualTime12$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$accrualIntensityRelative, accrualTime12$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$maxNumberOfSubjects, accrualTime12$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$remainingTime, accrualTime12$remainingTime, tolerance = 1e-05) expect_equal(accrualTime12CodeBased$piecewiseAccrualEnabled, accrualTime12$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime12), "character") df <- as.data.frame(accrualTime12) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime12) expect_true(is.matrix(mtx)) 
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime13 <- getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33)) ## Comparison of the results of AccrualTime object 'accrualTime13' with expected results expect_equal(accrualTime13$endOfAccrualIsUserDefined, FALSE) expect_equal(accrualTime13$followUpTimeMustBeUserDefined, TRUE) expect_equal(accrualTime13$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime13$maxNumberOfSubjectsCanBeCalculatedDirectly, FALSE) expect_equal(accrualTime13$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime13$accrualTime, c(0, 6)) expect_equal(accrualTime13$accrualIntensity, c(22, 33)) expect_equal(accrualTime13$accrualIntensityRelative, NA_real_) expect_equal(accrualTime13$maxNumberOfSubjects, NA_real_) expect_equal(accrualTime13$remainingTime, NA_real_) expect_equal(accrualTime13$piecewiseAccrualEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime13), NA))) expect_output(print(accrualTime13)$show()) invisible(capture.output(expect_error(summary(accrualTime13), NA))) expect_output(summary(accrualTime13)$show()) accrualTime13CodeBased <- eval(parse(text = getObjectRCode(accrualTime13, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime13CodeBased$endOfAccrualIsUserDefined, accrualTime13$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$followUpTimeMustBeUserDefined, accrualTime13$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime13$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime13$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$absoluteAccrualIntensityEnabled, accrualTime13$absoluteAccrualIntensityEnabled, tolerance = 1e-05) 
expect_equal(accrualTime13CodeBased$accrualTime, accrualTime13$accrualTime, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$accrualIntensity, accrualTime13$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$accrualIntensityRelative, accrualTime13$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$maxNumberOfSubjects, accrualTime13$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$remainingTime, accrualTime13$remainingTime, tolerance = 1e-05) expect_equal(accrualTime13CodeBased$piecewiseAccrualEnabled, accrualTime13$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime13), "character") df <- as.data.frame(accrualTime13) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime13) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } accrualTime14 <- getAccrualTime(list( "0 - <6" = 22, "6 - <=30" = 33 )) ## Comparison of the results of AccrualTime object 'accrualTime14' with expected results expect_equal(accrualTime14$endOfAccrualIsUserDefined, TRUE) expect_equal(accrualTime14$followUpTimeMustBeUserDefined, FALSE) expect_equal(accrualTime14$maxNumberOfSubjectsIsUserDefined, FALSE) expect_equal(accrualTime14$maxNumberOfSubjectsCanBeCalculatedDirectly, TRUE) expect_equal(accrualTime14$absoluteAccrualIntensityEnabled, TRUE) expect_equal(accrualTime14$accrualTime, c(0, 6, 30)) expect_equal(accrualTime14$accrualIntensity, c(22, 33)) expect_equal(accrualTime14$accrualIntensityRelative, NA_real_) expect_equal(accrualTime14$maxNumberOfSubjects, 924) expect_equal(accrualTime14$remainingTime, 24) expect_equal(accrualTime14$piecewiseAccrualEnabled, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(accrualTime14), NA))) expect_output(print(accrualTime14)$show()) invisible(capture.output(expect_error(summary(accrualTime14), NA))) 
expect_output(summary(accrualTime14)$show()) accrualTime14CodeBased <- eval(parse(text = getObjectRCode(accrualTime14, stringWrapParagraphWidth = NULL))) expect_equal(accrualTime14CodeBased$endOfAccrualIsUserDefined, accrualTime14$endOfAccrualIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$followUpTimeMustBeUserDefined, accrualTime14$followUpTimeMustBeUserDefined, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$maxNumberOfSubjectsIsUserDefined, accrualTime14$maxNumberOfSubjectsIsUserDefined, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$maxNumberOfSubjectsCanBeCalculatedDirectly, accrualTime14$maxNumberOfSubjectsCanBeCalculatedDirectly, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$absoluteAccrualIntensityEnabled, accrualTime14$absoluteAccrualIntensityEnabled, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$accrualTime, accrualTime14$accrualTime, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$accrualIntensity, accrualTime14$accrualIntensity, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$accrualIntensityRelative, accrualTime14$accrualIntensityRelative, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$maxNumberOfSubjects, accrualTime14$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$remainingTime, accrualTime14$remainingTime, tolerance = 1e-05) expect_equal(accrualTime14CodeBased$piecewiseAccrualEnabled, accrualTime14$piecewiseAccrualEnabled, tolerance = 1e-05) expect_type(names(accrualTime14), "character") df <- as.data.frame(accrualTime14) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(accrualTime14) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("Testing 'getAccrualTime': check expected warnings and errors", { # @refFS[Tab.]{fs:tab:output:getAccrualTime} expect_warning(getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33)), paste0("The specified accrual time 
and intensity cannot be supplemented ",
    "automatically with the missing information; therefore further calculations are not possible"),
fixed = TRUE)

# Superfluous trailing accrual intensity values (unreachable because
# 'maxNumberOfSubjects' is exhausted earlier) must be reported.
expect_warning(getAccrualTime(accrualTime = c(0, 24), accrualIntensity = c(30, 45), maxNumberOfSubjects = 720),
    "Last accrual intensity value (45) ignored",
    fixed = TRUE)

.skipTestIfDisabled()

# The calls below emit more than one warning; only the message under test is
# asserted, the remaining ones are silenced via suppressWarnings().
suppressWarnings(expect_warning(getAccrualTime(accrualTime = c(0, 24, 30), accrualIntensity = c(30, 45, 55), maxNumberOfSubjects = 720),
    "Last 2 accrual intensity values (45, 55) ignored",
    fixed = TRUE))

suppressWarnings(expect_warning(getAccrualTime(accrualTime = c(0, 24, 30, 40), accrualIntensity = c(30, 45, 55, 66), maxNumberOfSubjects = 720),
    "Last 2 accrual time values (30, 40) ignored",
    fixed = TRUE))

suppressWarnings(expect_warning(getAccrualTime(accrualTime = c(0, 24, 30, 40), accrualIntensity = c(30, 45, 55, 66), maxNumberOfSubjects = 720),
    "Last 3 accrual intensity values (45, 55, 66) ignored",
    fixed = TRUE))

# A leading 'accrualIntensity' of 0 is pointless and must trigger a warning.
expect_warning(getAccrualTime(accrualTime = c(0, 6, 15, 25), accrualIntensity = c(0, 22, 33)),
    "It makes no sense to start 'accrualIntensity' (0, 22, 33) with 0",
    fixed = TRUE)

# An accrual definition without any positive intensity is an error.
expect_error(getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0)),
    "Illegal argument: at least one 'accrualIntensity' value must be > 0",
    fixed = TRUE)

# 'maxNumberOfSubjects' must agree with the subject total implied by the
# accrual definition (here 6 * 22 + 24 * 33 = 924, not 1000).
expect_error(getAccrualTime(
    accrualTime = c(0, 6, 30),
    accrualIntensity = c(22, 33),
    maxNumberOfSubjects = 1000
), paste0(
    "Conflicting arguments: 'maxNumberOfSubjects' (1000) disagrees with the defined ",
    "accrual time (0, 6, 30) and intensity: 6 * 22 + 24 * 33 = 924"
), fixed = TRUE)
})

# Accrual may also be specified list-wise: list names define the time
# intervals, list values the accrual intensities per interval.
test_that("Testing 'getAccrualTime': list-wise definition", {
    accrualTime1 <- list(
        "0 - <12" = 15,
        "12 - <13" = 21,
        "13 - <14" = 27,
        "14 - <15" = 33,
        "15 - <16" = 39,
        ">=16" = 45
    )
    # @refFS[Tab.]{fs:tab:output:getAccrualTime}
    # The end of accrual (40.5556) is derived from 'maxNumberOfSubjects' = 1405.
    accrualTime4 <- getAccrualTime(accrualTime = accrualTime1, maxNumberOfSubjects = 1405)
    expect_equal(accrualTime4$accrualTime, c(0, 12, 13, 14, 15, 16, 40.55555556))
expect_equal(accrualTime4$accrualIntensity, c(15, 21, 27, 33, 39, 45))
expect_equal(accrualTime4$remainingTime, 24.55555556)

.skipTestIfDisabled()

# An open last interval written as "16 - ?" must behave exactly like ">=16":
# the resulting object is expected to match 'accrualTime4' above.
accrualTime2 <- list(
    "0 - <12" = 15,
    "12 - <13" = 21,
    "13 - <14" = 27,
    "14 - <15" = 33,
    "15 - <16" = 39,
    "16 - ?" = 45
)
accrualTime5 <- getAccrualTime(accrualTime = accrualTime2, maxNumberOfSubjects = 1405)
expect_equal(accrualTime5$accrualTime, c(0, 12, 13, 14, 15, 16, 40.55555556))
expect_equal(accrualTime5$accrualIntensity, c(15, 21, 27, 33, 39, 45))
expect_equal(accrualTime5$remainingTime, 24.55555556)

# Same checks with a three-interval definition.
accrualTime3 <- list(
    "0 - <11" = 20,
    "11 - <16" = 40,
    ">=16" = 60
)
accrualTime6 <- getAccrualTime(accrualTime = accrualTime3, maxNumberOfSubjects = 800)
expect_equal(accrualTime6$accrualTime, c(0, 11, 16, 22.3333333))
expect_equal(accrualTime6$accrualIntensity, c(20, 40, 60))
expect_equal(accrualTime6$remainingTime, 6.33333333)

# "16 - ?" variant of the three-interval definition; results must match
# 'accrualTime6' above.
accrualTime7 <- list(
    "0 - <11" = 20,
    "11 - <16" = 40,
    "16 - ?" = 60
)
accrualTime8 <- getAccrualTime(accrualTime = accrualTime7, maxNumberOfSubjects = 800)
expect_equal(accrualTime8$accrualTime, c(0, 11, 16, 22.3333333))
expect_equal(accrualTime8$accrualIntensity, c(20, 40, 60))
expect_equal(accrualTime8$remainingTime, 6.33333333)
})

test_that("Testing 'getPiecewiseSurvivalTime': mixed arguments", {
    # @refFS[Tab.]{fs:tab:output:getPiecewiseSurvivalTime}
    # Only 'median1' and 'hazardRatio' are supplied; the remaining
    # parameters are checked below against their derived values.
    pwSurvivalTime1 <- getPiecewiseSurvivalTime(median1 = 37, hazardRatio = 0.8)

    ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime1' with expected results
    expect_equal(pwSurvivalTime1$piecewiseSurvivalTime, NA_real_)
    expect_equal(pwSurvivalTime1$lambda1, 0.018733708, tolerance = 1e-07)
    expect_equal(pwSurvivalTime1$lambda2, 0.023417134, tolerance = 1e-07)
    expect_equal(pwSurvivalTime1$hazardRatio, 0.8, tolerance = 1e-07)
    expect_equal(pwSurvivalTime1$pi1, NA_real_)
    expect_equal(pwSurvivalTime1$pi2, NA_real_)
    expect_equal(pwSurvivalTime1$median1, 37)
    expect_equal(pwSurvivalTime1$median2, 29.6, tolerance = 1e-07)
expect_equal(pwSurvivalTime1$eventTime, NA_real_)
expect_equal(pwSurvivalTime1$kappa, 1)
expect_equal(pwSurvivalTime1$piecewiseSurvivalEnabled, FALSE)
expect_equal(pwSurvivalTime1$delayedResponseAllowed, FALSE)
expect_equal(pwSurvivalTime1$delayedResponseEnabled, FALSE)

# The extended checks below only run when the complete unit test set is enabled.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print() and summary() must work without raising an error
    # (expect_error(..., NA) asserts the absence of an error).
    invisible(capture.output(expect_error(print(pwSurvivalTime1), NA)))
    expect_output(print(pwSurvivalTime1)$show())
    invisible(capture.output(expect_error(summary(pwSurvivalTime1), NA)))
    expect_output(summary(pwSurvivalTime1)$show())

    # Round trip: evaluating the R code generated by getObjectRCode() must
    # reproduce an object with identical parameter values.
    pwSurvivalTime1CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime1, stringWrapParagraphWidth = NULL)))
    expect_equal(pwSurvivalTime1CodeBased$piecewiseSurvivalTime, pwSurvivalTime1$piecewiseSurvivalTime, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$lambda1, pwSurvivalTime1$lambda1, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$lambda2, pwSurvivalTime1$lambda2, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$hazardRatio, pwSurvivalTime1$hazardRatio, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$pi1, pwSurvivalTime1$pi1, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$pi2, pwSurvivalTime1$pi2, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$median1, pwSurvivalTime1$median1, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$median2, pwSurvivalTime1$median2, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$eventTime, pwSurvivalTime1$eventTime, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$kappa, pwSurvivalTime1$kappa, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime1$piecewiseSurvivalEnabled, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$delayedResponseAllowed, pwSurvivalTime1$delayedResponseAllowed, tolerance = 1e-05)
    expect_equal(pwSurvivalTime1CodeBased$delayedResponseEnabled, pwSurvivalTime1$delayedResponseEnabled, tolerance = 1e-05)

    # Basic structural checks on the result object:
    expect_type(names(pwSurvivalTime1),
"character") df <- as.data.frame(pwSurvivalTime1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime2 <- getPiecewiseSurvivalTime(lambda1 = 0.01873371, median2 = 29.6) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime2' with expected results expect_equal(pwSurvivalTime2$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime2$lambda1, 0.01873371, tolerance = 1e-07) expect_equal(pwSurvivalTime2$lambda2, 0.023417134, tolerance = 1e-07) expect_equal(pwSurvivalTime2$hazardRatio, 0.8000001, tolerance = 1e-07) expect_equal(pwSurvivalTime2$pi1, NA_real_) expect_equal(pwSurvivalTime2$pi2, NA_real_) expect_equal(pwSurvivalTime2$median1, 36.999995, tolerance = 1e-07) expect_equal(pwSurvivalTime2$median2, 29.6, tolerance = 1e-07) expect_equal(pwSurvivalTime2$eventTime, NA_real_) expect_equal(pwSurvivalTime2$kappa, 1) expect_equal(pwSurvivalTime2$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime2$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime2$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime2), NA))) expect_output(print(pwSurvivalTime2)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime2), NA))) expect_output(summary(pwSurvivalTime2)$show()) pwSurvivalTime2CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime2, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime2CodeBased$piecewiseSurvivalTime, pwSurvivalTime2$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$lambda1, pwSurvivalTime2$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$lambda2, pwSurvivalTime2$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$hazardRatio, pwSurvivalTime2$hazardRatio, tolerance = 1e-05) 
expect_equal(pwSurvivalTime2CodeBased$pi1, pwSurvivalTime2$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$pi2, pwSurvivalTime2$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$median1, pwSurvivalTime2$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$median2, pwSurvivalTime2$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$eventTime, pwSurvivalTime2$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$kappa, pwSurvivalTime2$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime2$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$delayedResponseAllowed, pwSurvivalTime2$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime2CodeBased$delayedResponseEnabled, pwSurvivalTime2$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime2), "character") df <- as.data.frame(pwSurvivalTime2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(pwSurvivalTime2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } pwSurvivalTime3 <- getPiecewiseSurvivalTime(median1 = 37, lambda2 = 0.02341713) ## Comparison of the results of PiecewiseSurvivalTime object 'pwSurvivalTime3' with expected results expect_equal(pwSurvivalTime3$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime3$lambda1, 0.018733708, tolerance = 1e-07) expect_equal(pwSurvivalTime3$lambda2, 0.02341713, tolerance = 1e-07) expect_equal(pwSurvivalTime3$hazardRatio, 0.80000015, tolerance = 1e-07) expect_equal(pwSurvivalTime3$pi1, NA_real_) expect_equal(pwSurvivalTime3$pi2, NA_real_) expect_equal(pwSurvivalTime3$median1, 37) expect_equal(pwSurvivalTime3$median2, 29.600006, tolerance = 1e-07) expect_equal(pwSurvivalTime3$eventTime, NA_real_) expect_equal(pwSurvivalTime3$kappa, 1) expect_equal(pwSurvivalTime3$piecewiseSurvivalEnabled, FALSE) 
expect_equal(pwSurvivalTime3$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime3$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime3), NA))) expect_output(print(pwSurvivalTime3)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime3), NA))) expect_output(summary(pwSurvivalTime3)$show()) pwSurvivalTime3CodeBased <- eval(parse(text = getObjectRCode(pwSurvivalTime3, stringWrapParagraphWidth = NULL))) expect_equal(pwSurvivalTime3CodeBased$piecewiseSurvivalTime, pwSurvivalTime3$piecewiseSurvivalTime, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$lambda1, pwSurvivalTime3$lambda1, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$lambda2, pwSurvivalTime3$lambda2, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$hazardRatio, pwSurvivalTime3$hazardRatio, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$pi1, pwSurvivalTime3$pi1, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$pi2, pwSurvivalTime3$pi2, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$median1, pwSurvivalTime3$median1, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$median2, pwSurvivalTime3$median2, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$eventTime, pwSurvivalTime3$eventTime, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$kappa, pwSurvivalTime3$kappa, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$piecewiseSurvivalEnabled, pwSurvivalTime3$piecewiseSurvivalEnabled, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$delayedResponseAllowed, pwSurvivalTime3$delayedResponseAllowed, tolerance = 1e-05) expect_equal(pwSurvivalTime3CodeBased$delayedResponseEnabled, pwSurvivalTime3$delayedResponseEnabled, tolerance = 1e-05) expect_type(names(pwSurvivalTime3), "character") df <- as.data.frame(pwSurvivalTime3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- 
as.matrix(pwSurvivalTime3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } expect_warning(getPiecewiseSurvivalTime(hazardRatio = c(0.6, 0.8), pi1 = 0.3), "'hazardRatio' (0.6, 0.8) will be ignored because it will be calculated", fixed = TRUE ) }) rpact/tests/testthat/test-f_core_utilities.R0000644000176200001440000024307114370207346021032 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_core_utilities.R ## | Creation date: 06 February 2023, 12:11:55 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Result Object Print Output") test_that("The output does not contain any issues", { expect_equal(sum(grepl("ISSUES", capture.output(getDesignGroupSequential()$show()))), 0) expect_equal(sum(grepl("ISSUES", capture.output(getDesignInverseNormal(kMax = 4)$show()))), 0) expect_equal(sum(grepl("ISSUES", capture.output(getDesignFisher()$show()))), 0) expect_equal(sum(grepl("ISSUES", capture.output(getSampleSizeMeans(getDesignGroupSequential())$show()))), 0) expect_equal(sum(grepl("ISSUES", capture.output(getSampleSizeRates()$show()))), 0) expect_equal(sum(grepl("ISSUES", capture.output(getSampleSizeSurvival(getDesignInverseNormal(kMax = 2))$show()))), 0) }) test_plan_section("Testing Core Utility Functions") test_that("'getValidatedInformationRates': 'informationRates' must be generated correctly based on 
specified 'kMax'", { .skipTestIfDisabled() design1 <- getTestDesign(kMax = 1L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design1), 1, tolerance = 1e-08) design2 <- getTestDesign(kMax = 2L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design2), c(0.5, 1), tolerance = 1e-08) design3 <- getTestDesign(kMax = 3L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design3), c(0.33333333, 0.66666667, 1), tolerance = 1e-08) design4 <- getTestDesign(kMax = 4L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design4), c(0.25, 0.5, 0.75, 1), tolerance = 1e-08) design5 <- getTestDesign(kMax = 5L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design5), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-08) design6 <- getTestDesign(kMax = 6L, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design6), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-08) design7 <- getTestDesign(kMax = 1L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design7), 1, tolerance = 1e-08) design8 <- getTestDesign(kMax = 2L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design8), c(0.5, 1), tolerance = 1e-08) design9 <- getTestDesign(kMax = 3L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design9), c(0.33333333, 0.66666667, 1), tolerance = 1e-08) design10 <- getTestDesign(kMax = 4L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design10), c(0.25, 0.5, 0.75, 1), tolerance = 1e-08) design11 <- getTestDesign(kMax = 5L, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design11), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-08) design12 <- getTestDesign(kMax = 6L, 
designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design12), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-08) design13 <- getTestDesign(kMax = 1L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design13), 1, tolerance = 1e-08) design14 <- getTestDesign(kMax = 2L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design14), c(0.5, 1), tolerance = 1e-08) design15 <- getTestDesign(kMax = 3L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design15), c(0.33333333, 0.66666667, 1), tolerance = 1e-08) design16 <- getTestDesign(kMax = 4L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design16), c(0.25, 0.5, 0.75, 1), tolerance = 1e-08) design17 <- getTestDesign(kMax = 5L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design17), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-08) design18 <- getTestDesign(kMax = 6L, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design18), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-08) }) test_that("'getValidatedInformationRates': 'informationRates' must be set correctly based on specified 'informationRates'", { .skipTestIfDisabled() design19 <- getTestDesign(informationRates = 1, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design19), 1, tolerance = 1e-07) design20 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design20), c(0.4, 1), tolerance = 1e-07) design21 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design21), c(0.26666667, 0.53333333, 1), tolerance = 1e-07) design22 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass 
= "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design22), c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) design23 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design23), c(0.16, 0.32, 0.48, 0.64, 1), tolerance = 1e-07) design24 <- getTestDesign(informationRates = c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design24), c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), tolerance = 1e-07) design25 <- getTestDesign(informationRates = 1, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design25), 1, tolerance = 1e-07) design26 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design26), c(0.4, 1), tolerance = 1e-07) design27 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design27), c(0.26666667, 0.53333333, 1), tolerance = 1e-07) design28 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design28), c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) design29 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design29), c(0.16, 0.32, 0.48, 0.64, 1), tolerance = 1e-07) design30 <- getTestDesign(informationRates = c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design30), c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), tolerance = 1e-07) design31 <- getTestDesign(informationRates = 1, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design31), 1, tolerance = 1e-07) 
design32 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design32), c(0.4, 1), tolerance = 1e-07) design33 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design33), c(0.26666667, 0.53333333, 1), tolerance = 1e-07) design34 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design34), c(0.2, 0.4, 0.6, 1), tolerance = 1e-07) design35 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design35), c(0.16, 0.32, 0.48, 0.64, 1), tolerance = 1e-07) design36 <- getTestDesign(informationRates = c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design36), c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), tolerance = 1e-07) design37 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design37), c(0.5, 1), tolerance = 1e-07) design38 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design38), c(0.33333333, 0.66666667, 1), tolerance = 1e-07) design39 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design39), c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) design40 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design40), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07) design41 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedInformationRates(design41), c(0.16666667, 0.33333333, 0.5, 
0.66666667, 0.83333333, 1), tolerance = 1e-07) design42 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design42), c(0.5, 1), tolerance = 1e-07) design43 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design43), c(0.33333333, 0.66666667, 1), tolerance = 1e-07) design44 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design44), c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) design45 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design45), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07) design46 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedInformationRates(design46), c(0.16666667, 0.33333333, 0.5, 0.66666667, 0.83333333, 1), tolerance = 1e-07) design47 <- getTestDesign(futilityBounds = 0.5, designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design47), c(0.5, 1), tolerance = 1e-07) design48 <- getTestDesign(futilityBounds = c(0.5, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design48), c(0.33333333, 0.66666667, 1), tolerance = 1e-07) design49 <- getTestDesign(futilityBounds = c(0.01, 0.5, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design49), c(0.25, 0.5, 0.75, 1), tolerance = 1e-07) design50 <- getTestDesign(futilityBounds = c(0.01, 0.01, 0.5, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design50), c(0.2, 0.4, 0.6, 0.8, 1), tolerance = 1e-07) design51 <- getTestDesign(futilityBounds = c(0.01, 0.01, 0.01, 0.5, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedInformationRates(design51), c(0.16666667, 0.33333333, 0.5, 0.66666667, 
0.83333333, 1), tolerance = 1e-07)
})

test_that("'getValidatedInformationRates': 'kMax' must be set correctly based on specified 'informationRates'", {
    .skipTestIfDisabled()

    # When 'informationRates' is specified, 'kMax' must equal the number of
    # information rates, independent of the design class.
    informationRatesList <- list(
        1,
        c(0.4, 1),
        c(0.26666667, 0.53333333, 1),
        c(0.2, 0.4, 0.6, 1),
        c(0.16, 0.32, 0.48, 0.64, 1),
        c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1)
    )
    for (designClass in c(
        "TrialDesignGroupSequential",
        "TrialDesignInverseNormal",
        "TrialDesignFisher"
    )) {
        for (informationRates in informationRatesList) {
            design <- getTestDesign(
                informationRates = informationRates,
                designClass = designClass
            )
            expect_equal(design$kMax, length(informationRates), tolerance = 1e-07)
        }
    }

    # When 'futilityBounds' is specified, 'kMax' must equal the number of
    # futility bounds plus one (the final stage has no futility bound).
    futilityBoundsList <- list(
        2,
        c(1, 2),
        c(0, 1, 2),
        c(0, 0, 1, 2),
        c(0, 0, 0, 1, 2)
    )
    for (designClass in c("TrialDesignGroupSequential", "TrialDesignInverseNormal")) {
        for (futilityBounds in futilityBoundsList) {
            design <- getTestDesign(
                futilityBounds = futilityBounds,
                designClass = designClass
            )
            expect_equal(design$kMax, length(futilityBounds) + 1, tolerance = 1e-07)
        }
    }

    # Fisher designs take their futility bounds on the p-value scale (alpha0),
    # but the same length-plus-one rule for 'kMax' applies.
    alpha0List <- list(
        0.5,
        c(0.5, 1),
        c(0.01, 0.5, 1),
        c(0.01, 0.01, 0.5, 1),
        c(0.01, 0.01, 0.01, 0.5, 1)
    )
    for (alpha0 in alpha0List) {
        design <- getTestDesign(futilityBounds = alpha0, designClass = "TrialDesignFisher")
        expect_equal(design$kMax, length(alpha0) + 1, tolerance = 1e-07)
    }
})

test_that("'getValidatedInformationRates': 'futilityBounds' must be generated correctly based on specified 'kMax'", {
    .skipTestIfDisabled()

    # Group sequential and inverse normal designs: the default futility bound
    # is -6 at each of the kMax - 1 interim stages; for kMax = 1 there are no
    # interim stages, so the result is numeric(0) (rep(-6, 0)).
    for (kMax in 1:20) {
        design <- getTestDesign(kMax = kMax, designClass = "TrialDesignGroupSequential")
        expect_equal(.getValidatedFutilityBounds(design), rep(-6, kMax - 1), tolerance = 1e-08)
    }
    for (kMax in 1:20) {
        design <- getTestDesign(kMax = kMax, designClass = "TrialDesignInverseNormal")
        expect_equal(.getValidatedFutilityBounds(design), rep(-6, kMax - 1), tolerance = 1e-08)
    }

    # Fisher designs: the default alpha0 value is 1 at each interim stage.
    for (kMax in 1:6) {
        design <- getTestDesign(kMax = kMax, designClass = "TrialDesignFisher")
        expect_equal(.getValidatedAlpha0Vec(design), rep(1, kMax - 1), tolerance = 1e-08)
    }
})

test_that("'getValidatedInformationRates': 'futilityBounds' must be set correctly based on specified 'futilityBounds'", {
    .skipTestIfDisabled()
    design131 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential")
    expect_equal(.getValidatedFutilityBounds(design131), 2, tolerance = 1e-07)
    design132 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential")
    expect_equal(.getValidatedFutilityBounds(design132), c(1, 2), tolerance = 1e-07)
    design133 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential")
    expect_equal(.getValidatedFutilityBounds(design133), c(0, 1, 2), tolerance = 1e-07)
    design134 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential")
    expect_equal(.getValidatedFutilityBounds(design134), c(0, 0, 1, 2), tolerance = 1e-07)
    design135 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential")
    expect_equal(.getValidatedFutilityBounds(design135), c(0, 0, 0, 1, 2), tolerance = 1e-07)
    design136 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential")
    expect_equal(.getValidatedFutilityBounds(design136), c(0, 0, 0, 0, 1, 2), tolerance = 1e-07)
    design137 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential")
    expect_equal(.getValidatedFutilityBounds(design137), c(0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07)
    design138 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential")
    expect_equal(.getValidatedFutilityBounds(design138), c(0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07)
    design139 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential")
expect_equal(.getValidatedFutilityBounds(design139), c(0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design140 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design140), c(0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design141 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design141), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design142 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design142), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design143 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design143), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design144 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design144), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design145 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design145), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design146 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design146), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design147 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") 
expect_equal(.getValidatedFutilityBounds(design147), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design148 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design148), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design149 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design149), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design150 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design150), 2, tolerance = 1e-07) design151 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design151), c(1, 2), tolerance = 1e-07) design152 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design152), c(0, 1, 2), tolerance = 1e-07) design153 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design153), c(0, 0, 1, 2), tolerance = 1e-07) design154 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design154), c(0, 0, 0, 1, 2), tolerance = 1e-07) design155 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design155), c(0, 0, 0, 0, 1, 2), tolerance = 1e-07) design156 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design156), c(0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design157 
<- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design157), c(0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design158 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design158), c(0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design159 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design159), c(0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design160 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design160), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design161 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design161), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design162 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design162), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design163 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design163), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design164 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design164), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design165 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") 
expect_equal(.getValidatedFutilityBounds(design165), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design166 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design166), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design167 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design167), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design168 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design168), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), tolerance = 1e-07) design169 <- getTestDesign(futilityBounds = 0.5, designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design169), 0.5, tolerance = 1e-07) design170 <- getTestDesign(futilityBounds = c(0.5, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design170), c(0.5, 1), tolerance = 1e-07) design171 <- getTestDesign(futilityBounds = c(0.01, 0.5, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design171), c(0.01, 0.5, 1), tolerance = 1e-07) design172 <- getTestDesign(futilityBounds = c(0.01, 0.01, 0.5, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design172), c(0.01, 0.01, 0.5, 1), tolerance = 1e-07) design173 <- getTestDesign(futilityBounds = c(0.01, 0.01, 0.01, 0.5, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design173), c(0.01, 0.01, 0.01, 0.5, 1), tolerance = 1e-07) design174 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design174), -6, 
tolerance = 1e-07) design175 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design175), c(-6, -6), tolerance = 1e-07) design176 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design176), c(-6, -6, -6), tolerance = 1e-07) design177 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design177), c(-6, -6, -6, -6), tolerance = 1e-07) design178 <- getTestDesign(informationRates = c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design178), c(-6, -6, -6, -6, -6), tolerance = 1e-07) design179 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design179), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-07) design180 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design180), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design181 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design181), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design182 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design182), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design183 <- getTestDesign(informationRates = c(0.072727273, 0.14545455, 
0.21818182, 0.29090909, 0.36363636, 0.43636364, 0.50909091, 0.58181818, 0.65454545, 0.72727273, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design183), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design184 <- getTestDesign(informationRates = c(0.066666667, 0.13333333, 0.2, 0.26666667, 0.33333333, 0.4, 0.46666667, 0.53333333, 0.6, 0.66666667, 0.73333333, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design184), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design185 <- getTestDesign(informationRates = c(0.061538462, 0.12307692, 0.18461538, 0.24615385, 0.30769231, 0.36923077, 0.43076923, 0.49230769, 0.55384615, 0.61538462, 0.67692308, 0.73846154, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design185), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design186 <- getTestDesign(informationRates = c(0.057142857, 0.11428571, 0.17142857, 0.22857143, 0.28571429, 0.34285714, 0.4, 0.45714286, 0.51428571, 0.57142857, 0.62857143, 0.68571429, 0.74285714, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design186), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design187 <- getTestDesign(informationRates = c(0.053333333, 0.10666667, 0.16, 0.21333333, 0.26666667, 0.32, 0.37333333, 0.42666667, 0.48, 0.53333333, 0.58666667, 0.64, 0.69333333, 0.74666667, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design187), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design188 <- getTestDesign(informationRates = c(0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design188), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance 
= 1e-07) design189 <- getTestDesign(informationRates = c(0.047058824, 0.094117647, 0.14117647, 0.18823529, 0.23529412, 0.28235294, 0.32941176, 0.37647059, 0.42352941, 0.47058824, 0.51764706, 0.56470588, 0.61176471, 0.65882353, 0.70588235, 0.75294118, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design189), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design190 <- getTestDesign(informationRates = c(0.044444444, 0.088888889, 0.13333333, 0.17777778, 0.22222222, 0.26666667, 0.31111111, 0.35555556, 0.4, 0.44444444, 0.48888889, 0.53333333, 0.57777778, 0.62222222, 0.66666667, 0.71111111, 0.75555556, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design190), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design191 <- getTestDesign(informationRates = c(0.042105263, 0.084210526, 0.12631579, 0.16842105, 0.21052632, 0.25263158, 0.29473684, 0.33684211, 0.37894737, 0.42105263, 0.46315789, 0.50526316, 0.54736842, 0.58947368, 0.63157895, 0.67368421, 0.71578947, 0.75789474, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design191), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design192 <- getTestDesign(informationRates = c(0.04, 0.08, 0.12, 0.16, 0.2, 0.24, 0.28, 0.32, 0.36, 0.4, 0.44, 0.48, 0.52, 0.56, 0.6, 0.64, 0.68, 0.72, 0.76, 1), designClass = "TrialDesignGroupSequential") expect_equal(.getValidatedFutilityBounds(design192), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design193 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design193), -6, tolerance = 1e-07) design194 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") 
expect_equal(.getValidatedFutilityBounds(design194), c(-6, -6), tolerance = 1e-07) design195 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design195), c(-6, -6, -6), tolerance = 1e-07) design196 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design196), c(-6, -6, -6, -6), tolerance = 1e-07) design197 <- getTestDesign(informationRates = c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design197), c(-6, -6, -6, -6, -6), tolerance = 1e-07) design198 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design198), c(-6, -6, -6, -6, -6, -6), tolerance = 1e-07) design199 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design199), c(-6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design200 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design200), c(-6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design201 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design201), c(-6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design202 <- getTestDesign(informationRates = c(0.072727273, 0.14545455, 0.21818182, 0.29090909, 0.36363636, 0.43636364, 0.50909091, 0.58181818, 0.65454545, 0.72727273, 1), designClass = "TrialDesignInverseNormal") 
expect_equal(.getValidatedFutilityBounds(design202), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design203 <- getTestDesign(informationRates = c(0.066666667, 0.13333333, 0.2, 0.26666667, 0.33333333, 0.4, 0.46666667, 0.53333333, 0.6, 0.66666667, 0.73333333, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design203), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design204 <- getTestDesign(informationRates = c(0.061538462, 0.12307692, 0.18461538, 0.24615385, 0.30769231, 0.36923077, 0.43076923, 0.49230769, 0.55384615, 0.61538462, 0.67692308, 0.73846154, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design204), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design205 <- getTestDesign(informationRates = c(0.057142857, 0.11428571, 0.17142857, 0.22857143, 0.28571429, 0.34285714, 0.4, 0.45714286, 0.51428571, 0.57142857, 0.62857143, 0.68571429, 0.74285714, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design205), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design206 <- getTestDesign(informationRates = c(0.053333333, 0.10666667, 0.16, 0.21333333, 0.26666667, 0.32, 0.37333333, 0.42666667, 0.48, 0.53333333, 0.58666667, 0.64, 0.69333333, 0.74666667, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design206), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design207 <- getTestDesign(informationRates = c(0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design207), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design208 <- getTestDesign(informationRates = c(0.047058824, 0.094117647, 0.14117647, 0.18823529, 0.23529412, 0.28235294, 0.32941176, 
0.37647059, 0.42352941, 0.47058824, 0.51764706, 0.56470588, 0.61176471, 0.65882353, 0.70588235, 0.75294118, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design208), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design209 <- getTestDesign(informationRates = c(0.044444444, 0.088888889, 0.13333333, 0.17777778, 0.22222222, 0.26666667, 0.31111111, 0.35555556, 0.4, 0.44444444, 0.48888889, 0.53333333, 0.57777778, 0.62222222, 0.66666667, 0.71111111, 0.75555556, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design209), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design210 <- getTestDesign(informationRates = c(0.042105263, 0.084210526, 0.12631579, 0.16842105, 0.21052632, 0.25263158, 0.29473684, 0.33684211, 0.37894737, 0.42105263, 0.46315789, 0.50526316, 0.54736842, 0.58947368, 0.63157895, 0.67368421, 0.71578947, 0.75789474, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design210), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design211 <- getTestDesign(informationRates = c(0.04, 0.08, 0.12, 0.16, 0.2, 0.24, 0.28, 0.32, 0.36, 0.4, 0.44, 0.48, 0.52, 0.56, 0.6, 0.64, 0.68, 0.72, 0.76, 1), designClass = "TrialDesignInverseNormal") expect_equal(.getValidatedFutilityBounds(design211), c(-6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6, -6), tolerance = 1e-07) design212 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design212), 1, tolerance = 1e-07) design213 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design213), c(1, 1), tolerance = 1e-07) design214 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") 
expect_equal(.getValidatedAlpha0Vec(design214), c(1, 1, 1), tolerance = 1e-07) design215 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design215), c(1, 1, 1, 1), tolerance = 1e-07) design216 <- getTestDesign(informationRates = c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), designClass = "TrialDesignFisher") expect_equal(.getValidatedAlpha0Vec(design216), c(1, 1, 1, 1, 1), tolerance = 1e-07) }) test_that("'getValidatedInformationRates': 'kMax' must be set correctly based on specified 'futilityBounds'", { .skipTestIfDisabled() design217 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design217) expect_equal(design217$kMax, 2, tolerance = 1e-07) design218 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design218) expect_equal(design218$kMax, 3, tolerance = 1e-07) design219 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design219) expect_equal(design219$kMax, 4, tolerance = 1e-07) design220 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design220) expect_equal(design220$kMax, 5, tolerance = 1e-07) design221 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design221) expect_equal(design221$kMax, 6, tolerance = 1e-07) design222 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design222) expect_equal(design222$kMax, 7, tolerance = 1e-07) design223 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design223) expect_equal(design223$kMax, 8, tolerance = 1e-07) design224 <- 
getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design224) expect_equal(design224$kMax, 9, tolerance = 1e-07) design225 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design225) expect_equal(design225$kMax, 10, tolerance = 1e-07) design226 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design226) expect_equal(design226$kMax, 11, tolerance = 1e-07) design227 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design227) expect_equal(design227$kMax, 12, tolerance = 1e-07) design228 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design228) expect_equal(design228$kMax, 13, tolerance = 1e-07) design229 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design229) expect_equal(design229$kMax, 14, tolerance = 1e-07) design230 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design230) expect_equal(design230$kMax, 15, tolerance = 1e-07) design231 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design231) expect_equal(design231$kMax, 16, tolerance = 1e-07) design232 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design232) expect_equal(design232$kMax, 17, tolerance = 1e-07) design233 <- getTestDesign(futilityBounds = c(0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design233) expect_equal(design233$kMax, 18, tolerance = 1e-07) design234 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design234) expect_equal(design234$kMax, 19, tolerance = 1e-07) design235 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design235) expect_equal(design235$kMax, 20, tolerance = 1e-07) design236 <- getTestDesign(futilityBounds = 2, designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design236) expect_equal(design236$kMax, 2, tolerance = 1e-07) design237 <- getTestDesign(futilityBounds = c(1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design237) expect_equal(design237$kMax, 3, tolerance = 1e-07) design238 <- getTestDesign(futilityBounds = c(0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design238) expect_equal(design238$kMax, 4, tolerance = 1e-07) design239 <- getTestDesign(futilityBounds = c(0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design239) expect_equal(design239$kMax, 5, tolerance = 1e-07) design240 <- getTestDesign(futilityBounds = c(0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design240) expect_equal(design240$kMax, 6, tolerance = 1e-07) design241 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design241) expect_equal(design241$kMax, 7, tolerance = 1e-07) design242 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design242) expect_equal(design242$kMax, 8, tolerance = 1e-07) design243 
<- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design243) expect_equal(design243$kMax, 9, tolerance = 1e-07) design244 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design244) expect_equal(design244$kMax, 10, tolerance = 1e-07) design245 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design245) expect_equal(design245$kMax, 11, tolerance = 1e-07) design246 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design246) expect_equal(design246$kMax, 12, tolerance = 1e-07) design247 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design247) expect_equal(design247$kMax, 13, tolerance = 1e-07) design248 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design248) expect_equal(design248$kMax, 14, tolerance = 1e-07) design249 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design249) expect_equal(design249$kMax, 15, tolerance = 1e-07) design250 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design250) expect_equal(design250$kMax, 16, tolerance = 1e-07) design251 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design251) expect_equal(design251$kMax, 17, tolerance = 1e-07) design252 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design252) expect_equal(design252$kMax, 18, tolerance = 1e-07) design253 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design253) expect_equal(design253$kMax, 19, tolerance = 1e-07) design254 <- getTestDesign(futilityBounds = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design254) expect_equal(design254$kMax, 20, tolerance = 1e-07) design255 <- getTestDesign(futilityBounds = 0.5, designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design255) expect_equal(design255$kMax, 2, tolerance = 1e-07) design256 <- getTestDesign(futilityBounds = c(0.5, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design256) expect_equal(design256$kMax, 3, tolerance = 1e-07) design257 <- getTestDesign(futilityBounds = c(0.01, 0.5, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design257) expect_equal(design257$kMax, 4, tolerance = 1e-07) design258 <- getTestDesign(futilityBounds = c(0.01, 0.01, 0.5, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design258) expect_equal(design258$kMax, 5, tolerance = 1e-07) design259 <- getTestDesign(futilityBounds = c(0.01, 0.01, 0.01, 0.5, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design259) expect_equal(design259$kMax, 6, tolerance = 1e-07) design260 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design260) expect_equal(design260$kMax, 2, tolerance = 1e-07) design261 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design261) expect_equal(design261$kMax, 3, tolerance = 1e-07) design262 <- getTestDesign(informationRates = c(0.2, 0.4, 
0.6, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design262) expect_equal(design262$kMax, 4, tolerance = 1e-07) design263 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design263) expect_equal(design263$kMax, 5, tolerance = 1e-07) design264 <- getTestDesign(informationRates = c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design264) expect_equal(design264$kMax, 6, tolerance = 1e-07) design265 <- getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design265) expect_equal(design265$kMax, 7, tolerance = 1e-07) design266 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design266) expect_equal(design266$kMax, 8, tolerance = 1e-07) design267 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design267) expect_equal(design267$kMax, 9, tolerance = 1e-07) design268 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design268) expect_equal(design268$kMax, 10, tolerance = 1e-07) design269 <- getTestDesign(informationRates = c(0.072727273, 0.14545455, 0.21818182, 0.29090909, 0.36363636, 0.43636364, 0.50909091, 0.58181818, 0.65454545, 0.72727273, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design269) expect_equal(design269$kMax, 11, tolerance = 1e-07) design270 <- getTestDesign(informationRates = c(0.066666667, 0.13333333, 0.2, 0.26666667, 0.33333333, 0.4, 
0.46666667, 0.53333333, 0.6, 0.66666667, 0.73333333, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design270) expect_equal(design270$kMax, 12, tolerance = 1e-07) design271 <- getTestDesign(informationRates = c(0.061538462, 0.12307692, 0.18461538, 0.24615385, 0.30769231, 0.36923077, 0.43076923, 0.49230769, 0.55384615, 0.61538462, 0.67692308, 0.73846154, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design271) expect_equal(design271$kMax, 13, tolerance = 1e-07) design272 <- getTestDesign(informationRates = c(0.057142857, 0.11428571, 0.17142857, 0.22857143, 0.28571429, 0.34285714, 0.4, 0.45714286, 0.51428571, 0.57142857, 0.62857143, 0.68571429, 0.74285714, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design272) expect_equal(design272$kMax, 14, tolerance = 1e-07) design273 <- getTestDesign(informationRates = c(0.053333333, 0.10666667, 0.16, 0.21333333, 0.26666667, 0.32, 0.37333333, 0.42666667, 0.48, 0.53333333, 0.58666667, 0.64, 0.69333333, 0.74666667, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design273) expect_equal(design273$kMax, 15, tolerance = 1e-07) design274 <- getTestDesign(informationRates = c(0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design274) expect_equal(design274$kMax, 16, tolerance = 1e-07) design275 <- getTestDesign(informationRates = c(0.047058824, 0.094117647, 0.14117647, 0.18823529, 0.23529412, 0.28235294, 0.32941176, 0.37647059, 0.42352941, 0.47058824, 0.51764706, 0.56470588, 0.61176471, 0.65882353, 0.70588235, 0.75294118, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design275) expect_equal(design275$kMax, 17, tolerance = 1e-07) design276 <- getTestDesign(informationRates = c(0.044444444, 0.088888889, 0.13333333, 0.17777778, 0.22222222, 0.26666667, 0.31111111, 0.35555556, 0.4, 
0.44444444, 0.48888889, 0.53333333, 0.57777778, 0.62222222, 0.66666667, 0.71111111, 0.75555556, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design276) expect_equal(design276$kMax, 18, tolerance = 1e-07) design277 <- getTestDesign(informationRates = c(0.042105263, 0.084210526, 0.12631579, 0.16842105, 0.21052632, 0.25263158, 0.29473684, 0.33684211, 0.37894737, 0.42105263, 0.46315789, 0.50526316, 0.54736842, 0.58947368, 0.63157895, 0.67368421, 0.71578947, 0.75789474, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design277) expect_equal(design277$kMax, 19, tolerance = 1e-07) design278 <- getTestDesign(informationRates = c(0.04, 0.08, 0.12, 0.16, 0.2, 0.24, 0.28, 0.32, 0.36, 0.4, 0.44, 0.48, 0.52, 0.56, 0.6, 0.64, 0.68, 0.72, 0.76, 1), designClass = "TrialDesignGroupSequential") .getValidatedFutilityBounds(design278) expect_equal(design278$kMax, 20, tolerance = 1e-07) design279 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design279) expect_equal(design279$kMax, 2, tolerance = 1e-07) design280 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design280) expect_equal(design280$kMax, 3, tolerance = 1e-07) design281 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design281) expect_equal(design281$kMax, 4, tolerance = 1e-07) design282 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design282) expect_equal(design282$kMax, 5, tolerance = 1e-07) design283 <- getTestDesign(informationRates = c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design283) expect_equal(design283$kMax, 6, tolerance = 1e-07) design284 <- 
getTestDesign(informationRates = c(0.11428571, 0.22857143, 0.34285714, 0.45714286, 0.57142857, 0.68571429, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design284) expect_equal(design284$kMax, 7, tolerance = 1e-07) design285 <- getTestDesign(informationRates = c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design285) expect_equal(design285$kMax, 8, tolerance = 1e-07) design286 <- getTestDesign(informationRates = c(0.088888889, 0.17777778, 0.26666667, 0.35555556, 0.44444444, 0.53333333, 0.62222222, 0.71111111, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design286) expect_equal(design286$kMax, 9, tolerance = 1e-07) design287 <- getTestDesign(informationRates = c(0.08, 0.16, 0.24, 0.32, 0.4, 0.48, 0.56, 0.64, 0.72, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design287) expect_equal(design287$kMax, 10, tolerance = 1e-07) design288 <- getTestDesign(informationRates = c(0.072727273, 0.14545455, 0.21818182, 0.29090909, 0.36363636, 0.43636364, 0.50909091, 0.58181818, 0.65454545, 0.72727273, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design288) expect_equal(design288$kMax, 11, tolerance = 1e-07) design289 <- getTestDesign(informationRates = c(0.066666667, 0.13333333, 0.2, 0.26666667, 0.33333333, 0.4, 0.46666667, 0.53333333, 0.6, 0.66666667, 0.73333333, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design289) expect_equal(design289$kMax, 12, tolerance = 1e-07) design290 <- getTestDesign(informationRates = c(0.061538462, 0.12307692, 0.18461538, 0.24615385, 0.30769231, 0.36923077, 0.43076923, 0.49230769, 0.55384615, 0.61538462, 0.67692308, 0.73846154, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design290) expect_equal(design290$kMax, 13, tolerance = 1e-07) design291 <- getTestDesign(informationRates = c(0.057142857, 0.11428571, 0.17142857, 
0.22857143, 0.28571429, 0.34285714, 0.4, 0.45714286, 0.51428571, 0.57142857, 0.62857143, 0.68571429, 0.74285714, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design291) expect_equal(design291$kMax, 14, tolerance = 1e-07) design292 <- getTestDesign(informationRates = c(0.053333333, 0.10666667, 0.16, 0.21333333, 0.26666667, 0.32, 0.37333333, 0.42666667, 0.48, 0.53333333, 0.58666667, 0.64, 0.69333333, 0.74666667, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design292) expect_equal(design292$kMax, 15, tolerance = 1e-07) design293 <- getTestDesign(informationRates = c(0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design293) expect_equal(design293$kMax, 16, tolerance = 1e-07) design294 <- getTestDesign(informationRates = c(0.047058824, 0.094117647, 0.14117647, 0.18823529, 0.23529412, 0.28235294, 0.32941176, 0.37647059, 0.42352941, 0.47058824, 0.51764706, 0.56470588, 0.61176471, 0.65882353, 0.70588235, 0.75294118, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design294) expect_equal(design294$kMax, 17, tolerance = 1e-07) design295 <- getTestDesign(informationRates = c(0.044444444, 0.088888889, 0.13333333, 0.17777778, 0.22222222, 0.26666667, 0.31111111, 0.35555556, 0.4, 0.44444444, 0.48888889, 0.53333333, 0.57777778, 0.62222222, 0.66666667, 0.71111111, 0.75555556, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design295) expect_equal(design295$kMax, 18, tolerance = 1e-07) design296 <- getTestDesign(informationRates = c(0.042105263, 0.084210526, 0.12631579, 0.16842105, 0.21052632, 0.25263158, 0.29473684, 0.33684211, 0.37894737, 0.42105263, 0.46315789, 0.50526316, 0.54736842, 0.58947368, 0.63157895, 0.67368421, 0.71578947, 0.75789474, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design296) expect_equal(design296$kMax, 19, 
tolerance = 1e-07) design297 <- getTestDesign(informationRates = c(0.04, 0.08, 0.12, 0.16, 0.2, 0.24, 0.28, 0.32, 0.36, 0.4, 0.44, 0.48, 0.52, 0.56, 0.6, 0.64, 0.68, 0.72, 0.76, 1), designClass = "TrialDesignInverseNormal") .getValidatedFutilityBounds(design297) expect_equal(design297$kMax, 20, tolerance = 1e-07) design298 <- getTestDesign(informationRates = c(0.4, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design298) expect_equal(design298$kMax, 2, tolerance = 1e-07) design299 <- getTestDesign(informationRates = c(0.26666667, 0.53333333, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design299) expect_equal(design299$kMax, 3, tolerance = 1e-07) design300 <- getTestDesign(informationRates = c(0.2, 0.4, 0.6, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design300) expect_equal(design300$kMax, 4, tolerance = 1e-07) design301 <- getTestDesign(informationRates = c(0.16, 0.32, 0.48, 0.64, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design301) expect_equal(design301$kMax, 5, tolerance = 1e-07) design302 <- getTestDesign(informationRates = c(0.1333, 0.26667, 0.4, 0.53333, 0.8667, 1), designClass = "TrialDesignFisher") .getValidatedAlpha0Vec(design302) expect_equal(design302$kMax, 6, tolerance = 1e-07) }) test_plan_section("Testing Utilities") test_that("Testing '.moveValue'", { expect_equal(.moveValue(c("A", "B", "C", "D", "E"), "E", "B"), c("A", "B", "E", "C", "D")) expect_equal(.moveValue(c("A", "B", "C", "D", "E"), "E", "A"), c("A", "E", "B", "C", "D")) expect_equal(.moveValue(c("A", "B", "C", "D", "E"), "A", "E"), c("B", "C", "D", "E", "A")) expect_equal(.moveValue(c("A", "B", "C", "D", "E"), "E", "E"), c("A", "B", "C", "D", "E")) expect_equal(.moveValue(c("A", "B", "C", "D", "E"), "A", "A"), c("A", "B", "C", "D", "E")) expect_equal(.moveValue(c("A"), "A", "A"), c("A")) }) test_that("Testing '.toCapitalized'", { expect_equal(.toCapitalized("zip code"), "Zip Code") expect_equal(.toCapitalized("state of 
the art"), "State of the Art") expect_equal(.toCapitalized("final and count"), "Final and Count") expect_equal(.toCapitalized("Hazard Ratio"), "Hazard Ratio") expect_equal(.toCapitalized("hazard ratio function"), "Hazard Ratio Function") }) test_that("Testing '.formatCamelCase'", { expect_equal(.formatCamelCase("hazardRatio", title = TRUE), "Hazard Ratio") expect_equal(.formatCamelCase("hazardRatio and informationRates", title = TRUE), "Hazard Ratio and Information Rates") expect_equal(.formatCamelCase("hazardRatio", title = FALSE), "hazard ratio") expect_equal(.formatCamelCase(" hazardRatio ", title = TRUE), " Hazard Ratio ") expect_equal(.formatCamelCase("Hazard", title = TRUE), "Hazard") expect_equal(.formatCamelCase("hazard", title = TRUE), "Hazard") expect_equal(.formatCamelCase("hazard", title = FALSE), "hazard") expect_equal(.formatCamelCase("Hazard", title = FALSE), "hazard") expect_equal(.formatCamelCase("Hazard Ratio", title = TRUE), "Hazard Ratio") expect_equal(.formatCamelCase(" hazard ratio ", title = TRUE), " Hazard Ratio ") expect_equal(.formatCamelCase("HazardRatio", title = FALSE), "hazard ratio") }) test_that("Testing '.equalsRegexpIgnoreCase'", { expect_equal(.equalsRegexpIgnoreCase("stage2", "^stages?$"), FALSE) expect_equal(.equalsRegexpIgnoreCase("stage", "^stages?$"), TRUE) expect_equal(.equalsRegexpIgnoreCase("stages", "^stages?$"), TRUE) expect_equal(.equalsRegexpIgnoreCase("Stage", "^stages?$"), TRUE) expect_equal(.equalsRegexpIgnoreCase("STAGES", "^stages?$"), TRUE) expect_equal(.equalsRegexpIgnoreCase("stages2", "^stages?$"), FALSE) expect_equal(.equalsRegexpIgnoreCase(" stages", "^stages?$"), FALSE) expect_equal(.equalsRegexpIgnoreCase("stages2", "stages?"), TRUE) expect_equal(.equalsRegexpIgnoreCase("1stage2", "stages?"), TRUE) }) test_that("Testing 'isUndefinedArgument' and 'isValidArgument'", { expect_equal(.isUndefinedArgument(NULL), TRUE) expect_equal(.isUndefinedArgument(numeric(0)), TRUE) expect_equal(.isUndefinedArgument(NA), 
TRUE) expect_equal(.isUndefinedArgument(NA_integer_), TRUE) expect_equal(.isUndefinedArgument(NA_real_), TRUE) expect_equal(.isUndefinedArgument(NA_complex_), TRUE) expect_equal(.isUndefinedArgument(NA_character_), TRUE) expect_equal(.isUndefinedArgument(c(NA, NA)), FALSE) expect_equal(.isUndefinedArgument(c(1, NA, NA)), FALSE) expect_equal(.isUndefinedArgument(c(NA, NA, 1)), FALSE) expect_equal(.isUndefinedArgument(1), FALSE) expect_equal(.isDefinedArgument(NULL), FALSE) expect_equal(.isDefinedArgument(numeric(0)), FALSE) expect_equal(.isDefinedArgument(NA), FALSE) expect_equal(.isDefinedArgument(NA_integer_), FALSE) expect_equal(.isDefinedArgument(NA_real_), FALSE) expect_equal(.isDefinedArgument(NA_complex_), FALSE) expect_equal(.isDefinedArgument(NA_character_), FALSE) expect_equal(.isDefinedArgument(c(NA, NA)), TRUE) expect_equal(.isDefinedArgument(c(1, NA, NA)), TRUE) expect_equal(.isDefinedArgument(c(NA, NA, 1)), TRUE) expect_equal(.isDefinedArgument(1), TRUE) expect_error(.isDefinedArgument(notExistingTestVariable, argumentExistsValidationEnabled = FALSE)) expect_error(.isDefinedArgument(notExistingTestVariable)) # skip_if_translated() # expect_error(.isDefinedArgument(notExistingTestVariable), # paste0("Missing argument: the object 'notExistingTestVariable' has not been defined anywhere. ", # "Please define it first, e.g., run 'notExistingTestVariable <- 1'"), fixed = TRUE) }) test_that("Result of 'setSeed(seed)' is working for different arguments, incl. 
NULL and NA", { # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} expect_false(is.null(.setSeed())) expect_false(is.na(.setSeed())) expect_true(is.numeric(.setSeed())) expect_false(is.null(.setSeed(NULL))) expect_false(is.na(.setSeed(NULL))) expect_true(is.numeric(.setSeed(NULL))) expect_false(is.null(.setSeed(NA))) expect_false(is.na(.setSeed(NA))) expect_true(is.numeric(.setSeed(NA))) expect_true(.setSeed() != .setSeed()) expect_equal(.setSeed(123), 123) expect_equal(.setSeed(0), 0) expect_equal(.setSeed(5e-5), 5e-5) }) test_that("Testing '.getInputForZeroOutputInsideTolerance''", { input <- 99 tolerance <- 1e-05 epsilon <- 1e-08 expect_equal(.getInputForZeroOutputInsideTolerance(input, tolerance, tolerance), input) expect_equal(.getInputForZeroOutputInsideTolerance(input, tolerance + epsilon, tolerance), NA_real_) expect_equal(.getInputForZeroOutputInsideTolerance(input, tolerance - epsilon, tolerance), input) }) test_that("Testing '.arrayToString'", { expect_equal(.arrayToString(NA, vectorLookAndFeelEnabled = TRUE), "NA") expect_equal(.arrayToString(NULL, vectorLookAndFeelEnabled = TRUE), "NULL") expect_equal(.arrayToString(c(1, 2, 3), vectorLookAndFeelEnabled = TRUE), "c(1, 2, 3)") expect_equal(.arrayToString(c(NA, 2, 3), vectorLookAndFeelEnabled = TRUE), "c(NA, 2, 3)") expect_equal(.arrayToString(c(1, 2, NA), vectorLookAndFeelEnabled = TRUE), "c(1, 2, NA)") expect_equal(.arrayToString(c(NA, NA, NA), vectorLookAndFeelEnabled = TRUE), "c(NA, NA, NA)") expect_equal(.arrayToString(c(1, NULL, 3), vectorLookAndFeelEnabled = TRUE), "c(1, 3)") }) test_that("Testing '.getQNorm'", { expect_equal(sign(.getQNorm(1)), sign(qnorm(1))) expect_equal(.getQNorm(1 - 1e-12), qnorm(1 - 1e-12)) expect_equal(sign(.getQNorm(0)), sign(qnorm(0))) expect_equal(.getQNorm(1e-12), qnorm(1e-12)) }) test_that("Testing '.getOneMinusQNorm'", { expect_equal(sign(.getOneMinusQNorm(1)), sign(1 - qnorm(1))) expect_equal(.getOneMinusQNorm(1 - 1e-12), -qnorm(1 - 1e-12)) 
expect_equal(sign(.getOneMinusQNorm(0)), sign(1 - qnorm(0))) expect_equal(.getOneMinusQNorm(1e-12), -qnorm(1e-12)) }) test_that("Testing '.getInputProducingZeroOutput'", { tolerance <- 1e-05 epsilon <- 1e-08 expect_equal(.getInputProducingZeroOutput(1, 0, 2, 99, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, 99, 2, 0, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, 0, NA, 0, tolerance), 1) expect_equal(.getInputProducingZeroOutput(NA, 0, 2, 0, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, 0, NA, NA, tolerance), 1) expect_equal(.getInputProducingZeroOutput(NA, NA, 2, 0, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, 0, 2, NA, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, NA, 2, 0, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, 99, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, 99, 2, tolerance, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, tolerance + epsilon, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, tolerance + epsilon, 2, tolerance, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, tolerance - epsilon, tolerance), 2) expect_equal(.getInputProducingZeroOutput(1, tolerance - epsilon, 2, tolerance, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, tolerance - epsilon, 2, tolerance, tolerance), 1) expect_equal(.getInputProducingZeroOutput(1, tolerance, 2, tolerance - epsilon, tolerance), 2) }) test_that("Testing '.getOneDimensionalRoot'", { .skipTestIfDisabled() tolerance <- 1e-08 expect_equal(.getOneDimensionalRoot(f = function(x) { x - 2 }, lower = -1, upper = 1, tolerance = tolerance), NA_real_) expect_equal(.getOneDimensionalRoot(f = function(x) { x + 2 }, lower = -1, upper = 1, tolerance = tolerance), NA_real_) expect_equal(.getOneDimensionalRoot(f = function(x) { x - 1 - tolerance }, lower = -1, upper = 1, tolerance = tolerance), 1) expect_equal(.getOneDimensionalRoot(f = 
function(x) { x + 1 + tolerance }, lower = -1, upper = 1, tolerance = tolerance), -1) expect_equal(.getOneDimensionalRoot(f = function(x) { x - 1 }, lower = -1, upper = 1, tolerance = tolerance), 1) expect_equal(.getOneDimensionalRoot(f = function(x) { x + 1 }, lower = -1, upper = 1, tolerance = tolerance), -1) expect_equal(.getOneDimensionalRoot(f = function(x) { x - 1 }, lower = 0, upper = 1, tolerance = tolerance), 1) expect_equal(.getOneDimensionalRoot(f = function(x) { x - 1 }, lower = tolerance, upper = 1, tolerance = tolerance), 1) expect_equal(.getOneDimensionalRoot(f = function(x) { x + 1 }, lower = -1, upper = 0, tolerance = tolerance), -1) expect_equal(.getOneDimensionalRoot(f = function(x) { x + 1 }, lower = -1, upper = 1 - tolerance, tolerance = tolerance), -1) expect_equal(.getOneDimensionalRoot(f = function(x) { x - 3 }, lower = 1, upper = 5, tolerance = tolerance), 3) expect_equal(.getOneDimensionalRoot(f = function(x) { x + 3 }, lower = -5, upper = -1, tolerance = tolerance), -3) expect_equal(.getOneDimensionalRoot(f = function(x) { 3 * x - 700 }, lower = 100, upper = 1000, tolerance = tolerance), 233.33333333) expect_equal(.getOneDimensionalRoot(f = function(x) { 3 * x + 700 }, lower = -1000, upper = -100, tolerance = tolerance), -233.33333333) expect_equal(.getOneDimensionalRoot(f = function(x) { x - 4 }, lower = -10, upper = 10), 4, tolerance = tolerance) expect_equal(.getOneDimensionalRoot(f = function(x) { x + 4 }, lower = -10, upper = 10), -4, tolerance = tolerance) dataExample1 <- getDataset( overallEvents = c(33, 55, 129), overallAllocationRatios = c(1, 1, 4), overallLogRanks = c(1.02, 1.38, 2.2) ) design1 <- getDesignGroupSequential(kMax = 3, alpha = 0.025, typeOfDesign = "WT", deltaWT = 0.25) result1 <- getRepeatedConfidenceIntervals(design1, dataExample1, stage = 3) ## Comparison of the results of matrixarray object 'result1' with expected results expect_equal(result1[1, ], c(0.54923831, 0.77922365, 1.0261298), tolerance = 1e-07) 
expect_equal(result1[2, ], c(3.7041718, 2.7014099, 2.5669073), tolerance = 1e-07) design2 <- getDesignGroupSequential( kMax = 3, alpha = 0.025, informationRates = c(0.4, 0.7, 1), typeOfDesign = "WT", deltaWT = 0.35 ) dataExample2 <- getDataset( overallN2 = c(30, 80, 100), overallN1 = c(30, 80, 100), overallEvents2 = c(10, 25, 36), overallEvents1 = c(14, 35, 53) ) result2 <- getRepeatedConfidenceIntervals( design = design2, dataInput = dataExample2, stage = 3, normalApproximation = TRUE, directionUpper = TRUE ) ## Comparison of the results of matrixarray object 'result2' with expected results expect_equal(result2[1, ], c(-0.17491833, -0.048575314, 0.018957987), tolerance = 1e-07) expect_equal(result2[2, ], c(0.41834377, 0.2916876, 0.31353674), tolerance = 1e-07) design3 <- getDesignInverseNormal( kMax = 2, alpha = 0.025, informationRates = c(0.5, 1), typeOfDesign = "WT", deltaWT = 0.25 ) dataExample3 <- getDataset( events1 = c(7, 57), events2 = c(7, 57), n1 = c(30, 300), n2 = c(30, 300) ) result3 <- getRepeatedConfidenceIntervals(design3, dataExample3) ## Comparison of the results of matrixarray object 'result3' with expected results expect_equal(result3[1, ], c(-0.26729325, -0.071746001), tolerance = 1e-07) expect_equal(result3[2, ], c(0.26729325, 0.071746001), tolerance = 1e-07) design4 <- getDesignInverseNormal( kMax = 2, alpha = 0.025, informationRates = c(0.5, 1), typeOfDesign = "WT", deltaWT = 0.25 ) dataExample4 <- getDataset( events1 = c(4, 55), events2 = c(4, 46), n1 = c(30, 300), n2 = c(30, 300) ) result4 <- getRepeatedConfidenceIntervals(design4, dataExample4) ## Comparison of the results of matrixarray object 'result4' with expected results expect_equal(result4[1, ], c(-0.23589449, -0.043528426), tolerance = 1e-07) expect_equal(result4[2, ], c(0.23589449, 0.088472144), tolerance = 1e-07) }) rpact/tests/testthat/test-class_design_set.R0000644000176200001440000000230214446750002020775 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of 
the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-class_analysis_dataset.R ## | Creation date: 06 February 2023, 12:04:06 ## | File version: $Revision: 7139 $ ## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Class 'TrialDesignSet'") test_that("Test that design set class generics and utility functions throw errors outside of context", { expect_error(summary.TrialDesignSet()) expect_error(as.data.frame.TrialDesignSet()) expect_error(plot.TrialDesignSet()) expect_error(.plotTrialDesignSet()) expect_error(.addDecistionCriticalValuesToPlot()) }) rpact/tests/testthat/test-f_core_output_formats.R0000644000176200001440000003254614446300510022104 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_core_output_formats.R ## | Creation date: 06 February 2023, 12:11:55 ## | File version: $Revision: 7132 $ ## | Last changed: $Date: 2023-06-26 14:15:08 +0200 (Mon, 26 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | 
test_plan_section("Testing the Output Format Functions") test_that("'.formatPValues'", { # @refFS[Sec.]{fs:sec:outputFormats} # @refFS[Tab.]{fs:tab:outputFormats} x <- .formatPValues(0.0000234) ## Comparison of the results of character object 'x' with expected results expect_equal(x, "<0.0001") x <- .formatPValues(c(0.0000234, 0.0000134, 0.1234)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("<0.0001", "<0.0001", "0.1234")) x <- .formatPValues(c(0.0002345678, 0.0000134, 0.1234, 0.000000000001, .00000009999)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("0.0002346", "0.0000134", "0.1234000", "<0.000001", "<0.000001")) x <- .formatPValues(c(0.00234, 0.000013, 0.1234, 0.000000000001, .00000009999)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("0.00234", "<0.0001", "0.12340", "<0.0001", "<0.0001")) x <- .formatPValues(c(6.244e-05, 4.906e-02, 1.446e-02, NA_real_)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("<0.0001", "0.04906", "0.01446", "NA")) x <- .formatPValues(c(6.24408201934656e-05, 7.55449751868031e-05, 1.23207030919836e-05, NA_real_)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("<0.0001", "<0.0001", "<0.0001", "NA")) }) test_that("'.formatRepeatedPValues'", { # @refFS[Sec.]{fs:sec:outputFormats} # @refFS[Tab.]{fs:tab:outputFormats} x <- .formatRepeatedPValues(c(0.0000234, 0.0000134, 0.1234)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("<0.0001", "<0.0001", "0.1234")) x <- .formatRepeatedPValues(c(0.0000234, 0.0000134, 0.5234)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("<0.0001", "<0.0001", ">0.5")) x <- .formatRepeatedPValues(c(0.0000234, 0.0000134, 0.5234, NA_real_)) ## Comparison of the results of character object 'x' with 
expected results expect_equal(x, c("<0.0001", "<0.0001", ">0.5", "NA")) }) test_that("'.formatConditionalPower'", { # @refFS[Sec.]{fs:sec:outputFormats} # @refFS[Tab.]{fs:tab:outputFormats} x <- .formatConditionalPower(c(0.0000234, 0.0000134, 0.5234, NA_real_)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("0", "0", "0.5234", "NA")) x <- .formatConditionalPower(c(0.234, 0.123456, 0.6, 0.000001)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("0.2340", "0.1235", "0.6000", "0")) }) test_that("'.formatProbabilities'", { # @refFS[Sec.]{fs:sec:outputFormats} # @refFS[Tab.]{fs:tab:outputFormats} x <- .formatProbabilities(c(NA_real_, NA_real_, 0.4536623, 0.7713048)) ## Comparison of the results of character object 'x' with expected results expect_equal(x, c("NA", "NA", "0.4537", "0.7713")) }) test_that("'.getDecimalPlaces'", { # @refFS[Sec.]{fs:sec:outputFormats} # @refFS[Tab.]{fs:tab:outputFormats} x <- .getDecimalPlaces(NA) ## Comparison of the results of integer object 'x' with expected results expect_equal(x, 0) x <- .getDecimalPlaces(12.123) ## Comparison of the results of integer object 'x' with expected results expect_equal(x, 3) x <- .getDecimalPlaces(c(6.661338e-16, 8.000000e-01, NA_real_)) ## Comparison of the results of integer object 'x' with expected results expect_equal(x, c(15, 1, 0)) x <- .getDecimalPlaces(c(6.661338e-16, 8.12300000e-02)) ## Comparison of the results of integer object 'x' with expected results expect_equal(x, c(15, 5)) }) test_that("Internal output format functions throw errors when arguments are missing or wrong", { expect_equal(.getFormattedValue(), "NA") expect_error(.assertIsValitOutputFormatOptionValue()) expect_error(.getOutputFormatOptions()) expect_error(.getOptionBasedFormattedValue()) expect_no_error(getOutputFormat()) expect_no_error(.getOutputFormat()) expect_error(.addFieldsToOutputFormatList()) 
expect_error(.getOutputFormatParameterNames()) expect_error(.getOutputFormatFunctionName()) expect_null(.getOutputFormatKeyByFieldName("xxx")) expect_error(.getOutputFormatKeyByFunctionName()) }) test_that(".assertIsValidOutputFormatOptionValue handles valid option value", { # Valid option value optionKey <- "exampleKey" optionValue <- "roundFunction = ceiling" # Call the function being tested result <- .assertIsValidOutputFormatOptionValue(optionKey, optionValue) # Expect no error or exception expect_null(result) }) test_that(".assertIsValidOutputFormatOptionValue handles invalid empty option value", { # Invalid empty option value optionKey <- "exampleKey" optionValue <- "" # Call the function being tested result <- capture_output(.assertIsValidOutputFormatOptionValue(optionKey, optionValue)) # Expect an error message expect_match(result, "") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.p.value'", { key <- "rpact.output.format.p.value" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatPValues") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.repeated.p.value'", { key <- "rpact.output.format.repeated.p.value" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatRepeatedPValues") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.probability'", { key <- "rpact.output.format.probability" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatProbabilities") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.futility.probability'", { key <- "rpact.output.format.futility.probability" # Call the 
function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatFutilityProbabilities") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.sample.size'", { key <- "rpact.output.format.sample.size" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatSampleSizes") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.event'", { key <- "rpact.output.format.event" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatEvents") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.event.time'", { key <- "rpact.output.format.event.time" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatEventTime") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.conditional.power'", { key <- "rpact.output.format.conditional.power" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatConditionalPower") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.critical.value'", { key <- "rpact.output.format.critical.value" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatCriticalValues") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.critical.value.fisher'", { key <- "rpact.output.format.critical.value.fisher" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect 
the correct function name expect_equal(result, ".formatCriticalValuesFisher") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.test.statistic.fisher'", { key <- "rpact.output.format.test.statistic.fisher" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatTestStatisticsFisher") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.test.statistic'", { key <- "rpact.output.format.test.statistic" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatTestStatistics") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.rate'", { key <- "rpact.output.format.rate" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatRates") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.rate1'", { key <- "rpact.output.format.rate1" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatRatesDynamic") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.accrual.intensity'", { key <- "rpact.output.format.accrual.intensity" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatAccrualIntensities") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.mean'", { key <- "rpact.output.format.mean" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatMeans") }) 
test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.ratio'", { key <- "rpact.output.format.ratio" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatRatios") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.st.dev'", { key <- "rpact.output.format.st.dev" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatStDevs") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.duration'", { key <- "rpact.output.format.duration" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatDurations") }) test_that(".getOutputFormatFunctionName returns correct function name for key 'rpact.output.format.time'", { key <- "rpact.output.format.time" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect the correct function name expect_equal(result, ".formatTime") }) test_that(".getOutputFormatFunctionName returns NULL for unknown key", { key <- "unknown.key" # Call the function being tested result <- .getOutputFormatFunctionName(key) # Expect NULL as the result expect_null(result) }) test_that(".getOptionBasedFormattedValue returns NULL for unknown option key", { optionKey <- "unknown.key" value <- 0.123 # Call the function being tested result <- .getOptionBasedFormattedValue(optionKey, value) # Expect NULL as the result expect_null(result) }) rpact/tests/testthat/test-f_design_sample_size_calculator.R0000644000176200001440000242124514372422771024072 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and 
Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_design_sample_size_calculator.R ## | Creation date: 06 February 2023, 12:13:20 ## | File version: $Revision: 6810 $ ## | Last changed: $Date: 2023-02-13 12:58:47 +0100 (Mo, 13 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Sample Size Calculation of Testing Means for Different Designs and Arguments") test_that("'getSampleSizeMeans': Sample size calculation of testing means for one sided group sequential design", { # @refFS[Formula]{fs:criticalValuesWangTiatis} # @refFS[Formula]{fs:inflationFactor} # @refFS[Formula]{fs:expectedReduction} designGS1pretest <- getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) ## Comparison of the results of TrialDesignGroupSequential object 'designGS1pretest' with expected results expect_equal(designGS1pretest$alphaSpent, c(0.0020595603, 0.0098772988, 0.02499999), tolerance = 1e-07) expect_equal(designGS1pretest$criticalValues, c(2.8688923, 2.3885055, 2.0793148), tolerance = 1e-07) expect_equal(designGS1pretest$stageLevels, c(0.0020595603, 0.0084585282, 0.018794214), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designGS1pretest), NA))) expect_output(print(designGS1pretest)$show()) invisible(capture.output(expect_error(summary(designGS1pretest), NA))) expect_output(summary(designGS1pretest)$show()) designGS1pretestCodeBased <- eval(parse(text = getObjectRCode(designGS1pretest, stringWrapParagraphWidth = NULL))) expect_equal(designGS1pretestCodeBased$alphaSpent, designGS1pretest$alphaSpent, 
tolerance = 1e-05) expect_equal(designGS1pretestCodeBased$criticalValues, designGS1pretest$criticalValues, tolerance = 1e-05) expect_equal(designGS1pretestCodeBased$stageLevels, designGS1pretest$stageLevels, tolerance = 1e-05) expect_type(names(designGS1pretest), "character") df <- as.data.frame(designGS1pretest) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designGS1pretest) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } designGS1 <- getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeOneMeanVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageOneMean} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 0.8) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 494.6455, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 98.929099, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 247.32275, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 494.6455, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 491.89699, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 462.87248, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 360.24062, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.090771, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.80583608, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.68748891, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # 
@refFS[Formula]{fs:sampleSizeOneMeanVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageOneMean} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 0.8) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 492.61495, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 98.522991, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 246.30748, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 492.61495, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 489.87773, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 460.97237, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 358.76182, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.0780634, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.80438093, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.68736844, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$rejectPerStage, 
sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 107.00299, tolerance = 
1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 53.501497, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 53.501497, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 21.400599, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 53.501497, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 107.00299, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 106.40843, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 100.12977, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 77.928183, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8110917, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.3500437, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81436669, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, 
sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 104.93573, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 52.467865, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 52.467865, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 20.987146, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 52.467865, tolerance = 
1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 104.93573, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 104.35265, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 98.195298, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 76.422636, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.5049412, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.318984, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81192991, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
# Generic accessors must work: names(), as.data.frame(), as.matrix().
expect_type(names(sampleSizeResult), "character")
df <- as.data.frame(sampleSizeResult)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(sampleSizeResult)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario: two-group difference of means, t-test (normalApproximation = FALSE),
# alternative = 1.3, unbalanced allocation 3:1; per-group stage sizes are checked too.
# @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 3)
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 141.97133, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 106.4785, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 35.492832, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 28.394266, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 70.985664, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 141.97133, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 21.295699, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 53.239248, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 106.4785, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 7.0985664, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 17.746416, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 35.492832, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 141.18246, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 132.85195, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 103.39494, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.7228801, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.3419598, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81376184, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # Printing and summary must succeed; then round-trip via getObjectRCode().
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
    expect_type(names(sampleSizeResult), "character")
    df <- as.data.frame(sampleSizeResult)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario: two-group difference of means, z-test (normalApproximation = TRUE),
# alternative = 1.3, unbalanced allocation 3:1.
# @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 3)
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 139.91431, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 104.93573, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 34.978577, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 27.982861, tolerance = 1e-07)
# Cumulative and per-group stage-wise sample sizes for the current scenario
# (z-test, alternative = 1.3, allocation 3:1) against the stored reference values.
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 69.957153, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 139.91431, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 20.987146, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 52.467865, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 104.93573, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 6.9957153, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 17.489288, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 34.978577, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 139.13687, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 130.92706, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 101.89685, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.5049412, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.318984, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.81192991, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must run error-free; their console output is discarded.
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    # Round trip: regenerate the object from its own R code representation.
    generatedCode <- getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)
    rebuiltResult <- eval(parse(text = generatedCode))
    expect_equal(rebuiltResult$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(rebuiltResult$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(rebuiltResult$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(rebuiltResult$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05)
    expect_equal(rebuiltResult$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(rebuiltResult$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
    # Coercions to base types must produce non-empty results.
    expect_type(names(sampleSizeResult), "character")
    resultFrame <- as.data.frame(sampleSizeResult)
    expect_s3_class(resultFrame, "data.frame")
    expect_true(nrow(resultFrame) > 0 && ncol(resultFrame) > 0)
    resultMatrix <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(resultMatrix))
    expect_true(nrow(resultMatrix) > 0 && ncol(resultMatrix) > 0)
}

# Scenario: shifted null hypothesis (thetaH0 = 0.5), t-test, alternative = 2.1,
# balanced allocation.
# @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 2.1, allocationRatioPlanned = 1)
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
# Overall stopping probability, maximum and expected sample sizes, and
# effect-scale boundaries for the current scenario (thetaH0 = 0.5, t-test,
# alternative = 2.1, balanced allocation).
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 71.36231, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 35.681155, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 35.681155, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 14.272462, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 35.681155, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 71.36231, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 70.965784, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 66.77843, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 51.971772, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.222748, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.1829515, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5038177, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must run error-free on the plan object.
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    # Round trip: regenerate the object from its own R code representation.
    generatedCode <- getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)
    rebuiltResult <- eval(parse(text = generatedCode))
    expect_equal(rebuiltResult$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(rebuiltResult$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(rebuiltResult$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(rebuiltResult$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
    # Coercions to base types must produce non-empty results.
    expect_type(names(sampleSizeResult), "character")
    resultFrame <- as.data.frame(sampleSizeResult)
    expect_s3_class(resultFrame, "data.frame")
    expect_true(nrow(resultFrame) > 0 && ncol(resultFrame) > 0)
    resultMatrix <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(resultMatrix))
    expect_true(nrow(resultMatrix) > 0 && ncol(resultMatrix) > 0)
}

# Scenario: thetaH0 = 0.5, z-test (normalApproximation = TRUE), alternative = 2.1,
# balanced allocation.
# @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 2.1, allocationRatioPlanned = 1)
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 69.273978, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 34.636989, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 34.636989, tolerance = 1e-07)
# Stage-wise totals, expected sample sizes, and effect-scale boundaries for the
# z-test scenario (thetaH0 = 0.5, alternative = 2.1, balanced allocation).
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 13.854796, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 34.636989, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 69.273978, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 68.889056, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 64.824239, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 50.450881, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5830046, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.123365, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.4992983, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must run error-free on the plan object.
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    # Round trip: regenerate the object from its own R code representation.
    generatedCode <- getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)
    rebuiltResult <- eval(parse(text = generatedCode))
    expect_equal(rebuiltResult$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(rebuiltResult$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(rebuiltResult$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(rebuiltResult$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
    # Coercions to base types must produce non-empty results.
    expect_type(names(sampleSizeResult), "character")
    resultFrame <- as.data.frame(sampleSizeResult)
    expect_s3_class(resultFrame, "data.frame")
    expect_true(nrow(resultFrame) > 0 && ncol(resultFrame) > 0)
    resultMatrix <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(resultMatrix))
    expect_true(nrow(resultMatrix) > 0 && ncol(resultMatrix) > 0)
}

# Scenario: thetaH0 = 0.5, t-test (normalApproximation = FALSE), alternative = 2.1,
# allocationRatioPlanned = 0.4 (more subjects planned for the second group).
# @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 2.1, allocationRatioPlanned = 0.4)
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 86.937573, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 24.839307, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 62.098267, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 17.387515, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 43.468787, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 86.937573, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 4.9678613, tolerance = 1e-07)
# Remaining per-group expectations for the t-test scenario with
# allocationRatioPlanned = 0.4, then expected sizes and effect-scale boundaries.
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 12.419653, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 24.839307, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 12.419653, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 31.049133, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 62.098267, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 86.454503, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 81.353233, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 63.314931, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.0734522, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.1712593, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5029983, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must run error-free on the plan object.
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    # Round trip: regenerate the object from its own R code representation.
    generatedCode <- getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)
    rebuiltResult <- eval(parse(text = generatedCode))
    expect_equal(rebuiltResult$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(rebuiltResult$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(rebuiltResult$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(rebuiltResult$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(rebuiltResult$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05)
    expect_equal(rebuiltResult$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05)
    expect_equal(rebuiltResult$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(rebuiltResult$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
    # Coercions to base types must produce non-empty results.
    expect_type(names(sampleSizeResult), "character")
    resultFrame <- as.data.frame(sampleSizeResult)
    expect_s3_class(resultFrame, "data.frame")
    expect_true(nrow(resultFrame) > 0 && ncol(resultFrame) > 0)
    resultMatrix <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(resultMatrix))
    expect_true(nrow(resultMatrix) > 0 && ncol(resultMatrix) > 0)
}

# Scenario: thetaH0 = 0.5, z-test (normalApproximation = TRUE), alternative = 2.1,
# allocationRatioPlanned = 0.4.
# @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
# @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 2.1, allocationRatioPlanned = 0.4)
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 84.860623, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 24.245892, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 60.614731, tolerance = 1e-07)
# Stage-wise totals and per-group sizes for the z-test scenario with
# thetaH0 = 0.5, alternative = 2.1, allocationRatioPlanned = 0.4 (started above).
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 16.972125, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 42.430311, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 84.860623, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 4.8491785, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 12.122946, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 24.245892, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 12.122946, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 30.307365, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 60.614731, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 84.389093, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 79.409693, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 61.802329, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5830046, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.123365, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.4992983, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must succeed; then compare against object rebuilt from code.
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
    expect_type(names(sampleSizeResult), "character")
    df <- as.data.frame(sampleSizeResult)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario: ratio of means (meanRatio = TRUE), thetaH0 = 0.9, t-test,
# alternative = 1.9, balanced allocation.
# @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
# @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = FALSE, alternative = 1.9, allocationRatioPlanned = 1)
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 363.14949, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 181.57474, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 181.57474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 72.629897, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 181.57474, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 363.14949, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 361.13164, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 339.82298, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 264.47466, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8861856, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9212807, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5251098, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must succeed; then round-trip via getObjectRCode().
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
    expect_type(names(sampleSizeResult), "character")
    df <- as.data.frame(sampleSizeResult)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario: ratio of means, thetaH0 = 0.9, z-test (normalApproximation = TRUE),
# alternative = 1.9, balanced allocation.
# @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
# @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceKnownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = TRUE, alternative = 1.9, allocationRatioPlanned = 1)
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 361.11139, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 180.5557, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 180.5557, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 72.222278, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 180.5557, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 361.11139, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 359.10487, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 337.9158, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 262.99035, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8268779, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9146031, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5245615, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must succeed; then round-trip via getObjectRCode().
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
    expect_type(names(sampleSizeResult), "character")
    df <- as.data.frame(sampleSizeResult)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario: ratio of means, thetaH0 = 0.9, t-test, alternative = 1.9,
# unbalanced allocation 3:1; per-group stage sizes are checked.
# @refFS[Tab.]{fs:tab:output:getSampleSizeMeans}
# @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceUnknownOnesided}
# @refFS[Formula]{fs:sampleSizePerStageTwoMeans}
sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = FALSE, alternative = 1.9, allocationRatioPlanned = 3)
## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 458.2463, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 343.68473, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 114.56158, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 91.64926, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 229.12315, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 458.2463, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 68.736945, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 171.84236, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 343.68473, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 22.912315, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 57.280788, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 114.56158, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 455.70005, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 428.81135, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 333.7318, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8732837, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9198713, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5249957, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must succeed; then round-trip via getObjectRCode().
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = TRUE, alternative = 1.9, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, 
tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 456.21071, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 342.15803, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 114.05268, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 91.242142, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 228.10535, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 456.21071, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 68.431606, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 171.07902, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 342.15803, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 22.810535, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 57.026339, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 114.05268, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 453.67577, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 426.90651, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 332.24932, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8268779, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9146031, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5245615, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) 
expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeTwoMeansRatioVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizeRatioMeansOptimumAllocationRatio} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 2, meanRatio = 
TRUE, thetaH0 = 0.9, stDev = 3, normalApproximation = TRUE, alternative = 1.9, allocationRatioPlanned = 0) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$allocationRatioPlanned, 1.1111111, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 360.11385, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 189.5336, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 170.58024, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 72.022769, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 180.05692, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 360.11385, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 37.906721, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 94.766802, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 189.5336, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 34.116049, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 85.290122, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 170.58024, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 358.11287, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 336.98233, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 262.26386, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 2.8268779, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9146031, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5245615, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$allocationRatioPlanned, sampleSizeResult$allocationRatioPlanned, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeMeans': Sample size calculation of testing means for two sided group sequential design", { .skipTestIfDisabled() # @refFS[Formula]{fs:criticalValuesWangTiatis} # @refFS[Formula]{fs:inflationFactor} # @refFS[Formula]{fs:expectedReduction} designGS2pretest <- getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) ## Comparison of the results of TrialDesignGroupSequential object 'designGS2pretest' with expected results expect_equal(designGS2pretest$alphaSpent, c(0.12265406, 0.26238998, 0.39999999), tolerance = 1e-07) expect_equal(designGS2pretest$criticalValues, c(1.5437287, 1.2852363, 1.1188632), tolerance = 1e-07) expect_equal(designGS2pretest$stageLevels, c(0.06132703, 0.099354859, 0.13159925), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designGS2pretest), NA))) expect_output(print(designGS2pretest)$show()) invisible(capture.output(expect_error(summary(designGS2pretest), NA))) expect_output(summary(designGS2pretest)$show()) designGS2pretestCodeBased <- eval(parse(text = getObjectRCode(designGS2pretest, stringWrapParagraphWidth = NULL))) expect_equal(designGS2pretestCodeBased$alphaSpent, designGS2pretest$alphaSpent, tolerance = 1e-05) expect_equal(designGS2pretestCodeBased$criticalValues, designGS2pretest$criticalValues, tolerance = 1e-05) expect_equal(designGS2pretestCodeBased$stageLevels, designGS2pretest$stageLevels, tolerance = 1e-05) expect_type(names(designGS2pretest), "character") df <- 
as.data.frame(designGS2pretest) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designGS2pretest) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } designGS2 <- getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeOneMeanVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageOneMean} sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 0.8) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 234.92433, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 46.984866, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 117.46217, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 234.92433, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 195.45911, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 176.81177, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 134.60888, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.041134725, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.26146972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.3536511, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.95886527, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.73853028, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.6463489, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeOneMeanVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageOneMean} sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = TRUE, alternative = 0.8) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 234.50706, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 46.901412, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 117.25353, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 234.50706, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 195.11194, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 176.49772, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 134.36979, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.049174965, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.26261678, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.35387349, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.95082503, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.73738322, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.64612651, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 50.39219, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 25.196095, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 25.196095, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 10.078438, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 25.196095, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 50.39219, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 41.926745, tolerance = 
1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 37.926818, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 28.874132, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -2.1720469, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0543228, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63787834, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.1720469, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0543228, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63787834, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 
1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 49.954167, 
tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 24.977083, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 24.977083, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 9.9908334, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 24.977083, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 49.954167, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 41.562306, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 37.597148, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 28.62315, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -1.9535752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0286606, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63321489, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.9535752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0286606, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63321489, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) 
expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceUnknownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, 
normalApproximation = FALSE, alternative = 1.3, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 67.037534, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 50.27815, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 16.759383, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 13.407507, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 33.518767, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 67.037534, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 10.05563, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 25.139075, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 50.27815, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 3.3518767, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 8.3796917, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 16.759383, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 55.775818, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 50.454651, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 38.411718, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -2.1030977, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0473776, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63668307, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.1030977, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0473776, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63668307, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeMeans} # @refFS[Formula]{fs:sampleSizeTwoMeansDiffVarianceKnownOnesided} # @refFS[Formula]{fs:sampleSizePerStageTwoMeans} sampleSizeResult <- getSampleSizeMeans(designGS2, groups = 2, thetaH0 = 0, stDev = 2, normalApproximation = TRUE, alternative = 1.3, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanMeans object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 66.605556, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 49.954167, tolerance = 1e-07) 
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 16.651389, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 13.321111, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 33.302778, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 66.605556, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 9.9908334, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 24.977083, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 49.954167, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 3.3302778, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 8.3256945, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 16.651389, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 55.416408, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 50.12953, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 38.164199, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -1.9535752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -1.0286606, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.63321489, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.9535752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.0286606, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.63321489, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_plan_section("Testing the Sample Size Calculation of Testing Rates for Different Designs and Arguments") test_that("'getSampleSizeRates': Sample size calculation of testing rates for one sided group sequential design", { .skipTestIfDisabled() designGS1 <- getDesignGroupSequential(informationRates = c(0.2, 0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3) # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:sampleSizeOneRateExactOnesidedLargerpi1} # @refFS[Formula]{fs:sampleSizePerStageOneRate} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 1, thetaH0 = 0.5, pi1 = 0.8, normalApproximation = FALSE) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 29.536017, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 5.9072033, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 14.768008, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 29.536017, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 29.371899, tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 27.638803, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 21.510502, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.090192, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.81076728, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.6912997, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) 
expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:sampleSizeOneRateExactOnesidedSmallerpi1} # @refFS[Formula]{fs:sampleSizePerStageOneRate} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 1, thetaH0 = 0.5, pi1 = 0.2, normalApproximation = FALSE) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 29.536017, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 5.9072033, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 14.768008, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 29.536017, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 29.371899, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 27.638803, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 21.510502, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], -0.090191958, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.18923272, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.3087003, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) 
invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:sampleSizeOneRateApproximation} # @refFS[Formula]{fs:sampleSizePerStageOneRate} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 1, thetaH0 = 0.5, pi1 = 0.8, normalApproximation = TRUE) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) 
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 26.111979, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 5.2223957, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 13.055989, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 26.111979, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 25.966887, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 24.434704, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 19.016842, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.127696, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.83051514, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.70345593, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, 
tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:sampleSizeOneRateExactOnesidedSmallerpi1} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0, pi1 = 0.5, pi2 = 0.3, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 261.60183, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 130.80091, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 130.80091, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 52.320365, 
tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 130.80091, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 261.60183, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 260.14823, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 244.79812, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 190.51949, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.39662162, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.20482715, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.12354802, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, 
sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:sampleSizeTwoRatesDiff} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0, pi1 = 0.5, pi2 = 0.3, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 349.41307, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 262.0598, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 87.353268, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 69.882614, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 174.70654, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 349.41307, tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 52.411961, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 131.0299, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 262.0598, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 17.470654, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 43.676634, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 87.353268, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 347.47155, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 326.9689, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 254.47069, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.38949339, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.20784714, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.12553463, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} # @refFS[Formula]{fs:sampleSizeTwoRatesDiff} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0.2, pi1 = 0.5, pi2 = 0.1, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) 
expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 201.70565, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 100.85283, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 100.85283, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 40.341131, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 100.85283, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 201.70565, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 200.58487, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 188.74931, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 146.89828, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.6326463, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.40827798, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.32212934, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, 
sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} # @refFS[Formula]{fs:sampleSizeTwoRatesDiff} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0.2, pi1 = 0.5, pi2 = 0.1, allocationRatioPlanned = 0.4) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 267.48868, tolerance = 1e-07) 
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 76.425337, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 191.06334, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 53.497736, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 133.74434, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 267.48868, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 15.285067, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 38.212668, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 76.425337, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 38.212668, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 95.531671, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 191.06334, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 266.00237, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 250.30683, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 194.80676, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.59822838, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.40051537, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.32119139, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:EstimatesDiffFarringtonManning} # @refFS[Formula]{fs:sampleSizeTwoRatesDiff} # @refFS[Formula]{fs:sampleSizeRatesDiffOptimumAllocationRatio} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- 
getSampleSizeRates(designGS1, groups = 2, thetaH0 = 0.2, pi1 = 0.5, pi2 = 0.1, allocationRatioPlanned = 0) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$allocationRatioPlanned, 1.1669392, tolerance = 1e-07) expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 200.45189, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 107.94727, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 92.504622, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 40.090378, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 100.22594, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 200.45189, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 21.589453, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 53.973634, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 107.94727, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 18.500924, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 46.252311, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 92.504622, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 199.33807, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 187.57608, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 145.98518, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.63834776, 
tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.41018483, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.32243267, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$allocationRatioPlanned, sampleSizeResult$allocationRatioPlanned, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} # @refFS[Formula]{fs:sampleSizeTwoRatesRatio} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, riskRatio = TRUE, thetaH0 = 0.9, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 171.20812, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 85.604059, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 85.604059, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 34.241624, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 85.604059, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 171.20812, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 170.2568, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 160.21075, 
tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 124.68752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.1899424, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.0225352, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5569402, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} # @refFS[Formula]{fs:sampleSizeTwoRatesRatio} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, riskRatio = TRUE, thetaH0 = 0.9, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 221.72371, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 166.29278, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 55.430927, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 44.344741, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 110.86185, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 221.72371, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 33.258556, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 83.14639, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 166.29278, tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 11.086185, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 27.715463, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 55.430927, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 220.4917, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 207.48153, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 161.47703, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.1917697, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.0740853, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5843199, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:EstimatesRatioFarringtonManning} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} # @refFS[Formula]{fs:sampleSizeTwoRatesRatioOptimumAllocationRatio} sampleSizeResult <- getSampleSizeRates(designGS1, groups = 2, riskRatio = TRUE, thetaH0 = 0.9, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 0) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$allocationRatioPlanned, 1.0304199, tolerance = 1e-07) expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 
171.17189, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 86.868201, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 84.303693, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 34.234379, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 85.585947, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 171.17189, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 17.37364, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 43.434101, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 86.868201, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 16.860739, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 42.151846, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 84.303693, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 170.22077, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 160.17685, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 124.66114, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.1919838, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.0241846, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5576701, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$allocationRatioPlanned, sampleSizeResult$allocationRatioPlanned, tolerance = 
1e-05) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeRates': Sample size calculation of testing rates for two sided group sequential design", { .skipTestIfDisabled() designGS2 <- getDesignGroupSequential( 
informationRates = c(0.2, 0.5, 1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:sampleSizeOneRateApproximation} # @refFS[Formula]{fs:sampleSizePerStageOneRate} sampleSizeResult <- getSampleSizeRates(designGS2, groups = 1, thetaH0 = 0.5, pi1 = 0.8, normalApproximation = TRUE) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 11.331566, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 2.2663131, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 5.6657828, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 11.331566, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 9.4279622, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 8.5285086, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 6.4928537, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -0.01272092, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.23002532, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.33381109, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 1.0127209, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.76997468, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 
0.66618891, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, 
sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:sampleSizeTwoRatesDiff} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS2, groups = 2, thetaH0 = 0, pi1 = 0.5, pi2 = 0.3, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 123.43553, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 61.717765, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 61.717765, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 24.687106, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 61.717765, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 123.43553, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 102.69945, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 92.901636, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 70.727105, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -0.23899172, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 
-0.13791313, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.087906186, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.30941892, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.15876644, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.095938144, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, 
tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeRates} # @refFS[Formula]{fs:sampleSizeTwoRatesDiff} # @refFS[Formula]{fs:sampleSizePerStageTwoRates} sampleSizeResult <- getSampleSizeRates(designGS2, groups = 2, thetaH0 = 0, pi1 = 0.5, pi2 = 0.3, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanRates object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 162.30744, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 121.73058, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 40.576859, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 
32.461488, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 81.153719, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 162.30744, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 24.346116, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 60.865289, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 121.73058, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 8.1153719, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 20.28843, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 40.576859, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 135.04122, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 122.15791, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 93.000251, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], -0.21587527, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], -0.13203224, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], -0.086052993, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 0.31213587, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 0.16272503, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 0.09811449, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) 
invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH0, sampleSizeResult$expectedNumberOfSubjectsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH01, sampleSizeResult$expectedNumberOfSubjectsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, 
sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05)
        # Generic accessor checks: names(), as.data.frame(), and as.matrix()
        # must all work on the result object and yield non-empty output.
        expect_type(names(sampleSizeResult), "character")
        df <- as.data.frame(sampleSizeResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(sampleSizeResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_plan_section("Testing the Sample Size Calculation of Survival Designs for Different Designs and Arguments")

# Regression test: fixed (single-stage) sample size calculation for a survival
# endpoint using only default arguments plus alpha = 0.01; pi1 = c(0.4, 0.5, 0.6)
# and pi2 = 0.2 are the package defaults. All expected values below are pinned
# reference results — do not change them without re-deriving them from the
# validated reference implementation.
test_that("'getSampleSizeSurvival': Fixed sample size with minimum required definitions, pi1 = c(0.4, 0.5, 0.6) and pi2 = 0.2 at event time 12, accrual time 12 and follow-up time 6 as default, only alpha = 0.01 is specified", {
    .skipTestIfDisabled()  # skip unless the (long-running) unit test set is enabled

    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(alpha = 0.01)

    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    # One expected value per pi1 scenario (0.4, 0.5, 0.6); scalars apply to all.
    expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE))
    expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, c(58.52451, 31.248898, 20.120262), tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, c(16.482222, 7.5670212, 4.2761841), tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsFixed, c(58.52451, 31.248898, 20.120262), tolerance = 1e-07)
    expect_equal(sampleSizeResult$nFixed, c(197.78666, 90.804254, 51.314209), tolerance = 1e-07)
    expect_equal(sampleSizeResult$nFixed1, c(98.893329, 45.402127, 25.657105), tolerance = 1e-07)
    expect_equal(sampleSizeResult$nFixed2, c(98.893329, 45.402127, 25.657105), tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[1, ], 18)
    expect_equal(sampleSizeResult$studyDuration, 18)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.8370942, 2.2986321, 2.821477), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print() and summary() must not error and must produce visible output.
        # NOTE(review): print(x)$show() relies on print() returning the object
        # (invisibly) and that object exposing a $show() method — confirm this
        # contract holds for TrialDesignPlanSurvival.
        invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
        expect_output(print(sampleSizeResult)$show())
        invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
        expect_output(summary(sampleSizeResult)$show())

        # Round-trip check: regenerate the object from its own R code
        # representation (getObjectRCode) and verify every reported field
        # matches the original result.
        sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
        expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)

        # Generic accessor checks: names(), as.data.frame(), and as.matrix()
        # must all work on the result object and yield non-empty output.
        expect_type(names(sampleSizeResult), "character")
        df <- as.data.frame(sampleSizeResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(sampleSizeResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test: group sequential design (Wang & Tsiatis boundaries,
# deltaWT = 0.3, one-sided) with Schoenfeld-based sample size computation.
# Continues beyond this edit's range; expected values are pinned references.
test_that("'getSampleSizeSurvival': Sample size calculation of survival designs for one sided group sequential design and typeOfComputation = 'Schoenfeld'", {
    .skipTestIfDisabled()  # skip unless the (long-running) unit test set is enabled

    designGS1 <- getDesignGroupSequential(
        informationRates = c(0.2, 0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3
    )

    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld}
    # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes}
    # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
    sampleSizeResult <- getSampleSizeSurvival(designGS1, accountForObservationTimes = FALSE, typeOfComputation = "Schoenfeld", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 2)

    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07)
    expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07)
    expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07)
    expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 218.14225, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 145.42817, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 72.714085, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 72.714085, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 18.178521, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.542817, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.357042, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.714085, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 72.310048, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 68.043375, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 52.956243, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 218.14225, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 145.42817, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 72.714085, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, 
tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 
2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 250.03082, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 187.52311, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 62.507704, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 86.179656, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 20.835901, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 14.391854, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 17.235931, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 43.089828, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 86.179656, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 85.700797, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 80.644, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 62.762955, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 146.60794, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 238.15931, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 250.03082, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 109.95596, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 178.61948, tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 187.52311, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 36.651986, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 59.539826, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 62.507704, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 236.50498, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, 
sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && 
ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS1, accountForObservationTimes = FALSE, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 354.24994, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 177.12497, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 177.12497, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 106.27498, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 29.520829, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.254997, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 53.137492, tolerance = 
1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 106.27498, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 105.68446, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 99.448526, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 77.397988, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 354.24994, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, 
sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS1, accountForObservationTimes = FALSE, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, 
pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 404.85708, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 303.64281, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 101.21427, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 141.69998, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 33.73809, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], 28.339996, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 70.849989, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 141.69998, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 140.91262, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 132.59803, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 103.19732, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 404.85708, tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 303.64281, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 101.21427, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, 
sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # 
@refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 216.4138, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 108.2069, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 108.2069, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 64.634742, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 27.051725, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.872604, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.926948, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 
32.317371, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 64.634742, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 64.275598, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 60.483, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 47.072216, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 168.44491, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 216.4138, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 216.4138, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 212.39441, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, 
sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 
0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 247.45413, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 185.5906, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 61.863534, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 86.179656, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 30.931767, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 
6.1694167, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.832344, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 17.235931, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 43.089828, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 86.179656, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 85.700797, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 80.644, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 62.762955, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 190.83096, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 247.45413, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 247.45413, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 143.12322, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 185.5906, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 185.5906, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 47.70774, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 61.863534, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 61.863534, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 242.70959, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) 
expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with 
expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 355.83608, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 177.91804, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 177.91804, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 106.27498, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 44.47951, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.872604, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.254997, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 53.137492, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 106.27498, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 105.68446, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 99.448526, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 77.397988, tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 276.96374, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 355.83608, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 355.83608, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 349.22724, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # 
@refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 406.87381, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 305.15536, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 101.71845, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 141.69998, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 50.859227, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.832344, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 
18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 28.339996, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 70.849989, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 141.69998, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 140.91262, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 132.59803, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 103.19732, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 313.77176, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 406.87381, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 406.87381, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 235.32882, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 305.15536, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 305.15536, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 78.442941, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 101.71845, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 101.71845, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 399.07264, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) 
expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, 
tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, 
tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 224.258, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 112.129, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 112.129, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 64.634742, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 28.03225, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.818027, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.926948, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 32.317371, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 64.634742, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 64.275598, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 60.483, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 47.072216, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 172.34323, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 224.258, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 224.258, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 
219.90797, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, 
accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 257.43359, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 193.07519, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 64.358398, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 86.179656, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 32.179199, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.0820828, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.109775, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.771337, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 17.235931, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 43.089828, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 86.179656, tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedEventsH0, 85.700797, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 80.644, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 62.762955, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 195.71655, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 257.43359, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 257.43359, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 146.78741, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 193.07519, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 193.07519, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 48.929138, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 64.358398, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 64.358398, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 252.26222, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.9325954, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3170793, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6774418, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, 
sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 368.73381, tolerance = 
1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 184.36691, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 184.36691, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 106.27498, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 46.091727, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.818027, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.254997, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 53.137492, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 106.27498, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 105.68446, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 99.448526, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 77.397988, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 283.37351, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 368.73381, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 368.73381, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 361.58134, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of 
TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 423.28243, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 317.46182, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 105.82061, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 141.69998, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 52.910303, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.0820828, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.109775, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.771337, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 28.339996, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 70.849989, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 141.69998, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 140.91262, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 132.59803, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 103.19732, 
tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 321.80485, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 423.28243, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 423.28243, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 241.35364, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 317.46182, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 317.46182, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 80.451212, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 105.82061, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 105.82061, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 414.77946, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, 
tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 
1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Schoenfeld", thetaH0 = 1.2, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 0) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 359.78876, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 206.97289, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 152.81587, 
tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 108.73874, tolerance = 1e-07) expect_equal(sampleSizeResult$allocationRatioPlanned, 1.3543939, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 44.973595, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1258711, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.176695, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.802401, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 21.747749, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 54.369372, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 108.73874, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 108.13454, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 101.75403, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 79.192297, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 275.50245, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 359.78876, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 359.78876, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 158.48615, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 206.97289, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 206.97289, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 117.01629, tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 152.81587, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 152.81587, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 352.72627, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.1656631, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.3109184, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7962847, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$allocationRatioPlanned, sampleSizeResult$allocationRatioPlanned, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) 
expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': sample size calculation of survival designs for one sided group sequential design and typeOfComputation = 'Freedman'", { .skipTestIfDisabled() designGS1 <- getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS1, accountForObservationTimes = FALSE, typeOfComputation = "Freedman", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 240.49104, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 120.24552, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 120.24552, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 20.04092, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, 
tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 240.49104, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, 
sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # 
@refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS1, accountForObservationTimes = FALSE, typeOfComputation = "Freedman", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 393.13025, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 294.84769, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 98.282562, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 137.59559, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 32.760854, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], 27.519117, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 68.797794, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 137.59559, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 136.83103, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 128.75728, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 
100.20817, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 393.13025, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 294.84769, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 98.282562, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5359417, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9445403, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5058707, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, 
sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # 
@refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 241.56782, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 120.78391, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 120.78391, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 30.195978, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) 
expect_equal(sampleSizeResult$studyDuration, 13.872604, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 188.02345, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 241.56782, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 241.56782, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 237.08125, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 395.08857, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 296.31643, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 98.772142, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 137.59559, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 49.386071, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) 
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.832344, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 27.519117, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 68.797794, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 137.59559, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 136.83103, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 128.75728, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 100.20817, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 304.68325, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 395.08857, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 395.08857, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 228.51244, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 296.31643, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 296.31643, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 76.170813, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 98.772142, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 98.772142, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 387.51336, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 3.5359417, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.9445403, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.5058707, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, 
sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, 
typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 250.32376, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 125.16188, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 125.16188, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 31.29047, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.818027, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07) 
expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 192.37488, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 250.32376, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 250.32376, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 245.46813, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, 
sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 
0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 0) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 236.31869, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 89.479288, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 146.8394, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 62.770758, tolerance = 1e-07) expect_equal(sampleSizeResult$allocationRatioPlanned, 0.60936839, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 29.539836, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) 
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1891498, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.272294, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.846839, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 12.554152, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 31.385379, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 62.770758, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 62.421971, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 58.738747, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 45.714713, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 182.82647, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 236.31869, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 236.31869, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 69.22509, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 89.479288, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 89.479288, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 113.60138, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 146.8394, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 146.8394, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 231.83649, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 5.3084847, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.4084373, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.7178517, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$allocationRatioPlanned, sampleSizeResult$allocationRatioPlanned, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': sample size calculation of survival designs for one sided group sequential design and typeOfComputation = 'HsiehFreedman'", { .skipTestIfDisabled() designGS1 <- getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) # 
@refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS1, accountForObservationTimes = FALSE, typeOfComputation = "HsiehFreedman", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 240.49104, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 120.24552, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 120.24552, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 20.04092, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 
71.746424, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 240.49104, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 
1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS1, accountForObservationTimes = FALSE, typeOfComputation = "HsiehFreedman", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results 
expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 274.8469, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 206.13518, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 68.711726, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 96.196416, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 22.903909, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], 19.239283, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 48.098208, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 96.196416, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 95.661899, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 90.017344, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 70.057965, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 274.8469, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 
206.13518, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 68.711726, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, 
accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 241.56782, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 120.78391, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 120.78391, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 30.195978, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.872604, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 188.02345, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 241.56782, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 241.56782, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 237.08125, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # 
@refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 276.21601, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 207.16201, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 69.054003, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 96.196416, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 34.527001, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07) 
expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.832344, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 19.239283, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 48.098208, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 96.196416, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 95.661899, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 90.017344, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 70.057965, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 213.01146, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 276.21601, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 276.21601, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 159.75859, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 207.16201, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 207.16201, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 53.252865, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 69.054003, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 69.054003, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 270.92, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), 
NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results 
expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 250.32376, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 125.16188, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 125.16188, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 31.29047, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 13.818027, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.429462, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.073656, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 72.147312, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 71.746424, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 67.513008, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 52.543474, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 
192.37488, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 250.32376, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 250.32376, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 245.46813, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 
1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # 
@refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio} sampleSizeResult <- getSampleSizeSurvival(designGS1, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 0) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 244.2512, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 140.50849, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 103.74271, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 73.819895, tolerance = 1e-07) expect_equal(sampleSizeResult$allocationRatioPlanned, 1.3543939, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 30.5314, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1258711, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.176695, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) 
expect_equal(sampleSizeResult$studyDuration, 13.802401, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 14.763979, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 36.909947, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 73.819895, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 73.409713, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 69.078154, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 53.761583, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 187.03142, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 244.2512, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 244.2512, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 107.59211, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 140.50849, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 140.50849, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 79.439306, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 103.74271, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 103.74271, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 239.45666, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 4.52897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 2.2152278, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 1.6316611, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) 
sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$allocationRatioPlanned, sampleSizeResult$allocationRatioPlanned, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, 
sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': sample size calculation of survival data for two sided group sequential design and typeOfComputation = 'Schoenfeld'", { .skipTestIfDisabled() designGS2 <- getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), alpha = 0.4, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS2, accountForObservationTimes = FALSE, typeOfComputation = 
"Schoenfeld", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 102.56356, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 51.281781, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 51.281781, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 8.5469636, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.1538138, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.384534, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 30.769069, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 25.600136, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 23.157812, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 17.630314, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 102.56356, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # 
@refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS2, accountForObservationTimes = FALSE, typeOfComputation = "Schoenfeld", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 117.2155, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 87.911625, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 29.303875, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 41.025425, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 9.7679584, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.205085, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 20.512713, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 41.025425, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 34.133514, tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedEventsH01, 30.877083, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 23.507086, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 117.2155, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 87.911625, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 29.303875, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = 
getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance 
= 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 103.02279, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 51.511393, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 51.511393, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 12.877848, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 11.672467, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.1538138, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.384534, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 30.769069, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 25.600136, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 23.157812, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 17.630314, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 80.187417, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 103.02279, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 103.02279, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 96.108996, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), 
"character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 117.79939, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 88.349544, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 29.449848, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 41.025425, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 14.724924, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) 
expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 11.623913, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.205085, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 20.512713, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 41.025425, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 34.133514, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 30.877083, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 23.507086, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 90.844192, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 117.79939, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 117.79939, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 68.133144, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 88.349544, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 88.349544, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 22.711048, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 29.449848, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 29.449848, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 109.63825, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 
0.66803619, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, 
sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 
1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 106.75698, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 53.378489, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 53.378489, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 30.769069, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 13.344622, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 
1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 11.606421, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.1538138, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.384534, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 30.769069, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 25.600136, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 23.157812, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 17.630314, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 82.043195, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 106.75698, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 106.75698, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 99.274467, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, 
sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalSchoenfeld} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} # 
@refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 0) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 104.16718, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 59.923444, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 44.243734, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 31.482385, tolerance = 1e-07) expect_equal(sampleSizeResult$allocationRatioPlanned, 1.3543939, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 13.020897, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.30276671, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.3601177, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.23711559, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.66288441, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1258711, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.176695, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 11.587598, tolerance = 1e-07) 
expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 6.2964769, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 15.741192, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 31.482385, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 26.193621, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 23.694677, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 18.039036, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 79.764338, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 104.16718, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 104.16718, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 45.885412, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 59.923444, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 59.923444, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 33.878926, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 44.243734, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 44.243734, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 96.778811, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.28805692, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.51926225, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.66803619, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.4715361, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.9258092, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4969249, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.12265406, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.19870972, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.26319851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$allocationRatioPlanned, sampleSizeResult$allocationRatioPlanned, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) 
> 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': sample size calculation of survival data for two sided group sequential design and typeOfComputation = 'Freedman'", { .skipTestIfDisabled() designGS2 <- getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), alpha = 0.25, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 146.14538, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 73.072689, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 73.072689, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 12.178781, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, 
], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 7.0974672, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 11.495939, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 12.987598, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.6488004, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 21.622001, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 39.334079, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 35.691647, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 26.91074, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 86.438503, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 140.00653, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 146.14538, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 130.17577, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.27467837, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50642065, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65781752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.6406215, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.974643, tolerance = 1e-07) 
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5201784, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, 
sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # 
@refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS2, accountForObservationTimes = FALSE, typeOfComputation = "Freedman", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 235.6363, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 176.72722, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 58.909074, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 82.472703, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 19.636358, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$eventsPerStage[1, ], 16.494541, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 41.236352, 
tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 82.472703, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 75.015902, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 68.069247, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 51.322759, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 235.6363, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 176.72722, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 58.909074, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.33945377, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.56614959, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.70454917, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.9459092, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.7663176, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4193473, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) 
invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, 
sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) 
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 144.79208, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 72.396041, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 72.396041, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 18.09901, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 12.334566, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.6488004, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 21.622001, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 39.334079, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 35.691647, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 
26.91074, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 112.6984, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 144.79208, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 144.79208, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 137.49311, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.27467837, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50642065, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65781752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.6406215, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.974643, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5201784, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, 
tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 236.81008, tolerance = 1e-07) 
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 177.60756, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 59.20252, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 82.472703, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 29.60126, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 12.287795, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 16.494541, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 41.236352, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 82.472703, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 75.015902, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 68.069247, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 51.322759, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 182.62251, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 236.81008, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 236.81008, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 136.96688, tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 177.60756, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 177.60756, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 45.655628, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 59.20252, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 59.20252, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 224.48637, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.33945377, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.56614959, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.70454917, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.9459092, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.7663176, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.4193473, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, 
sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results 
expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 150.04026, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 75.020128, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 75.020128, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 18.755032, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1480342, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.21033, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 12.271017, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.6488004, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 21.622001, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 39.334079, tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedEventsH01, 35.691647, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 26.91074, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 115.30658, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 150.04026, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 150.04026, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 142.14088, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.27467837, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50642065, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65781752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.6406215, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.974643, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5201784, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, 
sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedman} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "Freedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 0) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) 
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 141.64583, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 53.632525, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 88.013303, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 37.623838, tolerance = 1e-07) expect_equal(sampleSizeResult$allocationRatioPlanned, 0.60936839, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 17.705728, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1891498, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.272294, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 12.304499, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 7.5247676, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 18.811919, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 37.623838, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 34.222064, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 31.053018, tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedEventsH1, 23.413313, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 109.58341, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 141.64583, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 141.64583, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 41.492467, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 53.632525, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 53.632525, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 68.09094, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 88.013303, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 88.013303, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 134.35397, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.23978557, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.47145911, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.62947897, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 4.1703928, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 2.1210747, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5886154, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) 
invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$allocationRatioPlanned, sampleSizeResult$allocationRatioPlanned, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, 
sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } getDesignCharacteristics(designGS2) }) test_that("'getSampleSizeSurvival': sample size calculation of survival data for two sided group sequential design and typeOfComputation = 'HsiehFreedman'", { .skipTestIfDisabled() designGS2 <- 
getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), alpha = 0.25, sided = 2, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, accountForObservationTimes = FALSE, typeOfComputation = "HsiehFreedman", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 144.14667, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 72.073337, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 72.073337, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 12.012223, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) 
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.6488004, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 21.622001, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 39.334079, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 35.691647, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 26.91074, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[2, ], NA_real_) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 144.14667, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.27467837, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50642065, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65781752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.6406215, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.974643, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5201784, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- 
eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 
1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityNotAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman", pi1 = 0.4, pi2 = 0.2, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 167.28361, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 125.46271, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 41.820903, tolerance = 1e-07) 
expect_equal(sampleSizeResult$maxNumberOfEvents, 57.658669, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 13.940301, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 7.0363139, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 11.430238, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 12.948104, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 11.531734, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 28.829335, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 57.658669, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 52.445438, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 47.588863, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 35.880987, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 98.088334, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 159.34095, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 167.28361, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 73.56625, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 119.50572, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 125.46271, tolerance = 
1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 24.522083, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 39.835239, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 41.820903, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 148.45363, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.27467837, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50642065, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65781752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.6406215, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.974643, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5201784, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 1) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 
1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 144.79208, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 72.396041, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 72.396041, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 18.09901, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267714, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.327532, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 12.334566, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 8.6488004, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 21.622001, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 43.244002, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 39.334079, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 35.691647, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 26.91074, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 112.6984, tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 144.79208, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 144.79208, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 137.49311, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.27467837, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50642065, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65781752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.6406215, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.974643, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5201784, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 165.55968, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 124.16976, tolerance = 1e-07) 
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 41.389919, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 57.658669, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 20.69496, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], 6.1694167, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 10.240925, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], 18) expect_equal(sampleSizeResult$studyDuration, 12.287795, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 11.531734, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 28.829335, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], 57.658669, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 52.445438, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 47.588863, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 35.880987, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 127.67583, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 165.55968, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 165.55968, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 95.756873, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 124.16976, tolerance = 1e-07) 
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 124.16976, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 31.918958, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 41.389919, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 41.389919, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 156.94387, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.27467837, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50642065, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65781752, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.6406215, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.974643, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5201784, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, 
sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = c(0.3, 0.4), pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(27.207015, 18.996816), tolerance = 
1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.025476782, 0.036487545), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(1.5984103, 2.2892242), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(634.39599, 172.23645), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(475.79699, 129.17734), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(158.599, 43.059113), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(167.01364, 57.658669), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(79.299498, 21.529557), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(6.2267383, 6.0820828), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(10.326085, 10.109775), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], c(18, 18)) expect_equal(sampleSizeResult$studyDuration, c(12.333995, 12.216859), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(33.402728, 11.531734), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(83.50682, 28.829335), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], c(167.01364, 57.658669), tolerance = 1e-07) 
expect_equal(sampleSizeResult$expectedEventsH0, c(151.91304, 52.445438), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(137.84552, 47.588863), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(103.93258, 35.880987), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(493.77723, 130.94455), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(634.39599, 172.23645), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(634.39599, 172.23645), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], c(370.33292, 98.20841), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], c(475.79699, 129.17734), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], c(475.79699, 129.17734), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], c(123.44431, 32.736137), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], c(158.599, 43.059113), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, ], c(158.599, 43.059113), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(602.41549, 162.84556), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], c(0.4680288, 0.27467837), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], c(0.67047266, 0.50642065), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], c(0.78185284, 0.65781752), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], c(2.1366206, 3.6406215), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], c(1.4914851, 1.974643), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], c(1.2790131, 1.5201784), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, 
tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 
&& ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} sampleSizeResult <- getSampleSizeSurvival(designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.3, pi2 = 0.2, eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, TRUE) expect_equal(sampleSizeResult$median1, 27.207015, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.025476782, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 1.5984103, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 634.39599, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 475.79699, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 158.599, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 167.01364, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 79.299498, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) 
## Remaining regression checks for the scalar pi1 = 0.3 case (call on the
## preceding lines): information rates, analysis times, events, subject
## numbers and critical values on the effect and p-value scales.
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
# Calendar times at which the three analyses take place.
expect_equal(sampleSizeResult$analysisTime[1, ], 6.2267383, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.326085, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDuration, 12.333995, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
# Cumulative events per stage (20% / 50% / 100% of maxNumberOfEvents,
# matching the information rates checked above).
expect_equal(sampleSizeResult$eventsPerStage[1, ], 33.402728, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 83.50682, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 167.01364, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 151.91304, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 137.84552, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 103.93258, tolerance = 1e-07)
# Recruited subjects overall and per group
# (group 1 : group 2 = 3 : 1, from allocationRatioPlanned = 3).
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 493.77723, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 634.39599, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 634.39599, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 370.33292, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 475.79699, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 475.79699, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 123.44431, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 158.599, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 158.599, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 602.41549, tolerance = 1e-07)
# Stage-wise decision boundaries translated to the effect (hazard ratio)
# scale and to the p-value scale.
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.4680288, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.67047266, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.78185284, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 2.1366206, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.4914851, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.2790131, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07)
# Extended checks, only run when the complete unit test set is enabled:
# print()/summary() must not raise, and an object rebuilt from its generated
# R code (getObjectRCode round-trip) must reproduce every field.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05)
    # Generic accessors must yield non-empty results of the expected type.
    expect_type(names(sampleSizeResult), "character")
    df <- as.data.frame(sampleSizeResult)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
# @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
# Same design and accrual settings as the previous call, but with a larger
# treatment-group event probability pi1 = 0.4 (scalar case).
sampleSizeResult <- getSampleSizeSurvival(designGS2,
    typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2,
    eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
    dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE,
    allocationRatioPlanned = 3)

## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$directionUpper, TRUE)
expect_equal(sampleSizeResult$median1, 18.996816, tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, 0.036487545, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, 2.2892242, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, 172.23645, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, 129.17734, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, 43.059113, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, 57.658669, tolerance = 1e-07)
## Remaining regression checks for the scalar pi1 = 0.4 case (call on the
## preceding lines): accrual, stage-wise rejection/stopping probabilities,
## timing, events, subject numbers and critical values.
expect_equal(sampleSizeResult$accrualIntensity, 21.529557, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], 6.0820828, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], 10.109775, tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], 18)
expect_equal(sampleSizeResult$studyDuration, 12.216859, tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, 18)
expect_equal(sampleSizeResult$eventsPerStage[1, ], 11.531734, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], 28.829335, tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], 57.658669, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, 52.445438, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH01, 47.588863, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, 35.880987, tolerance = 1e-07)
# Subjects overall and per group (group 1 : group 2 = 3 : 1, from
# allocationRatioPlanned = 3).
expect_equal(sampleSizeResult$numberOfSubjects[1, ], 130.94455, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], 172.23645, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], 172.23645, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], 98.20841, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], 129.17734, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], 129.17734, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], 32.736137, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], 43.059113, tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], 43.059113, tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 162.84556, tolerance = 1e-07)
# Stage-wise decision boundaries on the effect and p-value scales.
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], 0.27467837, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], 0.50642065, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], 0.65781752, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], 3.6406215, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], 1.974643, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], 1.5201784, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07)
# Extended checks, only run when the complete unit test set is enabled:
# print()/summary() must not raise, and the getObjectRCode round-trip must
# reproduce every field of the original result.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05)
    # Generic accessors must yield non-empty results of the expected type.
    expect_type(names(sampleSizeResult), "character")
    df <- as.data.frame(sampleSizeResult)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeResult)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
# @refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh}
# @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber}
# @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes}
# @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage}
# @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints}
# Vectorised case: both pi1 scenarios (0.3 and 0.4) in a single call; the
# expected values below are the column-wise combination of the two scalar
# runs above. NOTE(review): maxNumberOfSubjects = 0 presumably requests that
# the number of subjects be computed -- confirm against the
# getSampleSizeSurvival() documentation.
sampleSizeResult <- getSampleSizeSurvival(designGS2, maxNumberOfSubjects = 0,
    typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = c(0.3, 0.4), pi2 = 0.2,
    eventTime = 14, accrualTime = 8, followUpTime = 10, dropoutRate1 = 0.1,
    dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE,
    allocationRatioPlanned = 3
)

## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE))
expect_equal(sampleSizeResult$median1, c(27.207015, 18.996816), tolerance = 1e-07)
expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda1, c(0.025476782, 0.036487545), tolerance = 1e-07)
expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07)
expect_equal(sampleSizeResult$hazardRatio, c(1.5984103, 2.2892242), tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects, c(634.39599, 172.23645), tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(475.79699, 129.17734), tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(158.599, 43.059113), tolerance = 1e-07)
expect_equal(sampleSizeResult$maxNumberOfEvents, c(167.01364, 57.658669), tolerance = 1e-07)
expect_equal(sampleSizeResult$accrualIntensity, c(79.299498, 21.529557), tolerance = 1e-07)
# Design-level quantities do not depend on pi1, hence scalar expectations.
expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07)
expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07)
expect_equal(sampleSizeResult$earlyStop, 0.61685633, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07)
expect_equal(sampleSizeResult$informationRates[3, ], 1)
expect_equal(sampleSizeResult$analysisTime[1, ], c(6.2267383, 6.0820828), tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[2, ], c(10.326085, 10.109775), tolerance = 1e-07)
expect_equal(sampleSizeResult$analysisTime[3, ], c(18, 18))
expect_equal(sampleSizeResult$studyDuration, c(12.333995, 12.216859), tolerance = 1e-07)
expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18))
expect_equal(sampleSizeResult$eventsPerStage[1, ], c(33.402728, 11.531734), tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[2, ], c(83.50682, 28.829335), tolerance = 1e-07)
expect_equal(sampleSizeResult$eventsPerStage[3, ], c(167.01364, 57.658669), tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH0, c(151.91304, 52.445438), tolerance = 1e-07)
## Regression checks continued for the vectorised pi1 = c(0.3, 0.4) call:
## rows index stages, vector entries index the two pi1 scenarios.
expect_equal(sampleSizeResult$expectedEventsH01, c(137.84552, 47.588863), tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedEventsH1, c(103.93258, 35.880987), tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(493.77723, 130.94455), tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(634.39599, 172.23645), tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(634.39599, 172.23645), tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[1, ], c(370.33292, 98.20841), tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[2, ], c(475.79699, 129.17734), tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects1[3, ], c(475.79699, 129.17734), tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[1, ], c(123.44431, 32.736137), tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[2, ], c(158.599, 43.059113), tolerance = 1e-07)
expect_equal(sampleSizeResult$numberOfSubjects2[3, ], c(158.599, 43.059113), tolerance = 1e-07)
expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(602.41549, 162.84556), tolerance = 1e-07)
# Stage-wise decision boundaries on the effect and p-value scales.
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], c(0.4680288, 0.27467837), tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], c(0.67047266, 0.50642065), tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], c(0.78185284, 0.65781752), tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], c(2.1366206, 3.6406215), tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], c(1.4914851, 1.974643), tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], c(1.2790131, 1.5201784), tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07)
expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07)
# Extended checks (complete unit test set only): print()/summary() must not
# raise, and the getObjectRCode round-trip must reproduce every field.
# NOTE: this if-block continues past this point; the remaining round-trip
# comparisons and the closing brace follow below.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
    expect_output(print(sampleSizeResult)$show())
    invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
    expect_output(summary(sampleSizeResult)$show())
    sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05)
    expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05)
expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # 
@refFS[Formula]{fs:sampleSizeSurvivalFreedmanHsieh} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventProbabilityAcccountForOberservationTimes} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} # @refFS[Formula]{fs:sampleSizeSurvivalExpectedTimePoints} # @refFS[Formula]{fs:sampleSizeSurvivalOptimumAllocationRatio} sampleSizeResult <- getSampleSizeSurvival(maxNumberOfSubjects = 468, designGS2, typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = c(0.3, 0.4), pi2 = 0.2, eventTime = 14, accrualTime = 8, dropoutRate1 = 0.1, dropoutRate2 = 0.05, dropoutTime = 16, accountForObservationTimes = TRUE, allocationRatioPlanned = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(27.207015, 18.996816), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 43.487972, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.025476782, 0.036487545), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.015938825, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(1.5984103, 2.2892242), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(468, 468)) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(351, 351)) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(117, 117)) expect_equal(sampleSizeResult$maxNumberOfEvents, c(167.01364, 57.658669), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 58.5, tolerance = 1e-07) expect_equal(sampleSizeResult$followUpTime, c(16.753912, 0.380791), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.22742698, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.38942935, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.28314367, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.61685633, 
tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[1, ], 0.2, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[2, ], 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$informationRates[3, ], 1) expect_equal(sampleSizeResult$analysisTime[1, ], c(7.2865998, 3.6319976), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(12.859517, 5.8243524), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[3, ], c(24.753912, 8.380791), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(16.149347, 6.305235), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(24.753912, 8.380791), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(33.402728, 11.531734), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(83.50682, 28.829335), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[3, ], c(167.01364, 57.658669), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(151.91304, 52.445438), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(137.84552, 47.588863), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(103.93258, 35.880987), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(426.26609, 212.47186), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(468, 340.72461), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], c(468, 468)) expect_equal(sampleSizeResult$numberOfSubjects1[1, ], c(319.69957, 159.35389), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[2, ], c(351, 255.54346), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects1[3, ], c(351, 351)) expect_equal(sampleSizeResult$numberOfSubjects2[1, ], c(106.56652, 53.117965), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[2, ], c(117, 85.181153), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects2[3, 
], c(117, 117)) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(458.50858, 360.32124), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[1, ], c(0.4680288, 0.27467837), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[2, ], c(0.67047266, 0.50642065), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleLower[3, ], c(0.78185284, 0.65781752), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[1, ], c(2.1366206, 3.6406215), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[2, ], c(1.4914851, 1.974643), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScaleUpper[3, ], c(1.2790131, 1.5201784), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[1, ], 0.057428091, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[2, ], 0.11367628, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesPValueScale[3, ], 0.16847851, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, 
sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$followUpTime, sampleSizeResult$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$informationRates, sampleSizeResult$informationRates, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects1, 
sampleSizeResult$numberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects2, sampleSizeResult$numberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleLower, sampleSizeResult$criticalValuesEffectScaleLower, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScaleUpper, sampleSizeResult$criticalValuesEffectScaleUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesPValueScale, sampleSizeResult$criticalValuesPValueScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_plan_section("Testing the Sample Size Calculation of Survival Designs for Other Parameter Variants") test_that("'getSampleSizeSurvival': For fixed sample design, determine necessary accrual time if 200 subjects and 30 subjects per time unit can be recruited", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival( accrualTime = c(0), accrualIntensity = c(30), maxNumberOfSubjects = 120 ) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 
0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$followUpTime, c(14.350651, 4.1854022, 1.0840261), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsFixed, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed, c(120, 120, 120)) expect_equal(sampleSizeResult$nFixed1, c(60, 60, 60)) expect_equal(sampleSizeResult$nFixed2, c(60, 60, 60)) expect_equal(sampleSizeResult$analysisTime[1, ], c(18.350651, 8.1854022, 5.0840261), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(18.350651, 8.1854022, 5.0840261), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$followUpTime, sampleSizeResult$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Determine necessary accrual time if 200 subjects and if the first 6 time units 20 subjects per time unit can be recruited, then 30 subjects per time unit", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival( beta = 0.01, accrualTime = c(0, 4), accrualIntensity = c(10, 20), maxNumberOfSubjects = 180 ) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, 
c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(107.13798, 57.20584, 36.833186), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 11) expect_equal(sampleSizeResult$followUpTime, c(27.319035, 6.0447949, 0.58657023), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsFixed, c(107.13798, 57.20584, 36.833186), tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed, c(180, 180, 180)) expect_equal(sampleSizeResult$nFixed1, c(90, 90, 90)) expect_equal(sampleSizeResult$nFixed2, c(90, 90, 90)) expect_equal(sampleSizeResult$analysisTime[1, ], c(38.319035, 17.044795, 11.58657), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(38.319035, 17.044795, 11.58657), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.4603989, 1.6791239, 1.9076838), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 
1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$totalAccrualTime, sampleSizeResult$totalAccrualTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$followUpTime, sampleSizeResult$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Determine maximum number of Subjects if the first 6 time units 20 subjects per time unit can be recruited, and after 10 time units 30 subjects per time unit", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- 
getSampleSizeSurvival(accrualTime = c(0, 3, 5), accrualIntensity = c(20, 30)) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(120, 120, 120)) expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 5) expect_equal(sampleSizeResult$followUpTime, c(14.113265, 3.9529427, 0.85781252), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsFixed, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed, c(120, 120, 120)) expect_equal(sampleSizeResult$nFixed1, c(60, 60, 60)) expect_equal(sampleSizeResult$nFixed2, c(60, 60, 60)) expect_equal(sampleSizeResult$analysisTime[1, ], c(19.113265, 8.9529427, 5.8578125), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(19.113265, 8.9529427, 5.8578125), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) 
expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$totalAccrualTime, sampleSizeResult$totalAccrualTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$followUpTime, sampleSizeResult$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && 
ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Specify accrual time as a list", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} at <- list("0 - <3" = 20, "3 - Inf" = 30) sampleSizeResult <- getSampleSizeSurvival(accrualTime = at, maxNumberOfSubjects = 120) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 5) expect_equal(sampleSizeResult$followUpTime, c(14.113265, 3.9529427, 0.85781252), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsFixed, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed, c(120, 120, 120)) expect_equal(sampleSizeResult$nFixed1, c(60, 60, 60)) expect_equal(sampleSizeResult$nFixed2, c(60, 60, 60)) expect_equal(sampleSizeResult$analysisTime[1, ], c(19.113265, 8.9529427, 5.8578125), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(19.113265, 8.9529427, 5.8578125), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$totalAccrualTime, sampleSizeResult$totalAccrualTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$followUpTime, sampleSizeResult$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) 
expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Specify accrual time as a list, if maximum number of subjects need to be calculated", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} at <- list("0 - <3" = 20, "3 - <=5" = 30) sampleSizeResult <- getSampleSizeSurvival(accrualTime = at) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(120, 120, 120)) expect_equal(sampleSizeResult$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 5) expect_equal(sampleSizeResult$followUpTime, c(14.113265, 3.9529427, 0.85781252), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsFixed, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed, c(120, 120, 120)) expect_equal(sampleSizeResult$nFixed1, c(60, 60, 60)) expect_equal(sampleSizeResult$nFixed2, c(60, 60, 60)) expect_equal(sampleSizeResult$analysisTime[1, ], c(19.113265, 
8.9529427, 5.8578125), tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, c(19.113265, 8.9529427, 5.8578125), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$totalAccrualTime, sampleSizeResult$totalAccrualTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$followUpTime, sampleSizeResult$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Effect size is based on event rate at specified event time for the reference group and hazard ratio", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, pi2 = 0.3, eventTime = 24) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$pi1, 0.16333997, tolerance = 1e-07) expect_equal(sampleSizeResult$median1, 93.281194, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 46.640597, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.007430728, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda2, 0.014861456, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 532.72433, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 266.36217, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 266.36217, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 65.854457, tolerance = 1e-07) 
expect_equal(sampleSizeResult$accrualIntensity, 44.393694, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 11.816947, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 18) expect_equal(sampleSizeResult$studyDuration, 16.704001, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 32.927229, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 65.76941, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 64.676952, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 58.952743, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 524.59793, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 532.72433, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 531.021, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.37730742, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.61425355, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$pi1, sampleSizeResult$pi1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda2, sampleSizeResult$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, 
sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Effect size is based on hazard rate for the reference group and hazard ratio", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalPatientNumber} # @refFS[Formula]{fs:sampleSizeSurvivalEventsPerStage} sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), hazardRatio = 0.5, lambda2 = 0.02) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult$lambda1, 0.01, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 406.47112, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, 203.23556, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, 203.23556, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 33.872594, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance 
= 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 11.754955, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], 18) expect_equal(sampleSizeResult$studyDuration, 16.691007, tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, 18) expect_equal(sampleSizeResult$eventsPerStage[1, ], 32.927229, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], 65.854457, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, 65.76941, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, 64.676952, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, 58.952743, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 398.17083, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 406.47112, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 404.73134, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.37730742, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.61425355, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$lambda1, sampleSizeResult$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 
1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- 
as.matrix(sampleSizeResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time and hazard ratios", {
    .skipTestIfDisabled()

    # Three hazard ratios are evaluated in one call, so every expected
    # result below is a vector of length 3 (one entry per hazard ratio).
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual}
    # @refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual}
    # @refFS[Formula]{fs:pieceWiseExponentialSurvival}
    sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), hazardRatio = c(1.5, 1.8, 2))

    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE))
    expect_equal(sampleSizeResult$maxNumberOfSubjects, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, c(192.45497, 91.579156, 65.854457), tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, c(63.558499, 27.766537, 18.999624), tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[1, ], c(13.350554, 13.286013, 13.241069), tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], c(18, 18, 18))
    expect_equal(sampleSizeResult$studyDuration, c(17.025453, 17.011925, 17.002504), tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18))
    expect_equal(sampleSizeResult$eventsPerStage[1, ], c(96.227483, 45.789578, 32.927229), tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], c(192.45497, 91.579156, 65.854457), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, c(192.20642, 91.460887, 65.76941), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, c(189.01379, 89.941683, 64.676952), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, c(172.2852, 81.981429, 58.952743), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.76855, 2.2853938, 2.6503587), tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(1.3298684, 1.5117519, 1.6279922), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # Round-trip check: regenerate the object from its own R code
        # representation and compare all derived quantities.
        invisible(capture.output(expect_error(print(sampleSizeResult), NA)))
        expect_output(print(sampleSizeResult)$show())
        invisible(capture.output(expect_error(summary(sampleSizeResult), NA)))
        expect_output(summary(sampleSizeResult)$show())
        sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL)))
        expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05)
expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time as a list and hazard ratios", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual} # 
@refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual} # @refFS[Formula]{fs:pieceWiseExponentialSurvival} pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(63.558499, 27.766537, 18.999624), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.350554, 13.286013, 13.241069), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(18, 18, 18)) expect_equal(sampleSizeResult$studyDuration, c(17.025453, 17.011925, 17.002504), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(96.227483, 45.789578, 32.927229), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(192.20642, 91.460887, 65.76941), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(189.01379, 89.941683, 64.676952), tolerance = 
1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(172.2852, 81.981429, 58.952743), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.76855, 2.2853938, 2.6503587), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(1.3298684, 1.5117519, 1.6279922), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05)
        expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05)
        expect_type(names(sampleSizeResult), "character")
        df <- as.data.frame(sampleSizeResult)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(sampleSizeResult)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time for both treatment arms", {
    .skipTestIfDisabled()

    # Both arms get explicit piecewise hazards (lambda1/lambda2); the
    # hazard ratio (1.5) is derived from them, so all expected values
    # below are scalars.
    # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival}
    # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual}
    # @refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual}
    # @refFS[Formula]{fs:pieceWiseExponentialSurvival}
    sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = c(0, 5, 10), lambda2 = c(0.01, 0.02, 0.04), lambda1 = c(0.015, 0.03, 0.06))

    ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results
    expect_equal(sampleSizeResult$directionUpper, TRUE)
    expect_equal(sampleSizeResult$hazardRatio, 1.5, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects, 762.70199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects1, 381.35099, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfSubjects2, 381.35099, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxNumberOfEvents, 192.45497, tolerance = 1e-07)
    expect_equal(sampleSizeResult$accrualIntensity, 63.558499, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07)
    expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[1, ], 13.350554, tolerance = 1e-07)
    expect_equal(sampleSizeResult$analysisTime[2, ], 18)
    expect_equal(sampleSizeResult$studyDuration, 17.025453, tolerance = 1e-07)
    expect_equal(sampleSizeResult$maxStudyDuration, 18)
    expect_equal(sampleSizeResult$eventsPerStage[1, ], 96.227483, tolerance = 1e-07)
    expect_equal(sampleSizeResult$eventsPerStage[2, ], 192.45497, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH0, 192.20642, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH01, 189.01379, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedEventsH1, 172.2852, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[1, ], 762.70199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$numberOfSubjects[2, ], 762.70199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 762.70199, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.76855, tolerance = 1e-07)
    expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 1.3298684, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Specification of piecewise exponential survival time as a list", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual} # @refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual} # @refFS[Formula]{fs:pieceWiseExponentialSurvival} pws <- list("0 - <5" = 0.01, "5 - <10" = 0.02, ">=10" = 0.04) sampleSizeResult <- getSampleSizeSurvival(design = getDesignGroupSequential(kMax = 2), piecewiseSurvivalTime = pws, hazardRatio = c(1.5, 1.8, 2)) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeResult$maxNumberOfSubjects, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects1, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects2, c(381.35099, 166.59922, 113.99774), tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, c(63.558499, 27.766537, 
18.999624), tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.59039494, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.20960506, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], c(13.350554, 13.286013, 13.241069), tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[2, ], c(18, 18, 18)) expect_equal(sampleSizeResult$studyDuration, c(17.025453, 17.011925, 17.002504), tolerance = 1e-07) expect_equal(sampleSizeResult$maxStudyDuration, c(18, 18, 18)) expect_equal(sampleSizeResult$eventsPerStage[1, ], c(96.227483, 45.789578, 32.927229), tolerance = 1e-07) expect_equal(sampleSizeResult$eventsPerStage[2, ], c(192.45497, 91.579156, 65.854457), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH0, c(192.20642, 91.460887, 65.76941), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH01, c(189.01379, 89.941683, 64.676952), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedEventsH1, c(172.2852, 81.981429, 58.952743), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, c(762.70199, 333.19844, 227.99548), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], c(1.76855, 2.2853938, 2.6503587), tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], c(1.3298684, 1.5117519, 1.6279922), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- 
eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects1, sampleSizeResult$maxNumberOfSubjects1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects2, sampleSizeResult$maxNumberOfSubjects2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$rejectPerStage, sampleSizeResult$rejectPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$earlyStop, sampleSizeResult$earlyStop, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxStudyDuration, sampleSizeResult$maxStudyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsPerStage, sampleSizeResult$eventsPerStage, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH0, sampleSizeResult$expectedEventsH0, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH01, sampleSizeResult$expectedEventsH01, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedEventsH1, sampleSizeResult$expectedEventsH1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$numberOfSubjects, sampleSizeResult$numberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$expectedNumberOfSubjectsH1, sampleSizeResult$expectedNumberOfSubjectsH1, tolerance = 1e-05) 
expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Specify effect size based on median survival times (median1 = 5, median2 = 3)", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:lambdabymedian} sampleSizeResult <- getSampleSizeSurvival(lambda1 = log(2) / 5, lambda2 = log(2) / 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$median1, 5) expect_equal(sampleSizeResult$median2, 3) expect_equal(sampleSizeResult$hazardRatio, 0.6, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 120.3157, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 11.772201, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsFixed, 120.3157, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed, 141.26641, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed1, 70.633206, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed2, 70.633206, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 18) expect_equal(sampleSizeResult$studyDuration, 18) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = 
getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:lambdabymedian} sampleSizeResult2 <- getSampleSizeSurvival(median1 = 5, median2 = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult2' with expected results expect_equal(sampleSizeResult2$directionUpper, FALSE) 
expect_equal(sampleSizeResult2$lambda1, 0.13862944, tolerance = 1e-07) expect_equal(sampleSizeResult2$lambda2, 0.23104906, tolerance = 1e-07) expect_equal(sampleSizeResult2$hazardRatio, 0.6, tolerance = 1e-07) expect_equal(sampleSizeResult2$maxNumberOfEvents, 120.3157, tolerance = 1e-07) expect_equal(sampleSizeResult2$accrualIntensity, 11.772201, tolerance = 1e-07) expect_equal(sampleSizeResult2$eventsFixed, 120.3157, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed, 141.26641, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed1, 70.633206, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed2, 70.633206, tolerance = 1e-07) expect_equal(sampleSizeResult2$analysisTime[1, ], 18) expect_equal(sampleSizeResult2$studyDuration, 18) expect_equal(sampleSizeResult2$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult2), NA))) expect_output(print(sampleSizeResult2)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult2), NA))) expect_output(summary(sampleSizeResult2)$show()) sampleSizeResult2CodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult2, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResult2CodeBased$directionUpper, sampleSizeResult2$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$lambda1, sampleSizeResult2$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$lambda2, sampleSizeResult2$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$hazardRatio, sampleSizeResult2$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$maxNumberOfEvents, sampleSizeResult2$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$accrualIntensity, sampleSizeResult2$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$eventsFixed, sampleSizeResult2$eventsFixed, tolerance = 1e-05) 
expect_equal(sampleSizeResult2CodeBased$nFixed, sampleSizeResult2$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$nFixed1, sampleSizeResult2$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$nFixed2, sampleSizeResult2$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$analysisTime, sampleSizeResult2$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$studyDuration, sampleSizeResult2$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$criticalValuesEffectScale, sampleSizeResult2$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult2), "character") df <- as.data.frame(sampleSizeResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:lambdabymedian} sampleSizeResult <- getSampleSizeSurvival( lambda1 = getLambdaByMedian(median = 5, kappa = 2), lambda2 = getLambdaByMedian(median = 3, kappa = 2), kappa = 2 ) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$median1, 5) expect_equal(sampleSizeResult$median2, 3) expect_equal(sampleSizeResult$hazardRatio, 0.36, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 30.078926, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 2.6040472, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsFixed, 30.078926, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed, 31.248566, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed1, 15.624283, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed2, 15.624283, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 18) expect_equal(sampleSizeResult$studyDuration, 18) 
expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.48932026, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- 
as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:lambdabymedian} sampleSizeResult2 <- getSampleSizeSurvival(median1 = 5, median2 = 3, kappa = 2) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult2' with expected results expect_equal(sampleSizeResult2$directionUpper, FALSE) expect_equal(sampleSizeResult2$lambda1, 0.16651092, tolerance = 1e-07) expect_equal(sampleSizeResult2$lambda2, 0.2775182, tolerance = 1e-07) expect_equal(sampleSizeResult2$hazardRatio, 0.36, tolerance = 1e-07) expect_equal(sampleSizeResult2$maxNumberOfEvents, 30.078926, tolerance = 1e-07) expect_equal(sampleSizeResult2$accrualIntensity, 2.6040472, tolerance = 1e-07) expect_equal(sampleSizeResult2$eventsFixed, 30.078926, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed, 31.248566, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed1, 15.624283, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed2, 15.624283, tolerance = 1e-07) expect_equal(sampleSizeResult2$analysisTime[1, ], 18) expect_equal(sampleSizeResult2$studyDuration, 18) expect_equal(sampleSizeResult2$criticalValuesEffectScale[1, ], 0.48932026, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult2), NA))) expect_output(print(sampleSizeResult2)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult2), NA))) expect_output(summary(sampleSizeResult2)$show()) sampleSizeResult2CodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult2, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResult2CodeBased$directionUpper, sampleSizeResult2$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$lambda1, sampleSizeResult2$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$lambda2, sampleSizeResult2$lambda2, tolerance = 1e-05) 
expect_equal(sampleSizeResult2CodeBased$hazardRatio, sampleSizeResult2$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$maxNumberOfEvents, sampleSizeResult2$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$accrualIntensity, sampleSizeResult2$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$eventsFixed, sampleSizeResult2$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$nFixed, sampleSizeResult2$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$nFixed1, sampleSizeResult2$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$nFixed2, sampleSizeResult2$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$analysisTime, sampleSizeResult2$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$studyDuration, sampleSizeResult2$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$criticalValuesEffectScale, sampleSizeResult2$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult2), "character") df <- as.data.frame(sampleSizeResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Specify effect size based on rates with kappa = 3", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:lambdabypi} sampleSizeResult <- getSampleSizeSurvival(lambda1 = (-log(1 - 0.23))^(1 / 3) / 14, lambda2 = (-log(1 - 0.38))^(1 / 3) / 14, kappa = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$median1, 19.378531, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 15.845881, tolerance = 1e-07) 
expect_equal(sampleSizeResult$hazardRatio, 0.54674726, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 86.124472, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualIntensity, 30.926108, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsFixed, 86.124472, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed, 371.1133, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed1, 185.55665, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed2, 185.55665, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 18) expect_equal(sampleSizeResult$studyDuration, 18) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.65547761, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualIntensity, sampleSizeResult$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 
1e-05) expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:lambdabypi} sampleSizeResult2 <- getSampleSizeSurvival(pi1 = 0.23, pi2 = 0.38, eventTime = 14, kappa = 3) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult2' with expected results expect_equal(sampleSizeResult2$directionUpper, FALSE) expect_equal(sampleSizeResult2$median1, 19.378531, tolerance = 1e-07) expect_equal(sampleSizeResult2$median2, 15.845881, tolerance = 1e-07) expect_equal(sampleSizeResult2$lambda1, 0.045668945, tolerance = 1e-07) expect_equal(sampleSizeResult2$lambda2, 0.055850291, tolerance = 1e-07) expect_equal(sampleSizeResult2$hazardRatio, 0.54674726, tolerance = 1e-07) expect_equal(sampleSizeResult2$maxNumberOfEvents, 86.124472, tolerance = 1e-07) expect_equal(sampleSizeResult2$accrualIntensity, 30.926108, tolerance = 1e-07) expect_equal(sampleSizeResult2$eventsFixed, 86.124472, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed, 371.1133, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed1, 185.55665, tolerance = 1e-07) expect_equal(sampleSizeResult2$nFixed2, 185.55665, tolerance = 1e-07) expect_equal(sampleSizeResult2$analysisTime[1, ], 18) expect_equal(sampleSizeResult2$studyDuration, 18) 
expect_equal(sampleSizeResult2$criticalValuesEffectScale[1, ], 0.65547761, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult2), NA))) expect_output(print(sampleSizeResult2)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult2), NA))) expect_output(summary(sampleSizeResult2)$show()) sampleSizeResult2CodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult2, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResult2CodeBased$directionUpper, sampleSizeResult2$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$median1, sampleSizeResult2$median1, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$median2, sampleSizeResult2$median2, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$lambda1, sampleSizeResult2$lambda1, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$lambda2, sampleSizeResult2$lambda2, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$hazardRatio, sampleSizeResult2$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$maxNumberOfEvents, sampleSizeResult2$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$accrualIntensity, sampleSizeResult2$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$eventsFixed, sampleSizeResult2$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$nFixed, sampleSizeResult2$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$nFixed1, sampleSizeResult2$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$nFixed2, sampleSizeResult2$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$analysisTime, sampleSizeResult2$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$studyDuration, sampleSizeResult2$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResult2CodeBased$criticalValuesEffectScale, 
sampleSizeResult2$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult2), "character") df <- as.data.frame(sampleSizeResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSampleSizeSurvival': Calculation of maximum number of subjects for given follow-up time", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} sampleSizeResult <- getSampleSizeSurvival( accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 5 ) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult' with expected results expect_equal(sampleSizeResult$directionUpper, FALSE) expect_equal(sampleSizeResult$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfSubjects, 477.30924, tolerance = 1e-07) expect_equal(sampleSizeResult$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult$accrualTime, c(6, 12.515269), tolerance = 1e-07) expect_equal(sampleSizeResult$totalAccrualTime, 12.515269, tolerance = 1e-07) expect_equal(sampleSizeResult$eventsFixed, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed, 477.30924, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed1, 238.65462, tolerance = 1e-07) expect_equal(sampleSizeResult$nFixed2, 238.65462, tolerance = 1e-07) expect_equal(sampleSizeResult$analysisTime[1, ], 17.515269, tolerance = 1e-07) expect_equal(sampleSizeResult$studyDuration, 17.515269, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) sampleSizeResultCodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResultCodeBased$directionUpper, sampleSizeResult$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median1, sampleSizeResult$median1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$median2, sampleSizeResult$median2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$hazardRatio, sampleSizeResult$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfSubjects, sampleSizeResult$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$maxNumberOfEvents, sampleSizeResult$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$accrualTime, sampleSizeResult$accrualTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$totalAccrualTime, sampleSizeResult$totalAccrualTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$eventsFixed, sampleSizeResult$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed, sampleSizeResult$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed1, sampleSizeResult$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$nFixed2, sampleSizeResult$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$analysisTime, sampleSizeResult$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$studyDuration, sampleSizeResult$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResultCodeBased$criticalValuesEffectScale, sampleSizeResult$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult), "character") df <- as.data.frame(sampleSizeResult) 
expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} sampleSizeResult3 <- getSampleSizeSurvival( accrualTime = c(0, 6), accrualIntensity = c(22), lambda2 = 0.02, lambda1 = c(0.01) ) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult3' with expected results expect_equal(sampleSizeResult3$directionUpper, FALSE) expect_equal(sampleSizeResult3$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult3$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult3$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult3$maxNumberOfSubjects, 132) expect_equal(sampleSizeResult3$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult3$followUpTime, 44.431065, tolerance = 1e-07) expect_equal(sampleSizeResult3$eventsFixed, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult3$nFixed, 132) expect_equal(sampleSizeResult3$nFixed1, 66) expect_equal(sampleSizeResult3$nFixed2, 66) expect_equal(sampleSizeResult3$analysisTime[1, ], 50.431065, tolerance = 1e-07) expect_equal(sampleSizeResult3$studyDuration, 50.431065, tolerance = 1e-07) expect_equal(sampleSizeResult3$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult3), NA))) expect_output(print(sampleSizeResult3)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult3), NA))) expect_output(summary(sampleSizeResult3)$show()) sampleSizeResult3CodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult3, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResult3CodeBased$directionUpper, sampleSizeResult3$directionUpper, tolerance = 1e-05) 
expect_equal(sampleSizeResult3CodeBased$median1, sampleSizeResult3$median1, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$median2, sampleSizeResult3$median2, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$hazardRatio, sampleSizeResult3$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$maxNumberOfSubjects, sampleSizeResult3$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$maxNumberOfEvents, sampleSizeResult3$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$followUpTime, sampleSizeResult3$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$eventsFixed, sampleSizeResult3$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$nFixed, sampleSizeResult3$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$nFixed1, sampleSizeResult3$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$nFixed2, sampleSizeResult3$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$analysisTime, sampleSizeResult3$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$studyDuration, sampleSizeResult3$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResult3CodeBased$criticalValuesEffectScale, sampleSizeResult3$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult3), "character") df <- as.data.frame(sampleSizeResult3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} sampleSizeResult4 <- getSampleSizeSurvival( accrualTime = c(0, 6), accrualIntensity = c(22), lambda2 = 0.02, lambda1 = c(0.01) ) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult4' with expected results 
expect_equal(sampleSizeResult4$directionUpper, FALSE) expect_equal(sampleSizeResult4$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult4$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult4$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult4$maxNumberOfSubjects, 132) expect_equal(sampleSizeResult4$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult4$followUpTime, 44.431065, tolerance = 1e-07) expect_equal(sampleSizeResult4$eventsFixed, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult4$nFixed, 132) expect_equal(sampleSizeResult4$nFixed1, 66) expect_equal(sampleSizeResult4$nFixed2, 66) expect_equal(sampleSizeResult4$analysisTime[1, ], 50.431065, tolerance = 1e-07) expect_equal(sampleSizeResult4$studyDuration, 50.431065, tolerance = 1e-07) expect_equal(sampleSizeResult4$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult4), NA))) expect_output(print(sampleSizeResult4)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult4), NA))) expect_output(summary(sampleSizeResult4)$show()) sampleSizeResult4CodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult4, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResult4CodeBased$directionUpper, sampleSizeResult4$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$median1, sampleSizeResult4$median1, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$median2, sampleSizeResult4$median2, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$hazardRatio, sampleSizeResult4$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$maxNumberOfSubjects, sampleSizeResult4$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$maxNumberOfEvents, sampleSizeResult4$maxNumberOfEvents, tolerance = 1e-05) 
expect_equal(sampleSizeResult4CodeBased$followUpTime, sampleSizeResult4$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$eventsFixed, sampleSizeResult4$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$nFixed, sampleSizeResult4$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$nFixed1, sampleSizeResult4$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$nFixed2, sampleSizeResult4$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$analysisTime, sampleSizeResult4$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$studyDuration, sampleSizeResult4$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResult4CodeBased$criticalValuesEffectScale, sampleSizeResult4$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult4), "character") df <- as.data.frame(sampleSizeResult4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} sampleSizeResult6 <- getSampleSizeSurvival( accrualTime = c(0), accrualIntensity = c(22), lambda2 = 0.02, lambda1 = c(0.01), maxNumberOfSubjects = 300 ) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult6' with expected results expect_equal(sampleSizeResult6$directionUpper, FALSE) expect_equal(sampleSizeResult6$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult6$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult6$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult6$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult6$followUpTime, 9.9154676, tolerance = 1e-07) expect_equal(sampleSizeResult6$eventsFixed, 65.345659, tolerance = 1e-07) 
expect_equal(sampleSizeResult6$nFixed, 300) expect_equal(sampleSizeResult6$nFixed1, 150) expect_equal(sampleSizeResult6$nFixed2, 150) expect_equal(sampleSizeResult6$analysisTime[1, ], 23.551831, tolerance = 1e-07) expect_equal(sampleSizeResult6$studyDuration, 23.551831, tolerance = 1e-07) expect_equal(sampleSizeResult6$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult6), NA))) expect_output(print(sampleSizeResult6)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult6), NA))) expect_output(summary(sampleSizeResult6)$show()) sampleSizeResult6CodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult6, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResult6CodeBased$directionUpper, sampleSizeResult6$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$median1, sampleSizeResult6$median1, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$median2, sampleSizeResult6$median2, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$hazardRatio, sampleSizeResult6$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$maxNumberOfEvents, sampleSizeResult6$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$followUpTime, sampleSizeResult6$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$eventsFixed, sampleSizeResult6$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$nFixed, sampleSizeResult6$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$nFixed1, sampleSizeResult6$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$nFixed2, sampleSizeResult6$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$analysisTime, sampleSizeResult6$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResult6CodeBased$studyDuration, sampleSizeResult6$studyDuration, tolerance = 
1e-05) expect_equal(sampleSizeResult6CodeBased$criticalValuesEffectScale, sampleSizeResult6$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult6), "character") df <- as.data.frame(sampleSizeResult6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} sampleSizeResult7 <- getSampleSizeSurvival( accrualTime = c(0, 3), accrualIntensity = c(22, 53), lambda2 = 0.02, lambda1 = c(0.01), followUpTime = 44 ) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult7' with expected results expect_equal(sampleSizeResult7$directionUpper, FALSE) expect_equal(sampleSizeResult7$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult7$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult7$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult7$maxNumberOfSubjects, 135.32074, tolerance = 1e-07) expect_equal(sampleSizeResult7$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult7$accrualTime, c(3, 4.3079386), tolerance = 1e-07) expect_equal(sampleSizeResult7$totalAccrualTime, 4.3079386, tolerance = 1e-07) expect_equal(sampleSizeResult7$eventsFixed, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult7$nFixed, 135.32074, tolerance = 1e-07) expect_equal(sampleSizeResult7$nFixed1, 67.660372, tolerance = 1e-07) expect_equal(sampleSizeResult7$nFixed2, 67.660372, tolerance = 1e-07) expect_equal(sampleSizeResult7$analysisTime[1, ], 48.307942, tolerance = 1e-07) expect_equal(sampleSizeResult7$studyDuration, 48.307942, tolerance = 1e-07) expect_equal(sampleSizeResult7$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(sampleSizeResult7), NA))) expect_output(print(sampleSizeResult7)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult7), NA))) expect_output(summary(sampleSizeResult7)$show()) sampleSizeResult7CodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult7, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResult7CodeBased$directionUpper, sampleSizeResult7$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$median1, sampleSizeResult7$median1, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$median2, sampleSizeResult7$median2, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$hazardRatio, sampleSizeResult7$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$maxNumberOfSubjects, sampleSizeResult7$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$maxNumberOfEvents, sampleSizeResult7$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$accrualTime, sampleSizeResult7$accrualTime, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$totalAccrualTime, sampleSizeResult7$totalAccrualTime, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$eventsFixed, sampleSizeResult7$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$nFixed, sampleSizeResult7$nFixed, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$nFixed1, sampleSizeResult7$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$nFixed2, sampleSizeResult7$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$analysisTime, sampleSizeResult7$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$studyDuration, sampleSizeResult7$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeResult7CodeBased$criticalValuesEffectScale, sampleSizeResult7$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeResult7), "character") df <- 
as.data.frame(sampleSizeResult7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeResult7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getSampleSizeSurvival} # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} sampleSizeResult8 <- getSampleSizeSurvival( accrualTime = c(0, 6), accrualIntensity = c(22), lambda2 = 0.02, lambda1 = c(0.01) ) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeResult8' with expected results expect_equal(sampleSizeResult8$directionUpper, FALSE) expect_equal(sampleSizeResult8$median1, 69.314718, tolerance = 1e-07) expect_equal(sampleSizeResult8$median2, 34.657359, tolerance = 1e-07) expect_equal(sampleSizeResult8$hazardRatio, 0.5, tolerance = 1e-07) expect_equal(sampleSizeResult8$maxNumberOfSubjects, 132) expect_equal(sampleSizeResult8$maxNumberOfEvents, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult8$followUpTime, 44.431065, tolerance = 1e-07) expect_equal(sampleSizeResult8$eventsFixed, 65.345659, tolerance = 1e-07) expect_equal(sampleSizeResult8$nFixed, 132) expect_equal(sampleSizeResult8$nFixed1, 66) expect_equal(sampleSizeResult8$nFixed2, 66) expect_equal(sampleSizeResult8$analysisTime[1, ], 50.431065, tolerance = 1e-07) expect_equal(sampleSizeResult8$studyDuration, 50.431065, tolerance = 1e-07) expect_equal(sampleSizeResult8$criticalValuesEffectScale[1, ], 0.61574672, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult8), NA))) expect_output(print(sampleSizeResult8)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult8), NA))) expect_output(summary(sampleSizeResult8)$show()) sampleSizeResult8CodeBased <- eval(parse(text = getObjectRCode(sampleSizeResult8, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeResult8CodeBased$directionUpper, sampleSizeResult8$directionUpper, 
tolerance = 1e-05)
    # (continuation) remaining round-trip comparisons for 'sampleSizeResult8':
    # the object rebuilt from its generated R code must reproduce every field
    expect_equal(sampleSizeResult8CodeBased$median1, sampleSizeResult8$median1, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$median2, sampleSizeResult8$median2, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$hazardRatio, sampleSizeResult8$hazardRatio, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$maxNumberOfSubjects, sampleSizeResult8$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$maxNumberOfEvents, sampleSizeResult8$maxNumberOfEvents, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$followUpTime, sampleSizeResult8$followUpTime, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$eventsFixed, sampleSizeResult8$eventsFixed, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$nFixed, sampleSizeResult8$nFixed, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$nFixed1, sampleSizeResult8$nFixed1, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$nFixed2, sampleSizeResult8$nFixed2, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$analysisTime, sampleSizeResult8$analysisTime, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$studyDuration, sampleSizeResult8$studyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeResult8CodeBased$criticalValuesEffectScale, sampleSizeResult8$criticalValuesEffectScale, tolerance = 1e-05)
    # Generic container checks: names(), as.data.frame(), as.matrix() must work
    expect_type(names(sampleSizeResult8), "character")
    df <- as.data.frame(sampleSizeResult8)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeResult8)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
  }
})

# At the final stage the analysis time must equal accrual time plus follow-up
# time, for pi-, lambda-, and piecewise-lambda-based specifications alike.
test_that("'getSampleSizeSurvival': analysis time at last stage equals accrual time + follow-up time", {
  .skipTestIfDisabled()

  # pi-based specification, Pocock-type design
  x1 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "P"),
    accrualTime = 12, maxNumberOfSubjects = 766, pi2 = 0.05, pi1 = 0.1
  )
  expect_equal(x1$analysisTime[3], x1$accrualTime + x1$followUpTime)

  # lambda-based specification
  x2 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "P"),
    accrualTime = 12, maxNumberOfSubjects = 766, lambda2 = 0.005, lambda1 = 0.01
  )
  expect_equal(x2$analysisTime[3], x2$accrualTime + x2$followUpTime)

  # piecewise accrual with absolute accrual intensities
  x3 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "WT", deltaWT = 0),
    accrualTime = c(0, 12, 15), accrualIntensity = c(20, 30), lambda2 = 0.005, lambda1 = 0.01
  )
  expect_equal(
    x3$analysisTime[length(x3$analysisTime)],
    x3$accrualTime[length(x3$accrualTime)] + x3$followUpTime
  )

  # piecewise survival (two lambda2 pieces) with 'hazardRatio' given
  x4 <- getSampleSizeSurvival(getDesignGroupSequential(typeOfDesign = "WT", deltaWT = 0),
    accrualTime = c(0, 12, 15), accrualIntensity = c(40, 60),
    piecewiseSurvivalTime = c(0, 5), lambda2 = c(0.005, 0.01), hazardRatio = 0.8
  )
  expect_equal(
    x4$analysisTime[length(x4$analysisTime)],
    x4$accrualTime[length(x4$accrualTime)] + x4$followUpTime
  )
})

# Specifying 'followUpTime' or, equivalently, the resulting 'maxNumberOfSubjects'
# must yield the same follow-up time for each supported computation type.
test_that("'getSampleSizeSurvival': follow-up time is equal for different argument-target constellations", {
  .skipTestIfDisabled()

  designGS1 <- getDesignGroupSequential(
    informationRates = c(0.2, 0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3
  )

  # Schoenfeld: 'followUpTime' given vs. resulting 'maxNumberOfSubjects' given
  x5 <- getSampleSizeSurvival(designGS1,
    typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14,
    accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE,
    allocationRatioPlanned = 1
  )
  x6 <- getSampleSizeSurvival(designGS1,
    typeOfComputation = "Schoenfeld", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 14,
    accrualTime = 8, maxNumberOfSubjects = x5$maxNumberOfSubjects,
    accountForObservationTimes = TRUE, allocationRatioPlanned = 1
  )
  expect_equal(x5$followUpTime, x6$followUpTime)

  .skipTestIfDisabled()

  # Freedman: median-based specification with 2:1 allocation
  x7 <- getSampleSizeSurvival(designGS1,
    typeOfComputation = "Freedman", thetaH0 = 1, median1 = 44, median2 = 66,
    accrualTime = 43, followUpTime = 22, accountForObservationTimes = TRUE,
    allocationRatioPlanned = 2
  )
  x8 <- getSampleSizeSurvival(designGS1,
    typeOfComputation = "Freedman", thetaH0 = 1, median1 = 44, median2 = 66,
    accrualTime = 43, maxNumberOfSubjects = x7$maxNumberOfSubjects,
    accountForObservationTimes = TRUE, allocationRatioPlanned = 2
  )
  expect_equal(x7$followUpTime, x8$followUpTime)

  # Hsieh-Freedman: pi-based specification with allocation ratio 0.2
  x9 <- getSampleSizeSurvival(designGS1,
    typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 16,
    accrualTime = 8, followUpTime = 10, accountForObservationTimes = TRUE,
    allocationRatioPlanned = 0.2
  )
  x10 <- getSampleSizeSurvival(designGS1,
    typeOfComputation = "HsiehFreedman", thetaH0 = 1, pi1 = 0.4, pi2 = 0.2, eventTime = 16,
    accrualTime = 8, maxNumberOfSubjects = x9$maxNumberOfSubjects,
    accountForObservationTimes = TRUE, allocationRatioPlanned = 0.2
  )
  expect_equal(x9$followUpTime, x10$followUpTime)
})

# Invalid or ambiguous argument combinations must be rejected with exactly the
# messages asserted below ('fixed = TRUE' -> literal, non-regex matching).
test_that("'getSampleSizeSurvival': testing expected warnings and errors", {
  .skipTestIfDisabled()

  # 'followUpTime' given: only a single 'lambda1' is supported
  expect_error(getSampleSizeSurvival(
    accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02,
    lambda1 = c(0.01, 0.015), followUpTime = 5
  ), paste0(
    "Illegal argument: the calculation of 'maxNumberOfSubjects' for given 'followUpTime' ",
    "is only available for a single 'lambda1'; lambda1 = c(0.01, 0.015)"
  ), fixed = TRUE)
  # 'median1' is converted to 'lambda1' internally, hence the lambda1 message
  expect_error(getSampleSizeSurvival(
    accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02,
    median1 = c(5, 6), followUpTime = 5
  ), paste0(
    "Illegal argument: the calculation of 'maxNumberOfSubjects' for given 'followUpTime' ",
    "is only available for a single 'lambda1'; lambda1 = c(0.139, 0.116)"
  ), fixed = TRUE)
  expect_error(getSampleSizeSurvival(
    accrualTime = c(0, 6), accrualIntensity = c(22, 53), median2 = 4,
    median1 = c(5, 6), followUpTime = 5
  ), paste0(
    "Illegal argument: the calculation of 'maxNumberOfSubjects' for given 'followUpTime' ",
    "is only available for a single 'median1'; median1 = c(5, 6)"
  ), fixed = TRUE)
  expect_error(getSampleSizeSurvival(
    accrualTime = c(0, 6), accrualIntensity = c(22, 53), pi2 = 0.213,
    pi1 = c(0.113, 0.165), followUpTime = 5
  ), paste0(
    "Illegal argument: the calculation of 'maxNumberOfSubjects' for given 'followUpTime' ",
    "is only available for a single 'pi1'; pi1 = c(0.113, 0.165)"
  ), fixed = TRUE)
  expect_error(getSampleSizeSurvival(
    accrualTime = c(0), pi1 = c(0.4, 0.5), accrualIntensity = c(22), followUpTime = 6
  ), paste0(
    "Illegal argument: the calculation of 'maxNumberOfSubjects' for given 'followUpTime' ",
    "is only available for a single 'pi1'; pi1 = c(0.4, 0.5)"
  ), fixed = TRUE)

  # Range and consistency checks on the rate arguments
  expect_error(getSampleSizeSurvival(lambda2 = -1, hazardRatio = 2),
    "Argument out of bounds: 'lambda2' (-1) must be >= 0",
    fixed = TRUE
  )
  expect_error(getSampleSizeSurvival(lambda2 = 0, hazardRatio = 2),
    "Illegal argument: 'lambda2' (0) not allowed: at least one lambda value must be > 0",
    fixed = TRUE
  )
  expect_error(getSampleSizeSurvival(lambda2 = 0.9, hazardRatio = 0.8, kappa = 0),
    "Argument out of bounds: 'kappa' (0) must be > 0",
    fixed = TRUE
  )
  expect_error(getSampleSizeSurvival(pi1 = getPiByMedian(0.1), pi2 = getPiByMedian(0.2)))
  expect_warning(getSampleSizeSurvival(median1 = 0.1, median2 = 0.2, eventTime = 0.5),
    "'eventTime' (0.5) will be ignored",
    fixed = TRUE
  )

  # Vector-valued effect specifications together with 'followUpTime' must fail
  expect_error(getSampleSizeSurvival(
    lambda2 = 0.2, hazardRatio = c(0.6, 0.7), followUpTime = 8,
    accrualIntensity = 30, accrualTime = 0
  ))
  expect_error(getSampleSizeSurvival(
    lambda1 = c(0.02, 0.03), lambda2 = 0.2, hazardRatio = 0.6, followUpTime = 8,
    accrualIntensity = 30, accrualTime = 0
  ))
  expect_error(getSampleSizeSurvival(
    lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = c(0.6, 0.8),
    followUpTime = 8, accrualIntensity = 30, accrualTime = 0
  ))
  expect_warning(getSampleSizeSurvival(median1 = 0.1, median2 = 0.2, eventTime = 4),
    "'eventTime' (4) will be ignored",
    fixed = TRUE
  )

  .skipTestIfDisabled()

  # Negative follow-up time: accrual exceeds the maximal study duration -> warning
  expect_warning(getSampleSizeSurvival(
    accrualTime = c(0, 6), accrualIntensity = c(22, 53), lambda2 = 0.02,
    lambda1 = c(0.01), followUpTime = -1
  ), "Accrual duration longer than maximal study duration (time to maximal number of events); followUpTime = -1", fixed = TRUE)
  # Relative accrual intensities (< 1) with 'maxNumberOfSubjects' given:
  # accrual longer than the maximal study duration -> negative follow-up warning
  expect_warning(getSampleSizeSurvival(
    accrualTime = c(0, 6, 30), pi1 = 0.4, accrualIntensity = c(0.22, 0.53),
    maxNumberOfSubjects = 1000
  ), "Accrual duration longer than maximal study duration (time to maximal number of events); followUpTime = -17.501", fixed = TRUE)
})

test_plan_section("Testing Other Functions of the Sample Size Calculator for Survival Designs")

test_that("'getEventProbabilities': check expected events over time for overall survival (case 1)", {
  .skipTestIfDisabled()

  design <- getDesignGroupSequential(
    sided = 1, alpha = 0.025, beta = 0.2,
    informationRates = c(0.33, 0.7, 1), futilityBounds = c(0, 0), bindingFutility = FALSE
  )

  # Piecewise exponential hazards for the primary endpoint (per time interval)
  piecewiseSurvivalTime <- list(
    "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007
  )
  # Piecewise accrual intensities (subjects per time unit, per interval)
  accrualTime <- list(
    "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45
  )
  powerResults <- getPowerSurvival(
    design = design, typeOfComputation = "Schoenfeld", thetaH0 = 1, directionUpper = FALSE,
    dropoutRate1 = 0.05, dropoutRate2 = 0.05, dropoutTime = 12, allocationRatioPlanned = 1,
    accrualTime = accrualTime, piecewiseSurvivalTime = piecewiseSurvivalTime,
    hazardRatio = seq(0.6, 1, 0.05), maxNumberOfEvents = 404, maxNumberOfSubjects = 1405
  )

  # Overall-survival hazards; expected OS events are evaluated at the stage-2/3
  # analysis times (column 4 of the analysis-time matrix) plus one later time point
  piecewiseSurvivalTimeOS <- list(
    "0 - <14" = 0.015, "14 - <24" = 0.01, "24 - <44" = 0.005, ">=44" = 0.0025
  )
  timeOS <- c(powerResults$analysisTime[2:3, 4], 17 + 3.5 * 12)
  eventsOS <- getEventProbabilities(
    timeOS, accrualTime = accrualTime, piecewiseSurvivalTime = piecewiseSurvivalTimeOS,
    kappa = 1, allocationRatioPlanned = 1, hazardRatio = 0.8,
    dropoutRate1 = 0.05, dropoutRate2 = 0.05, dropoutTime = 12, maxNumberOfSubjects = 1405
  )$cumulativeEventProbabilities
  # scale probabilities to expected event counts
  eventsOS <- eventsOS * 1405

  expect_equal(round(timeOS, 2), c(37.60, 46.72, 59.00))
  expect_equal(round(eventsOS, 1), c(194.1, 288.7, 365.1))
})

test_that("'getEventProbabilities': check expected events over time for overall survival (case 2)", {
  .skipTestIfDisabled()

  accrualTime <- list(
    "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45
  )
  piecewiseSurvivalTimeOS <- list(
    "0 - <14" = 0.015, "14 - <24" = 0.01, "24 - <44" = 0.005, ">=44" = 0.0025
  )
  # Fixed evaluation times (the values computed in case 1 above)
  timeOS <- c(37.59823, 46.71658, 59)
  eventsOS <- getEventProbabilities(
    timeOS, accrualTime = accrualTime, piecewiseSurvivalTime = piecewiseSurvivalTimeOS,
    kappa = 1, allocationRatioPlanned = 1, hazardRatio = 0.8,
    dropoutRate1 = 0.05, dropoutRate2 = 0.05, dropoutTime = 12, maxNumberOfSubjects = 1405
  )

  # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual}
  # @refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual}
  # @refFS[Formula]{fs:pieceWiseExponentialSurvival}
  # @refFS[Formula]{fs:pieceWiseExponentialSurvivalWithDropOuts}
  ## Comparison of the results of EventProbabilities object 'eventsOS' with expected results
  expect_equal(eventsOS$lambda1, c(0.012, 0.008, 0.004, 0.002), tolerance = 1e-07)
  expect_equal(eventsOS$cumulativeEventProbabilities, c(0.13811859, 0.20546928, 0.2598385), tolerance = 1e-07)
  expect_equal(eventsOS$eventProbabilities1, c(0.12437783, 0.18544801, 0.23527681), tolerance = 1e-07)
  expect_equal(eventsOS$eventProbabilities2, c(0.15185935, 0.22549055, 0.28440019), tolerance = 1e-07)
  if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must run without error; $show() must produce output
    invisible(capture.output(expect_error(print(eventsOS), NA)))
    expect_output(print(eventsOS)$show())
    invisible(capture.output(expect_error(summary(eventsOS), NA)))
    expect_output(summary(eventsOS)$show())
    # Round trip: regenerate the object from its own R code and compare fields
    eventsOSCodeBased <- eval(parse(text = getObjectRCode(eventsOS, stringWrapParagraphWidth = NULL)))
    expect_equal(eventsOSCodeBased$lambda1, eventsOS$lambda1, tolerance = 1e-05)
    expect_equal(eventsOSCodeBased$cumulativeEventProbabilities, eventsOS$cumulativeEventProbabilities, tolerance = 1e-05)
    expect_equal(eventsOSCodeBased$eventProbabilities1, eventsOS$eventProbabilities1, tolerance = 1e-05)
    expect_equal(eventsOSCodeBased$eventProbabilities2, eventsOS$eventProbabilities2, tolerance = 1e-05)
    expect_type(names(eventsOS), "character")
    df <- as.data.frame(eventsOS)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(eventsOS)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
  }
})

test_that("'getNumberOfSubjects': check the number of recruited subjects at given time vector", {
  .skipTestIfDisabled()

  accrualTime1 <- list(
    "0 - <12" = 12, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45
  )
  numberOfSubjects1 <- getNumberOfSubjects(
    time = 1:3, accrualTime = getAccrualTime(accrualTime1, maxNumberOfSubjects = 1405)
  )

  ## Comparison of the results of NumberOfSubjects object 'numberOfSubjects1' with expected results
  expect_equal(numberOfSubjects1$numberOfSubjects, c(12, 24, 36), tolerance = 1e-07)
  if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(numberOfSubjects1), NA)))
    expect_output(print(numberOfSubjects1)$show())
    invisible(capture.output(expect_error(summary(numberOfSubjects1), NA)))
    expect_output(summary(numberOfSubjects1)$show())
    numberOfSubjects1CodeBased <- eval(parse(text = getObjectRCode(numberOfSubjects1, stringWrapParagraphWidth = NULL)))
    expect_equal(numberOfSubjects1CodeBased$numberOfSubjects, numberOfSubjects1$numberOfSubjects, tolerance = 1e-05)
    expect_type(names(numberOfSubjects1), "character")
    df <- as.data.frame(numberOfSubjects1)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(numberOfSubjects1)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
  }

  # Without 'maxNumberOfSubjects' the maximum is derived from the accrual list
  # itself (12 * 12 + 21 + 27 + 33 + 39 = 264)
  accrualTime2 <- list(
    "0 - <12" = 12, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39
  )
  # @refFS[Formula]{fs:sampleSizeSurvivalExponentialPieceWiseAccrual}
  # @refFS[Formula]{fs:sampleSizeSurvivalGeneralPieceWiseAccrual}
  numberOfSubjects2 <- getNumberOfSubjects(time = 1:3, accrualTime = getAccrualTime(accrualTime2))

  ## Comparison of the results of NumberOfSubjects object 'numberOfSubjects2' with expected results
  expect_equal(numberOfSubjects2$maxNumberOfSubjects, 264)
  expect_equal(numberOfSubjects2$numberOfSubjects, c(12, 24, 36))
  if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(numberOfSubjects2), NA)))
    expect_output(print(numberOfSubjects2)$show())
    invisible(capture.output(expect_error(summary(numberOfSubjects2), NA)))
    expect_output(summary(numberOfSubjects2)$show())
    numberOfSubjects2CodeBased <- eval(parse(text = getObjectRCode(numberOfSubjects2, stringWrapParagraphWidth = NULL)))
    expect_equal(numberOfSubjects2CodeBased$maxNumberOfSubjects, numberOfSubjects2$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(numberOfSubjects2CodeBased$numberOfSubjects, numberOfSubjects2$numberOfSubjects, tolerance = 1e-05)
    expect_type(names(numberOfSubjects2), "character")
    df <- as.data.frame(numberOfSubjects2)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(numberOfSubjects2)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
  }
})

test_that("'getSampleSizeSurvival': check the calculation of 'maxNumberOfSubjects' for given 'followUpTime'", {
  .skipTestIfDisabled()

  sampleSizeSurvival1 <- getSampleSizeSurvival(
    lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = 0.6,
    followUpTime = 8, accrualIntensity = 30, accrualTime = 0
  )

  ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival1' with expected results
  expect_equal(sampleSizeSurvival1$directionUpper, FALSE)
  expect_equal(sampleSizeSurvival1$lambda1, c(0.012, 0.018), tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$maxNumberOfSubjects, 484.65038, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$maxNumberOfEvents, 120.3157, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$accrualTime, 16.155013, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$eventsFixed, 120.3157, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$nFixed,
484.65038, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$nFixed1, 242.32519, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$nFixed2, 242.32519, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$analysisTime[1, ], 24.155014, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$studyDuration, 24.155014, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival1$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07)
  if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    # print()/summary() must run without error; $show() must produce output
    invisible(capture.output(expect_error(print(sampleSizeSurvival1), NA)))
    expect_output(print(sampleSizeSurvival1)$show())
    invisible(capture.output(expect_error(summary(sampleSizeSurvival1), NA)))
    expect_output(summary(sampleSizeSurvival1)$show())
    # Round trip: regenerate the object from its own R code and compare fields
    sampleSizeSurvival1CodeBased <- eval(parse(text = getObjectRCode(sampleSizeSurvival1, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeSurvival1CodeBased$directionUpper, sampleSizeSurvival1$directionUpper, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$lambda1, sampleSizeSurvival1$lambda1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$maxNumberOfSubjects, sampleSizeSurvival1$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$maxNumberOfEvents, sampleSizeSurvival1$maxNumberOfEvents, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$accrualTime, sampleSizeSurvival1$accrualTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$eventsFixed, sampleSizeSurvival1$eventsFixed, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$nFixed, sampleSizeSurvival1$nFixed, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$nFixed1, sampleSizeSurvival1$nFixed1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$nFixed2, sampleSizeSurvival1$nFixed2, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$analysisTime, sampleSizeSurvival1$analysisTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$studyDuration, sampleSizeSurvival1$studyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival1CodeBased$criticalValuesEffectScale, sampleSizeSurvival1$criticalValuesEffectScale, tolerance = 1e-05)
    expect_type(names(sampleSizeSurvival1), "character")
    df <- as.data.frame(sampleSizeSurvival1)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeSurvival1)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
  }

  # Same scenario, but hazards given as a named piecewise list instead of
  # 'lambda2' + 'piecewiseSurvivalTime'; results must be identical
  sampleSizeSurvival2 <- getSampleSizeSurvival(
    piecewiseSurvivalTime = list(
      "<12" = 0.02,
      ">=12" = 0.03
    ),
    hazardRatio = 0.6, followUpTime = 8, accrualIntensity = 30, accrualTime = 0
  )

  ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival2' with expected results
  expect_equal(sampleSizeSurvival2$directionUpper, FALSE)
  expect_equal(sampleSizeSurvival2$lambda1, c(0.012, 0.018), tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$maxNumberOfSubjects, 484.65038, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$maxNumberOfEvents, 120.3157, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$accrualTime, 16.155013, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$eventsFixed, 120.3157, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$nFixed, 484.65038, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$nFixed1, 242.32519, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$nFixed2, 242.32519, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$analysisTime[1, ], 24.155014, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$studyDuration, 24.155014, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival2$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07)
  if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(sampleSizeSurvival2), NA)))
    expect_output(print(sampleSizeSurvival2)$show())
    invisible(capture.output(expect_error(summary(sampleSizeSurvival2), NA)))
    expect_output(summary(sampleSizeSurvival2)$show())
    sampleSizeSurvival2CodeBased <- eval(parse(text = getObjectRCode(sampleSizeSurvival2, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeSurvival2CodeBased$directionUpper, sampleSizeSurvival2$directionUpper, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$lambda1, sampleSizeSurvival2$lambda1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$maxNumberOfSubjects, sampleSizeSurvival2$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$maxNumberOfEvents, sampleSizeSurvival2$maxNumberOfEvents, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$accrualTime, sampleSizeSurvival2$accrualTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$eventsFixed, sampleSizeSurvival2$eventsFixed, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$nFixed, sampleSizeSurvival2$nFixed, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$nFixed1, sampleSizeSurvival2$nFixed1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$nFixed2, sampleSizeSurvival2$nFixed2, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$analysisTime, sampleSizeSurvival2$analysisTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$studyDuration, sampleSizeSurvival2$studyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival2CodeBased$criticalValuesEffectScale, sampleSizeSurvival2$criticalValuesEffectScale, tolerance = 1e-05)
    expect_type(names(sampleSizeSurvival2), "character")
    df <- as.data.frame(sampleSizeSurvival2)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeSurvival2)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
  }

  # Identical scenario to 'sampleSizeSurvival1' again (lambda2 vector +
  # piecewise times); the same values must be reproduced
  sampleSizeSurvival3 <- getSampleSizeSurvival(
    lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = 0.6,
    followUpTime = 8, accrualIntensity = 30, accrualTime = 0
  )

  ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival3' with expected results
  expect_equal(sampleSizeSurvival3$directionUpper, FALSE)
  expect_equal(sampleSizeSurvival3$lambda1, c(0.012, 0.018), tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$maxNumberOfSubjects, 484.65038, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$maxNumberOfEvents, 120.3157, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$accrualTime, 16.155013, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$eventsFixed, 120.3157, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$nFixed, 484.65038, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$nFixed1, 242.32519, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$nFixed2, 242.32519, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$analysisTime[1, ], 24.155014, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$studyDuration, 24.155014, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival3$criticalValuesEffectScale[1, ], 0.6995143, tolerance = 1e-07)
  if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(sampleSizeSurvival3), NA)))
    expect_output(print(sampleSizeSurvival3)$show())
    invisible(capture.output(expect_error(summary(sampleSizeSurvival3), NA)))
    expect_output(summary(sampleSizeSurvival3)$show())
    sampleSizeSurvival3CodeBased <- eval(parse(text = getObjectRCode(sampleSizeSurvival3, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeSurvival3CodeBased$directionUpper, sampleSizeSurvival3$directionUpper, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$lambda1, sampleSizeSurvival3$lambda1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$maxNumberOfSubjects, sampleSizeSurvival3$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$maxNumberOfEvents, sampleSizeSurvival3$maxNumberOfEvents, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$accrualTime, sampleSizeSurvival3$accrualTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$eventsFixed, sampleSizeSurvival3$eventsFixed,
tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$nFixed, sampleSizeSurvival3$nFixed, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$nFixed1, sampleSizeSurvival3$nFixed1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$nFixed2, sampleSizeSurvival3$nFixed2, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$analysisTime, sampleSizeSurvival3$analysisTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$studyDuration, sampleSizeSurvival3$studyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival3CodeBased$criticalValuesEffectScale, sampleSizeSurvival3$criticalValuesEffectScale, tolerance = 1e-05)
    expect_type(names(sampleSizeSurvival3), "character")
    df <- as.data.frame(sampleSizeSurvival3)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeSurvival3)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
  }

  # Weaker effect (hazardRatio = 0.8): larger trial and longer accrual expected
  sampleSizeSurvival4 <- getSampleSizeSurvival(
    lambda2 = c(0.02, 0.03), piecewiseSurvivalTime = c(0, 12), hazardRatio = 0.8,
    followUpTime = 8, accrualIntensity = 30, accrualTime = 0
  )

  ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival4' with expected results
  expect_equal(sampleSizeSurvival4$directionUpper, FALSE)
  expect_equal(sampleSizeSurvival4$lambda1, c(0.016, 0.024), tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$maxNumberOfSubjects, 1325.4661, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$maxNumberOfEvents, 630.52017, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$accrualTime, 44.182203, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$eventsFixed, 630.52017, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$nFixed, 1325.4661, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$nFixed1, 662.73305, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$nFixed2, 662.73305, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$analysisTime[1, ], 52.182201, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$studyDuration, 52.182201, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival4$criticalValuesEffectScale[1, ], 0.85546574, tolerance = 1e-07)
  if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(sampleSizeSurvival4), NA)))
    expect_output(print(sampleSizeSurvival4)$show())
    invisible(capture.output(expect_error(summary(sampleSizeSurvival4), NA)))
    expect_output(summary(sampleSizeSurvival4)$show())
    sampleSizeSurvival4CodeBased <- eval(parse(text = getObjectRCode(sampleSizeSurvival4, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeSurvival4CodeBased$directionUpper, sampleSizeSurvival4$directionUpper, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$lambda1, sampleSizeSurvival4$lambda1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$maxNumberOfSubjects, sampleSizeSurvival4$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$maxNumberOfEvents, sampleSizeSurvival4$maxNumberOfEvents, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$accrualTime, sampleSizeSurvival4$accrualTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$eventsFixed, sampleSizeSurvival4$eventsFixed, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$nFixed, sampleSizeSurvival4$nFixed, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$nFixed1, sampleSizeSurvival4$nFixed1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$nFixed2, sampleSizeSurvival4$nFixed2, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$analysisTime, sampleSizeSurvival4$analysisTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$studyDuration, sampleSizeSurvival4$studyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival4CodeBased$criticalValuesEffectScale, sampleSizeSurvival4$criticalValuesEffectScale, tolerance = 1e-05)
    expect_type(names(sampleSizeSurvival4), "character")
    df <- as.data.frame(sampleSizeSurvival4)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeSurvival4)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
  }

  # Direction 'up': lambda1 > lambda2 (hazardRatio = 1.5), single hazard piece
  sampleSizeSurvival5 <- getSampleSizeSurvival(
    lambda1 = 0.03, lambda2 = 0.02, followUpTime = 8, accrualIntensity = 30, accrualTime = 0
  )

  ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival5' with expected results
  expect_equal(sampleSizeSurvival5$directionUpper, TRUE)
  expect_equal(sampleSizeSurvival5$median1, 23.104906, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$median2, 34.657359, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$hazardRatio, 1.5, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$maxNumberOfSubjects, 557.38443, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$maxNumberOfEvents, 190.96804, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$accrualTime, 18.579481, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$eventsFixed, 190.96804, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$nFixed, 557.38443, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$nFixed1, 278.69222, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$nFixed2, 278.69222, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$analysisTime[1, ], 26.579477, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$studyDuration, 26.579477, tolerance = 1e-07)
  expect_equal(sampleSizeSurvival5$criticalValuesEffectScale[1, ], 1.327981, tolerance = 1e-07)
  if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(sampleSizeSurvival5), NA)))
    expect_output(print(sampleSizeSurvival5)$show())
    invisible(capture.output(expect_error(summary(sampleSizeSurvival5), NA)))
    expect_output(summary(sampleSizeSurvival5)$show())
    sampleSizeSurvival5CodeBased <- eval(parse(text = getObjectRCode(sampleSizeSurvival5, stringWrapParagraphWidth = NULL)))
    expect_equal(sampleSizeSurvival5CodeBased$directionUpper, sampleSizeSurvival5$directionUpper, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$median1, sampleSizeSurvival5$median1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$median2, sampleSizeSurvival5$median2, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$hazardRatio, sampleSizeSurvival5$hazardRatio, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$maxNumberOfSubjects, sampleSizeSurvival5$maxNumberOfSubjects, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$maxNumberOfEvents, sampleSizeSurvival5$maxNumberOfEvents, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$accrualTime, sampleSizeSurvival5$accrualTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$eventsFixed, sampleSizeSurvival5$eventsFixed, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$nFixed, sampleSizeSurvival5$nFixed, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$nFixed1, sampleSizeSurvival5$nFixed1, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$nFixed2, sampleSizeSurvival5$nFixed2, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$analysisTime, sampleSizeSurvival5$analysisTime, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$studyDuration, sampleSizeSurvival5$studyDuration, tolerance = 1e-05)
    expect_equal(sampleSizeSurvival5CodeBased$criticalValuesEffectScale, sampleSizeSurvival5$criticalValuesEffectScale, tolerance = 1e-05)
    expect_type(names(sampleSizeSurvival5), "character")
    df <- as.data.frame(sampleSizeSurvival5)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(sampleSizeSurvival5)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
  }
})

test_that("'getSampleSizeSurvival': check calculations for fixed design with relative accrual intensity", {
  .skipTestIfDisabled()

  # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual}
  sampleSizeSurvival1 <- getSampleSizeSurvival(accrualIntensity = 0.1, accrualTime = 10)
## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival1' with expected results expect_equal(sampleSizeSurvival1$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeSurvival1$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeSurvival1$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$accrualIntensity, c(16.554072, 7.5582097, 4.2441939), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$eventsFixed, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$nFixed, c(165.54072, 75.582097, 42.441939), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$nFixed1, c(82.77036, 37.791048, 21.220969), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$nFixed2, c(82.77036, 37.791048, 21.220969), tolerance = 1e-07) expect_equal(sampleSizeSurvival1$analysisTime[1, ], 16) expect_equal(sampleSizeSurvival1$studyDuration, 16) expect_equal(sampleSizeSurvival1$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeSurvival1), NA))) expect_output(print(sampleSizeSurvival1)$show()) invisible(capture.output(expect_error(summary(sampleSizeSurvival1), NA))) expect_output(summary(sampleSizeSurvival1)$show()) sampleSizeSurvival1CodeBased <- eval(parse(text = getObjectRCode(sampleSizeSurvival1, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeSurvival1CodeBased$directionUpper, sampleSizeSurvival1$directionUpper, tolerance = 1e-05) 
expect_equal(sampleSizeSurvival1CodeBased$median1, sampleSizeSurvival1$median1, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$median2, sampleSizeSurvival1$median2, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$lambda1, sampleSizeSurvival1$lambda1, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$lambda2, sampleSizeSurvival1$lambda2, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$hazardRatio, sampleSizeSurvival1$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$maxNumberOfEvents, sampleSizeSurvival1$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$accrualIntensity, sampleSizeSurvival1$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$eventsFixed, sampleSizeSurvival1$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$nFixed, sampleSizeSurvival1$nFixed, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$nFixed1, sampleSizeSurvival1$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$nFixed2, sampleSizeSurvival1$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$analysisTime, sampleSizeSurvival1$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$studyDuration, sampleSizeSurvival1$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeSurvival1CodeBased$criticalValuesEffectScale, sampleSizeSurvival1$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeSurvival1), "character") df <- as.data.frame(sampleSizeSurvival1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeSurvival1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} sampleSizeSurvival2 <- getSampleSizeSurvival(accrualIntensity = 0.99, accrualTime = c(0, 10)) ## Comparison of the results of TrialDesignPlanSurvival 
object 'sampleSizeSurvival2' with expected results expect_equal(sampleSizeSurvival2$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeSurvival2$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeSurvival2$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$accrualIntensity, c(16.554072, 7.5582097, 4.2441939), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$eventsFixed, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$nFixed, c(165.54072, 75.582097, 42.441939), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$nFixed1, c(82.77036, 37.791048, 21.220969), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$nFixed2, c(82.77036, 37.791048, 21.220969), tolerance = 1e-07) expect_equal(sampleSizeSurvival2$analysisTime[1, ], 16) expect_equal(sampleSizeSurvival2$studyDuration, 16) expect_equal(sampleSizeSurvival2$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeSurvival2), NA))) expect_output(print(sampleSizeSurvival2)$show()) invisible(capture.output(expect_error(summary(sampleSizeSurvival2), NA))) expect_output(summary(sampleSizeSurvival2)$show()) sampleSizeSurvival2CodeBased <- eval(parse(text = getObjectRCode(sampleSizeSurvival2, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeSurvival2CodeBased$directionUpper, sampleSizeSurvival2$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$median1, sampleSizeSurvival2$median1, 
tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$median2, sampleSizeSurvival2$median2, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$lambda1, sampleSizeSurvival2$lambda1, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$lambda2, sampleSizeSurvival2$lambda2, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$hazardRatio, sampleSizeSurvival2$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$maxNumberOfEvents, sampleSizeSurvival2$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$accrualIntensity, sampleSizeSurvival2$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$eventsFixed, sampleSizeSurvival2$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$nFixed, sampleSizeSurvival2$nFixed, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$nFixed1, sampleSizeSurvival2$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$nFixed2, sampleSizeSurvival2$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$analysisTime, sampleSizeSurvival2$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$studyDuration, sampleSizeSurvival2$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeSurvival2CodeBased$criticalValuesEffectScale, sampleSizeSurvival2$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeSurvival2), "character") df <- as.data.frame(sampleSizeSurvival2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeSurvival2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} sampleSizeSurvival3 <- getSampleSizeSurvival(accrualIntensity = 1e-12, accrualTime = c(0, 10)) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival3' with expected results 
expect_equal(sampleSizeSurvival3$directionUpper, c(TRUE, TRUE, TRUE)) expect_equal(sampleSizeSurvival3$median1, c(16.282985, 12, 9.0776496), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$lambda1, c(0.042568802, 0.057762265, 0.076357561), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeSurvival3$hazardRatio, c(2.2892242, 3.1062837, 4.1062837), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$maxNumberOfEvents, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$accrualIntensity, c(16.554072, 7.5582097, 4.2441939), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$eventsFixed, c(45.770282, 24.438835, 15.735459), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$nFixed, c(165.54072, 75.582097, 42.441939), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$nFixed1, c(82.77036, 37.791048, 21.220969), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$nFixed2, c(82.77036, 37.791048, 21.220969), tolerance = 1e-07) expect_equal(sampleSizeSurvival3$analysisTime[1, ], 16) expect_equal(sampleSizeSurvival3$studyDuration, 16) expect_equal(sampleSizeSurvival3$criticalValuesEffectScale[1, ], c(1.7849857, 2.2098739, 2.686355), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeSurvival3), NA))) expect_output(print(sampleSizeSurvival3)$show()) invisible(capture.output(expect_error(summary(sampleSizeSurvival3), NA))) expect_output(summary(sampleSizeSurvival3)$show()) sampleSizeSurvival3CodeBased <- eval(parse(text = getObjectRCode(sampleSizeSurvival3, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeSurvival3CodeBased$directionUpper, sampleSizeSurvival3$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$median1, sampleSizeSurvival3$median1, tolerance = 1e-05) 
expect_equal(sampleSizeSurvival3CodeBased$median2, sampleSizeSurvival3$median2, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$lambda1, sampleSizeSurvival3$lambda1, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$lambda2, sampleSizeSurvival3$lambda2, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$hazardRatio, sampleSizeSurvival3$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$maxNumberOfEvents, sampleSizeSurvival3$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$accrualIntensity, sampleSizeSurvival3$accrualIntensity, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$eventsFixed, sampleSizeSurvival3$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$nFixed, sampleSizeSurvival3$nFixed, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$nFixed1, sampleSizeSurvival3$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$nFixed2, sampleSizeSurvival3$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$analysisTime, sampleSizeSurvival3$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$studyDuration, sampleSizeSurvival3$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeSurvival3CodeBased$criticalValuesEffectScale, sampleSizeSurvival3$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeSurvival3), "character") df <- as.data.frame(sampleSizeSurvival3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeSurvival3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:sampleSizeSurvivalDefinitionPieceWiseAccrual} expect_equal(sampleSizeSurvival1$accrualIntensity, sampleSizeSurvival2$accrualIntensity) expect_equal(sampleSizeSurvival1$accrualIntensity, sampleSizeSurvival3$accrualIntensity) sampleSizeSurvival4 <- getSampleSizeSurvival(accrualIntensity = 1, 
accrualTime = c(0, 50), pi1 = 0.4) ## Comparison of the results of TrialDesignPlanSurvival object 'sampleSizeSurvival4' with expected results expect_equal(sampleSizeSurvival4$directionUpper, TRUE) expect_equal(sampleSizeSurvival4$median1, 16.282985, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$median2, 37.275405, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$lambda1, 0.042568802, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$lambda2, 0.018595296, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$hazardRatio, 2.2892242, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$maxNumberOfSubjects, 50) expect_equal(sampleSizeSurvival4$maxNumberOfEvents, 45.770282, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$followUpTime, 77.550073, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$eventsFixed, 45.770282, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$nFixed, 50) expect_equal(sampleSizeSurvival4$nFixed1, 25) expect_equal(sampleSizeSurvival4$nFixed2, 25) expect_equal(sampleSizeSurvival4$analysisTime[1, ], 127.55007, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$studyDuration, 127.55007, tolerance = 1e-07) expect_equal(sampleSizeSurvival4$criticalValuesEffectScale[1, ], 1.7849857, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeSurvival4), NA))) expect_output(print(sampleSizeSurvival4)$show()) invisible(capture.output(expect_error(summary(sampleSizeSurvival4), NA))) expect_output(summary(sampleSizeSurvival4)$show()) sampleSizeSurvival4CodeBased <- eval(parse(text = getObjectRCode(sampleSizeSurvival4, stringWrapParagraphWidth = NULL))) expect_equal(sampleSizeSurvival4CodeBased$directionUpper, sampleSizeSurvival4$directionUpper, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$median1, sampleSizeSurvival4$median1, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$median2, sampleSizeSurvival4$median2, tolerance = 1e-05) 
expect_equal(sampleSizeSurvival4CodeBased$lambda1, sampleSizeSurvival4$lambda1, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$lambda2, sampleSizeSurvival4$lambda2, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$hazardRatio, sampleSizeSurvival4$hazardRatio, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$maxNumberOfSubjects, sampleSizeSurvival4$maxNumberOfSubjects, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$maxNumberOfEvents, sampleSizeSurvival4$maxNumberOfEvents, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$followUpTime, sampleSizeSurvival4$followUpTime, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$eventsFixed, sampleSizeSurvival4$eventsFixed, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$nFixed, sampleSizeSurvival4$nFixed, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$nFixed1, sampleSizeSurvival4$nFixed1, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$nFixed2, sampleSizeSurvival4$nFixed2, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$analysisTime, sampleSizeSurvival4$analysisTime, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$studyDuration, sampleSizeSurvival4$studyDuration, tolerance = 1e-05) expect_equal(sampleSizeSurvival4CodeBased$criticalValuesEffectScale, sampleSizeSurvival4$criticalValuesEffectScale, tolerance = 1e-05) expect_type(names(sampleSizeSurvival4), "character") df <- as.data.frame(sampleSizeSurvival4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(sampleSizeSurvival4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'.getLambdaStepFunctionByTime': return correct lambda for specified time and piecewise exponential bounds", { .skipTestIfDisabled() # @refFS[Formula]{fs:pieceWiseExponentialSurvival} # @refFS[Formula]{fs:pieceWiseExponentialRandomVariable} lambda1 <- .getLambdaStepFunctionByTime(time = 1, 
c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007))
## Comparison of the results of numeric object 'lambda1' with expected results
expect_equal(lambda1, 0.025, tolerance = 1e-07)

# @refFS[Formula]{fs:pieceWiseExponentialSurvival}
# @refFS[Formula]{fs:pieceWiseExponentialRandomVariable}
# The remaining cases probe every piece of the step function: both bounds of each
# interval and a time beyond the last bound. The generated code repeated the same
# call/expectation pair eight times (once inconsistently via the 'rpact:::' prefix,
# unlike the direct internal calls above); a single data-driven loop keeps the
# expectations identical, uniform, and easier to extend.
lambdaTestTimes <- c(6, 7, 9, 14, 15, 16, 21, 50)
lambdaExpected <- c(0.025, 0.04, 0.04, 0.015, 0.015, 0.01, 0.01, 0.007)
for (i in seq_along(lambdaTestTimes)) {
    lambdaResult <- .getLambdaStepFunctionByTime(
        time = lambdaTestTimes[i],
        c(6, 9, 15, 21), c(0.025, 0.04, 0.015, 0.01, 0.007)
    )
    expect_equal(lambdaResult, lambdaExpected[i], tolerance = 1e-07)
}
})
rpact/tests/testthat/test-f_design_group_sequential.R0000644000176200001440000034345614407046544022730 0ustar liggesusers## |
## | *Unit tests*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | RPACT package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File name: test-f_design_group_sequential.R
## | Creation date: 06 February 2023, 12:12:02
## | File version: $Revision: 6888 $
## | Last changed: $Date: 2023-03-23 12:01:00 +0100 (Do, 23 Mrz 2023) $
## | Last changed by: $Author: wassmer $
## |

test_plan_section("Testing the Group Sequential and Inverse Normal Design Functionality")

test_that("'getGroupSequentialProbabilities' with one and two continuation regions for weighted test statistic", {
    # one continuation region: lower/upper bounds at -/+ qnorm(0.95) for 4 stages
    # @refFS[Formula]{fs:testStatisticGroupSequentialWeightedAverage}
    xa <- getGroupSequentialProbabilities(matrix(c(rep(-qnorm(0.95), 4), rep(qnorm(0.95), 4)),
        nrow = 2, byrow = TRUE
    ), (1:4) / 4)

    ## Comparison of the results of matrix object 'xa' with expected results
    expect_equal(xa[1, ], c(0.05, 0.030074925, 0.020961248, 0.01595848), tolerance = 1e-07)
    expect_equal(xa[2, ], c(0.95, 0.86992507, 0.8188889, 0.78196917), tolerance = 1e-07)
    expect_equal(xa[3, ], c(1, 0.9, 0.83985015, 0.79792765), tolerance = 1e-07)

    # two continuation regions: additional inner bounds at -1 and +1
    xb <- getGroupSequentialProbabilities(matrix(c(
        rep(-qnorm(0.95), 4), rep(-1, 4),
        rep(1, 4), rep(qnorm(0.95), 4)
    ), nrow = 4, byrow = TRUE), (1:4) / 4)

    ## Comparison of the results of matrix object 'xb' with expected results
    expect_equal(xb[1, ], c(0.05, 0.016446517, 0.005264288, 0.0019569508), tolerance = 1e-07)
    expect_equal(xb[2, ], c(0.15865525, 0.048950554, 0.017478997, 0.0072417024), tolerance = 1e-07)
    expect_equal(xb[3, ], c(0.84134475, 0.16835995, 0.047529077, 0.017187717), tolerance = 1e-07)
    expect_equal(xb[4, ], c(0.95, 0.20086399, 0.059743786, 0.022472468), tolerance = 1e-07)
    expect_equal(xb[5, ], c(1, 0.21731051, 0.065008074, 0.024429419), tolerance = 1e-07)
})

test_that("'getDesignInverseNormal' with default parameters: parameters and results are as expected", {
    # default design: kMax = 3, O'Brien-Fleming type alpha spending
    # @refFS[Tab.]{fs:tab:output:getDesignInverseNormal}
    # @refFS[Formula]{fs:criticalValuesOBrienFleming}
    x0 <- getDesignInverseNormal()

    ## Comparison of the results of TrialDesignInverseNormal object 'x0' with expected results
    expect_equal(x0$alphaSpent, c(0.00025917372, 0.0071600594, 0.02499999), tolerance = 1e-07)
    expect_equal(x0$criticalValues, c(3.4710914, 2.4544323, 2.0040356), tolerance = 1e-07)
    expect_equal(x0$stageLevels, c(0.00025917372, 0.0070553616, 0.022533125), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x0), NA)))
        expect_output(print(x0)$show())
invisible(capture.output(expect_error(summary(x0), NA)))
expect_output(summary(x0)$show())
# the object re-created from its generated R code must reproduce all results
x0CodeBased <- eval(parse(text = getObjectRCode(x0, stringWrapParagraphWidth = NULL)))
expect_equal(x0CodeBased$alphaSpent, x0$alphaSpent, tolerance = 1e-07)
expect_equal(x0CodeBased$criticalValues, x0$criticalValues, tolerance = 1e-07)
expect_equal(x0CodeBased$stageLevels, x0$stageLevels, tolerance = 1e-07)
expect_type(names(x0), "character")
df <- as.data.frame(x0)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x0)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

test_that("'getDesignInverseNormal' with type of design = 'asHSD', 'bsHSD', 'asKD', and 'bsKD'", {
    .skipTestIfDisabled()

    # x1: Hwang-Shi-DeCani alpha spending with gammaA = 0 (Pocock-like spending)
    # @refFS[Tab.]{fs:tab:output:getDesignInverseNormal}
    # @refFS[Formula]{fs:alphaSpendingConcept}
    # @refFS[Formula]{fs:alphaSpendingHwangShiDeCani}
    x1 <- getDesignInverseNormal(
        kMax = 3, informationRates = c(0.2, 0.4, 1),
        alpha = 0.03, sided = 1, beta = 0.14, typeOfDesign = "asHSD", gammaA = 0
    )

    ## Comparison of the results of TrialDesignInverseNormal object 'x1' with expected results
    expect_equal(x1$alphaSpent, c(0.006, 0.012, 0.03), tolerance = 1e-07)
    expect_equal(x1$criticalValues, c(2.5121443, 2.4228747, 2.0280392), tolerance = 1e-07)
    expect_equal(x1$stageLevels, c(0.006, 0.0076991188, 0.021278125), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x1), NA)))
        expect_output(print(x1)$show())
        invisible(capture.output(expect_error(summary(x1), NA)))
        expect_output(summary(x1)$show())
        x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL)))
        expect_equal(x1CodeBased$alphaSpent, x1$alphaSpent, tolerance = 1e-07)
        expect_equal(x1CodeBased$criticalValues, x1$criticalValues, tolerance = 1e-07)
        expect_equal(x1CodeBased$stageLevels, x1$stageLevels, tolerance = 1e-07)
        expect_type(names(x1), "character")
        df <- as.data.frame(x1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # y1: design characteristics (inflation factor, ASN, ...) for x1
    # @refFS[Tab.]{fs:tab:output:getDesignCharacteristics}
    # @refFS[Formula]{fs:inflationFactor}
    # @refFS[Formula]{fs:expectedReduction}
    y1 <- getDesignCharacteristics(x1)

    ## Comparison of the results of TrialDesignCharacteristics object 'y1' with expected results
    expect_equal(y1$nFixed, 8.7681899, tolerance = 1e-07)
    expect_equal(y1$shift, 9.4594101, tolerance = 1e-07)
    expect_equal(y1$inflationFactor, 1.0788327, tolerance = 1e-07)
    expect_equal(y1$information, c(1.891882, 3.7837641, 9.4594101), tolerance = 1e-07)
    expect_equal(y1$power, c(0.12783451, 0.34055165, 0.86), tolerance = 1e-07)
    expect_equal(y1$rejectionProbabilities, c(0.12783451, 0.21271713, 0.51944835), tolerance = 1e-07)
    expect_equal(y1$futilityProbabilities, c(0, 0))
    expect_equal(y1$averageSampleNumber1, 0.83081135, tolerance = 1e-07)
    expect_equal(y1$averageSampleNumber01, 1.0142116, tolerance = 1e-07)
    expect_equal(y1$averageSampleNumber0, 1.0697705, tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(y1), NA)))
        expect_output(print(y1)$show())
        invisible(capture.output(expect_error(summary(y1), NA)))
        expect_output(summary(y1)$show())
        y1CodeBased <- eval(parse(text = getObjectRCode(y1, stringWrapParagraphWidth = NULL)))
        expect_equal(y1CodeBased$nFixed, y1$nFixed, tolerance = 1e-07)
        expect_equal(y1CodeBased$shift, y1$shift, tolerance = 1e-07)
        expect_equal(y1CodeBased$inflationFactor, y1$inflationFactor, tolerance = 1e-07)
        expect_equal(y1CodeBased$information, y1$information, tolerance = 1e-07)
        expect_equal(y1CodeBased$power, y1$power, tolerance = 1e-07)
        expect_equal(y1CodeBased$rejectionProbabilities, y1$rejectionProbabilities, tolerance = 1e-07)
        expect_equal(y1CodeBased$futilityProbabilities, y1$futilityProbabilities, tolerance = 1e-07)
        expect_equal(y1CodeBased$averageSampleNumber1, y1$averageSampleNumber1, tolerance = 1e-07)
        expect_equal(y1CodeBased$averageSampleNumber01, y1$averageSampleNumber01, tolerance = 1e-07)
        expect_equal(y1CodeBased$averageSampleNumber0, y1$averageSampleNumber0, tolerance = 1e-07)
        expect_type(names(y1), "character")
        df <- as.data.frame(y1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(y1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # x2: HSD alpha- and beta-spending with negative gamma parameters
    # (non-binding futility bounds are derived from the beta-spending function)
    # @refFS[Tab.]{fs:tab:output:getDesignInverseNormal}
    # @refFS[Formula]{fs:alphaSpendingConcept}
    # @refFS[Formula]{fs:alphaSpendingHwangShiDeCani}
    # @refFS[Formula]{fs:betaSpendingApproach}
    # @refFS[Formula]{fs:betaSpendingHwangShiDeCani}
    x2 <- getDesignInverseNormal(
        kMax = 3, informationRates = c(0.2, 0.4, 1),
        alpha = 0.07, sided = 1, beta = 0.14, typeOfDesign = "asHSD",
        gammaA = -1, typeBetaSpending = "bsHSD", gammaB = -2
    )

    ## Comparison of the results of TrialDesignInverseNormal object 'x2' with expected results
    expect_equal(x2$power, c(0.12038953, 0.32895265, 0.86), tolerance = 1e-07)
    expect_equal(x2$futilityBounds, c(-1.1063623, -0.35992439), tolerance = 1e-07)
    expect_equal(x2$alphaSpent, c(0.0090195874, 0.020036136, 0.07), tolerance = 1e-07)
    expect_equal(x2$betaSpent, c(0.010777094, 0.026854629, 0.14), tolerance = 1e-07)
    expect_equal(x2$criticalValues, c(2.364813, 2.1928805, 1.5660474), tolerance = 1e-07)
    expect_equal(x2$stageLevels, c(0.0090195874, 0.014157994, 0.058668761), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x2), NA)))
        expect_output(print(x2)$show())
        invisible(capture.output(expect_error(summary(x2), NA)))
        expect_output(summary(x2)$show())
        x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL)))
        expect_equal(x2CodeBased$power, x2$power, tolerance = 1e-07)
        expect_equal(x2CodeBased$futilityBounds, x2$futilityBounds, tolerance = 1e-07)
        expect_equal(x2CodeBased$alphaSpent, x2$alphaSpent, tolerance = 1e-07)
        expect_equal(x2CodeBased$betaSpent, x2$betaSpent, tolerance = 1e-07)
        expect_equal(x2CodeBased$criticalValues, x2$criticalValues, tolerance = 1e-07)
        expect_equal(x2CodeBased$stageLevels, x2$stageLevels, tolerance = 1e-07)
        expect_type(names(x2), "character")
        df <- as.data.frame(x2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # y2: design characteristics for x2
    # @refFS[Tab.]{fs:tab:output:getDesignCharacteristics}
    # @refFS[Formula]{fs:inflationFactor}
    # @refFS[Formula]{fs:expectedReduction}
    y2 <- getDesignCharacteristics(x2)

    ## Comparison of the results of TrialDesignCharacteristics object 'y2' with expected results
    expect_equal(y2$nFixed, 6.5337002, tolerance = 1e-07)
    expect_equal(y2$shift, 7.1015942, tolerance = 1e-07)
    expect_equal(y2$inflationFactor, 1.0869177, tolerance = 1e-07)
    expect_equal(y2$information, c(1.4203188, 2.8406377, 7.1015942), tolerance = 1e-07)
    expect_equal(y2$power, c(0.12038953, 0.32895265, 0.86), tolerance = 1e-07)
    expect_equal(y2$rejectionProbabilities, c(0.12038953, 0.20856311, 0.53104735), tolerance = 1e-07)
    expect_equal(y2$futilityProbabilities, c(0.010777094, 0.016077535), tolerance = 1e-07)
    expect_equal(y2$averageSampleNumber1, 0.82636428, tolerance = 1e-07)
    expect_equal(y2$averageSampleNumber01, 0.916142, tolerance = 1e-07)
    expect_equal(y2$averageSampleNumber0, 0.79471657, tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(y2), NA)))
        expect_output(print(y2)$show())
        invisible(capture.output(expect_error(summary(y2), NA)))
        expect_output(summary(y2)$show())
        y2CodeBased <- eval(parse(text = getObjectRCode(y2, stringWrapParagraphWidth = NULL)))
        expect_equal(y2CodeBased$nFixed, y2$nFixed, tolerance = 1e-07)
        expect_equal(y2CodeBased$shift, y2$shift, tolerance = 1e-07)
        expect_equal(y2CodeBased$inflationFactor, y2$inflationFactor, tolerance = 1e-07)
        expect_equal(y2CodeBased$information, y2$information, tolerance = 1e-07)
        expect_equal(y2CodeBased$power, y2$power, tolerance = 1e-07)
        expect_equal(y2CodeBased$rejectionProbabilities, y2$rejectionProbabilities, tolerance = 1e-07)
        expect_equal(y2CodeBased$futilityProbabilities, y2$futilityProbabilities, tolerance = 1e-07)
        expect_equal(y2CodeBased$averageSampleNumber1, y2$averageSampleNumber1, tolerance = 1e-07)
        expect_equal(y2CodeBased$averageSampleNumber01, y2$averageSampleNumber01, tolerance = 1e-07)
        expect_equal(y2CodeBased$averageSampleNumber0, y2$averageSampleNumber0, tolerance = 1e-07)
        expect_type(names(y2), "character")
        df <- as.data.frame(y2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(y2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # x3: Kim-DeMets alpha- and beta-spending with fractional gamma exponents
    # @refFS[Tab.]{fs:tab:output:getDesignInverseNormal}
    # @refFS[Formula]{fs:alphaSpendingConcept}
    # @refFS[Formula]{fs:alphaSpendingKimDeMets}
    # @refFS[Formula]{fs:betaSpendingApproach}
    # @refFS[Formula]{fs:betaSpendingKimDeMets}
    x3 <- getDesignInverseNormal(
        kMax = 3, informationRates = c(0.3, 0.7, 1),
        alpha = 0.03, sided = 1, beta = 0.34, typeOfDesign = "asKD",
        gammaA = 2.2, typeBetaSpending = "bsKD", gammaB = 3.2
    )

    ## Comparison of the results of TrialDesignInverseNormal object 'x3' with expected results
    expect_equal(x3$power, c(0.058336437, 0.398246, 0.66), tolerance = 1e-07)
    expect_equal(x3$futilityBounds, c(-1.1558435, 0.72836893), tolerance = 1e-07)
    expect_equal(x3$alphaSpent, c(0.0021222083, 0.013687904, 0.03), tolerance = 1e-07)
    expect_equal(x3$betaSpent, c(0.0072155083, 0.1085907, 0.34), tolerance = 1e-07)
    expect_equal(x3$criticalValues, c(2.8594012, 2.2435708, 1.9735737), tolerance = 1e-07)
    expect_equal(x3$stageLevels, c(0.0021222083, 0.012430014, 0.02421512), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x3), NA)))
        expect_output(print(x3)$show())
        invisible(capture.output(expect_error(summary(x3), NA)))
        expect_output(summary(x3)$show())
        x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL)))
        expect_equal(x3CodeBased$power, x3$power, tolerance = 1e-07)
        expect_equal(x3CodeBased$futilityBounds, x3$futilityBounds, tolerance = 1e-07)
        expect_equal(x3CodeBased$alphaSpent, x3$alphaSpent, tolerance = 1e-07)
        expect_equal(x3CodeBased$betaSpent, x3$betaSpent, tolerance = 1e-07)
        expect_equal(x3CodeBased$criticalValues, x3$criticalValues, tolerance = 1e-07)
        expect_equal(x3CodeBased$stageLevels, x3$stageLevels, tolerance = 1e-07)
        expect_type(names(x3), "character")
        df <- as.data.frame(x3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # y3: design characteristics for x3
    # @refFS[Tab.]{fs:tab:output:getDesignCharacteristics}
    # @refFS[Formula]{fs:inflationFactor}
    # @refFS[Formula]{fs:expectedReduction}
    y3 <- getDesignCharacteristics(x3)

    ## Comparison of the results of TrialDesignCharacteristics object 'y3' with expected results
    expect_equal(y3$nFixed, 5.2590265, tolerance = 1e-07)
    expect_equal(y3$shift, 5.551371, tolerance = 1e-07)
    expect_equal(y3$inflationFactor, 1.0555891, tolerance = 1e-07)
    expect_equal(y3$information, c(1.6654113, 3.8859597, 5.551371), tolerance = 1e-07)
    expect_equal(y3$power, c(0.058336437, 0.398246, 0.66), tolerance = 1e-07)
    expect_equal(y3$rejectionProbabilities, c(0.058336437, 0.33990957, 0.261754), tolerance = 1e-07)
    expect_equal(y3$futilityProbabilities, c(0.0072155083, 0.1013752), tolerance = 1e-07)
    expect_equal(y3$averageSampleNumber1, 0.86740735, tolerance = 1e-07)
    expect_equal(y3$averageSampleNumber01, 0.87361707, tolerance = 1e-07)
    expect_equal(y3$averageSampleNumber0, 0.75480974, tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(y3), NA)))
        expect_output(print(y3)$show())
        invisible(capture.output(expect_error(summary(y3), NA)))
        expect_output(summary(y3)$show())
        y3CodeBased
<- eval(parse(text = getObjectRCode(y3, stringWrapParagraphWidth = NULL))) expect_equal(y3CodeBased$nFixed, y3$nFixed, tolerance = 1e-07) expect_equal(y3CodeBased$shift, y3$shift, tolerance = 1e-07) expect_equal(y3CodeBased$inflationFactor, y3$inflationFactor, tolerance = 1e-07) expect_equal(y3CodeBased$information, y3$information, tolerance = 1e-07) expect_equal(y3CodeBased$power, y3$power, tolerance = 1e-07) expect_equal(y3CodeBased$rejectionProbabilities, y3$rejectionProbabilities, tolerance = 1e-07) expect_equal(y3CodeBased$futilityProbabilities, y3$futilityProbabilities, tolerance = 1e-07) expect_equal(y3CodeBased$averageSampleNumber1, y3$averageSampleNumber1, tolerance = 1e-07) expect_equal(y3CodeBased$averageSampleNumber01, y3$averageSampleNumber01, tolerance = 1e-07) expect_equal(y3CodeBased$averageSampleNumber0, y3$averageSampleNumber0, tolerance = 1e-07) expect_type(names(y3), "character") df <- as.data.frame(y3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(y3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignInverseNormal' with binding futility bounds", { # @refFS[Tab.]{fs:tab:output:getDesignInverseNormal} # @refFS[Formula]{fs:criticalValuesWithFutility} # @refFS[Formula]{fs:criticalValuesWangTiatis} x4 <- getDesignInverseNormal( kMax = 4, alpha = 0.035, futilityBounds = rep(0.5244, 3), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4 ) ## Comparison of the results of TrialDesignInverseNormal object 'x4' with expected results expect_equal(x4$alphaSpent, c(0.0099446089, 0.020756912, 0.029001537, 0.03499999), tolerance = 1e-07) expect_equal(x4$criticalValues, c(2.3284312, 2.1725031, 2.0861776, 2.0270171), tolerance = 1e-07) expect_equal(x4$stageLevels, c(0.0099446089, 0.014908866, 0.018481267, 0.021330332), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) 
expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$alphaSpent, x4$alphaSpent, tolerance = 1e-07) expect_equal(x4CodeBased$criticalValues, x4$criticalValues, tolerance = 1e-07) expect_equal(x4CodeBased$stageLevels, x4$stageLevels, tolerance = 1e-07) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asUser'", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} x5 <- getDesignGroupSequential( typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.03, 0.05) ) ## Comparison of the results of TrialDesignGroupSequential object 'x5' with expected results expect_equal(x5$alphaSpent, c(0.01, 0.02, 0.03, 0.04999999), tolerance = 1e-07) expect_equal(x5$criticalValues, c(2.3263479, 2.2192994, 2.1201347, 1.8189562), tolerance = 1e-07) expect_equal(x5$stageLevels, c(0.01, 0.01323318, 0.016997342, 0.034459057), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$alphaSpent, x5$alphaSpent, tolerance = 1e-07) expect_equal(x5CodeBased$criticalValues, x5$criticalValues, tolerance = 1e-07) expect_equal(x5CodeBased$stageLevels, x5$stageLevels, tolerance = 1e-07) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 
0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asP' and 'bsUser' and non-binding futility bounds", { # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingPocock} # @refFS[Formula]{fs:betaSpendingApproach} x6a <- getDesignGroupSequential( kMax = 3, alpha = 0.13, typeOfDesign = "asP", typeBetaSpending = "bsUser", informationRates = c(0.35, 0.7, 1), bindingFutility = FALSE, userBetaSpending = c(0.01, 0.05, 0.3) ) ## Comparison of the results of TrialDesignGroupSequential object 'x6a' with expected results expect_equal(x6a$power, c(0.31774348, 0.5598179, 0.7), tolerance = 1e-07) expect_equal(x6a$futilityBounds, c(-1.2557044, -0.16828659), tolerance = 1e-07) expect_equal(x6a$alphaSpent, c(0.061214062, 0.10266465, 0.13), tolerance = 1e-07) expect_equal(x6a$betaSpent, c(0.01, 0.05, 0.3), tolerance = 1e-07) expect_equal(x6a$criticalValues, c(1.5446617, 1.4828682, 1.4620058), tolerance = 1e-07) expect_equal(x6a$stageLevels, c(0.061214062, 0.069054712, 0.071869812), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6a), NA))) expect_output(print(x6a)$show()) invisible(capture.output(expect_error(summary(x6a), NA))) expect_output(summary(x6a)$show()) x6aCodeBased <- eval(parse(text = getObjectRCode(x6a, stringWrapParagraphWidth = NULL))) expect_equal(x6aCodeBased$power, x6a$power, tolerance = 1e-07) expect_equal(x6aCodeBased$futilityBounds, x6a$futilityBounds, tolerance = 1e-07) expect_equal(x6aCodeBased$alphaSpent, x6a$alphaSpent, tolerance = 1e-07) expect_equal(x6aCodeBased$betaSpent, x6a$betaSpent, tolerance = 1e-07) expect_equal(x6aCodeBased$criticalValues, x6a$criticalValues, tolerance = 1e-07) expect_equal(x6aCodeBased$stageLevels, x6a$stageLevels, tolerance = 1e-07) expect_type(names(x6a), "character") df <- 
as.data.frame(x6a) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6a) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asP' and information rate < 1 at maximum stage", { # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingPocock} x6b <- getDesignGroupSequential(informationRates = c(0.4, 0.7), typeOfDesign = "asP") ## Comparison of the results of TrialDesignGroupSequential object 'x6b' with expected results expect_equal(x6b$alphaSpent, c(0.013078429, 0.0197432), tolerance = 1e-07) expect_equal(x6b$criticalValues, c(2.223875, 2.3050796), tolerance = 1e-07) expect_equal(x6b$stageLevels, c(0.013078429, 0.010581057), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6b), NA))) expect_output(print(x6b)$show()) invisible(capture.output(expect_error(summary(x6b), NA))) expect_output(summary(x6b)$show()) x6bCodeBased <- eval(parse(text = getObjectRCode(x6b, stringWrapParagraphWidth = NULL))) expect_equal(x6bCodeBased$alphaSpent, x6b$alphaSpent, tolerance = 1e-07) expect_equal(x6bCodeBased$criticalValues, x6b$criticalValues, tolerance = 1e-07) expect_equal(x6bCodeBased$stageLevels, x6b$stageLevels, tolerance = 1e-07) expect_type(names(x6b), "character") df <- as.data.frame(x6b) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6b) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsKD' and non-binding futility bounds (kMax = 3)", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} # 
@refFS[Formula]{fs:betaSpendingKimDeMets} x7a <- getDesignGroupSequential( kMax = 3, alpha = 0.13, beta = 0.41, typeOfDesign = "asOF", typeBetaSpending = "bsKD", informationRates = c(0.4, 0.75, 1), gammaB = 2.5, bindingFutility = FALSE ) ## Comparison of the results of TrialDesignGroupSequential object 'x7a' with expected results expect_equal(x7a$power, c(0.10903632, 0.42541278, 0.59), tolerance = 1e-07) expect_equal(x7a$futilityBounds, c(-0.83725762, 0.35992547), tolerance = 1e-07) expect_equal(x7a$alphaSpent, c(0.016665509, 0.080406163, 0.12999999), tolerance = 1e-07) expect_equal(x7a$betaSpent, c(0.041489083, 0.19972711, 0.41), tolerance = 1e-07) expect_equal(x7a$criticalValues, c(2.1280732, 1.4368565, 1.2468994), tolerance = 1e-07) expect_equal(x7a$stageLevels, c(0.016665509, 0.075379384, 0.1062172), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7a), NA))) expect_output(print(x7a)$show()) invisible(capture.output(expect_error(summary(x7a), NA))) expect_output(summary(x7a)$show()) x7aCodeBased <- eval(parse(text = getObjectRCode(x7a, stringWrapParagraphWidth = NULL))) expect_equal(x7aCodeBased$power, x7a$power, tolerance = 1e-07) expect_equal(x7aCodeBased$futilityBounds, x7a$futilityBounds, tolerance = 1e-07) expect_equal(x7aCodeBased$alphaSpent, x7a$alphaSpent, tolerance = 1e-07) expect_equal(x7aCodeBased$betaSpent, x7a$betaSpent, tolerance = 1e-07) expect_equal(x7aCodeBased$criticalValues, x7a$criticalValues, tolerance = 1e-07) expect_equal(x7aCodeBased$stageLevels, x7a$stageLevels, tolerance = 1e-07) expect_type(names(x7a), "character") df <- as.data.frame(x7a) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7a) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsKD' and non-binding futility bounds (kMax = 4)", { .skipTestIfDisabled() # 
@refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingKimDeMets} x7a <- getDesignGroupSequential( kMax = 4, alpha = 0.13, beta = 0.41, typeOfDesign = "asOF", typeBetaSpending = "bsKD", informationRates = c(0.4, 0.75, 0.85, 1), gammaB = 2.5, bindingFutility = FALSE ) ## Comparison of the results of TrialDesignGroupSequential object 'x7a' with expected results expect_equal(x7a$power, c(0.1110095, 0.43099683, 0.50326205, 0.59), tolerance = 1e-07) expect_equal(x7a$futilityBounds, c(-0.82676531, 0.3743303, 0.65077266), tolerance = 1e-07) expect_equal(x7a$alphaSpent, c(0.016665509, 0.080406163, 0.10053322, 0.13), tolerance = 1e-07) expect_equal(x7a$betaSpent, c(0.041489083, 0.19972711, 0.27310596, 0.41), tolerance = 1e-07) expect_equal(x7a$criticalValues, c(2.1280732, 1.4368565, 1.422873, 1.2970881), tolerance = 1e-07) expect_equal(x7a$stageLevels, c(0.016665509, 0.075379384, 0.077386492, 0.097300444), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7a), NA))) expect_output(print(x7a)$show()) invisible(capture.output(expect_error(summary(x7a), NA))) expect_output(summary(x7a)$show()) x7aCodeBased <- eval(parse(text = getObjectRCode(x7a, stringWrapParagraphWidth = NULL))) expect_equal(x7aCodeBased$power, x7a$power, tolerance = 1e-07) expect_equal(x7aCodeBased$futilityBounds, x7a$futilityBounds, tolerance = 1e-07) expect_equal(x7aCodeBased$alphaSpent, x7a$alphaSpent, tolerance = 1e-07) expect_equal(x7aCodeBased$betaSpent, x7a$betaSpent, tolerance = 1e-07) expect_equal(x7aCodeBased$criticalValues, x7a$criticalValues, tolerance = 1e-07) expect_equal(x7aCodeBased$stageLevels, x7a$stageLevels, tolerance = 1e-07) expect_type(names(x7a), "character") df <- as.data.frame(x7a) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx 
<- as.matrix(x7a) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asP' and 'bsUser' and binding futility bounds", { # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingPocock} # @refFS[Formula]{fs:betaSpendingApproach} x6b <- getDesignGroupSequential( kMax = 3, alpha = 0.13, typeOfDesign = "asP", typeBetaSpending = "bsUser", informationRates = c(0.35, 0.7, 1), bindingFutility = TRUE, userBetaSpending = c(0.01, 0.05, 0.3) ) ## Comparison of the results of TrialDesignGroupSequential object 'x6b' with expected results expect_equal(x6b$power, c(0.31728597, 0.55917233, 0.7), tolerance = 1e-07) expect_equal(x6b$futilityBounds, c(-1.2569879, -0.17011271), tolerance = 1e-07) expect_equal(x6b$alphaSpent, c(0.061214062, 0.10266465, 0.13), tolerance = 1e-07) expect_equal(x6b$betaSpent, c(0.01, 0.05, 0.3), tolerance = 1e-07) expect_equal(x6b$criticalValues, c(1.5446617, 1.4827312, 1.4588737), tolerance = 1e-07) expect_equal(x6b$stageLevels, c(0.061214062, 0.069072925, 0.072299935), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6b), NA))) expect_output(print(x6b)$show()) invisible(capture.output(expect_error(summary(x6b), NA))) expect_output(summary(x6b)$show()) x6bCodeBased <- eval(parse(text = getObjectRCode(x6b, stringWrapParagraphWidth = NULL))) expect_equal(x6bCodeBased$power, x6b$power, tolerance = 1e-07) expect_equal(x6bCodeBased$futilityBounds, x6b$futilityBounds, tolerance = 1e-07) expect_equal(x6bCodeBased$alphaSpent, x6b$alphaSpent, tolerance = 1e-07) expect_equal(x6bCodeBased$betaSpent, x6b$betaSpent, tolerance = 1e-07) expect_equal(x6bCodeBased$criticalValues, x6b$criticalValues, tolerance = 1e-07) expect_equal(x6bCodeBased$stageLevels, x6b$stageLevels, tolerance = 1e-07) expect_type(names(x6b), "character") df <- as.data.frame(x6b) 
expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6b) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsKD' and binding futility bounds (kMax = 3)", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingKimDeMets} x7b <- getDesignGroupSequential( kMax = 3, alpha = 0.13, beta = 0.41, typeOfDesign = "asOF", typeBetaSpending = "bsKD", informationRates = c(0.4, 0.75, 1), gammaB = 2.5, bindingFutility = TRUE ) ## Comparison of the results of TrialDesignGroupSequential object 'x7b' with expected results expect_equal(x7b$power, c(0.1067887, 0.41918821, 0.59), tolerance = 1e-07) expect_equal(x7b$futilityBounds, c(-0.84937686, 0.34328914), tolerance = 1e-07) expect_equal(x7b$alphaSpent, c(0.016665509, 0.080406163, 0.12999999), tolerance = 1e-07) expect_equal(x7b$betaSpent, c(0.041489083, 0.19972711, 0.41), tolerance = 1e-07) expect_equal(x7b$criticalValues, c(2.1280732, 1.4362896, 1.2218662), tolerance = 1e-07) expect_equal(x7b$stageLevels, c(0.016665509, 0.075459972, 0.11087911), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7b), NA))) expect_output(print(x7b)$show()) invisible(capture.output(expect_error(summary(x7b), NA))) expect_output(summary(x7b)$show()) x7bCodeBased <- eval(parse(text = getObjectRCode(x7b, stringWrapParagraphWidth = NULL))) expect_equal(x7bCodeBased$power, x7b$power, tolerance = 1e-07) expect_equal(x7bCodeBased$futilityBounds, x7b$futilityBounds, tolerance = 1e-07) expect_equal(x7bCodeBased$alphaSpent, x7b$alphaSpent, tolerance = 1e-07) expect_equal(x7bCodeBased$betaSpent, x7b$betaSpent, tolerance = 1e-07) expect_equal(x7bCodeBased$criticalValues, 
x7b$criticalValues, tolerance = 1e-07) expect_equal(x7bCodeBased$stageLevels, x7b$stageLevels, tolerance = 1e-07) expect_type(names(x7b), "character") df <- as.data.frame(x7b) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7b) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsKD' and binding futility bounds (kMax = 4)", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingKimDeMets} x7b <- getDesignGroupSequential( kMax = 4, alpha = 0.13, beta = 0.41, typeOfDesign = "asOF", typeBetaSpending = "bsKD", informationRates = c(0.4, 0.75, 0.85, 1), gammaB = 2.5, bindingFutility = TRUE ) ## Comparison of the results of TrialDesignGroupSequential object 'x7b' with expected results expect_equal(x7b$power, c(0.10806422, 0.422855, 0.4950578, 0.59), tolerance = 1e-07) expect_equal(x7b$futilityBounds, c(-0.84247693, 0.35276055, 0.62744509), tolerance = 1e-07) expect_equal(x7b$alphaSpent, c(0.016665509, 0.080406163, 0.10053322, 0.13), tolerance = 1e-07) expect_equal(x7b$betaSpent, c(0.041489083, 0.19972711, 0.27310596, 0.41), tolerance = 1e-07) expect_equal(x7b$criticalValues, c(2.1280732, 1.4362706, 1.4203748, 1.2576258), tolerance = 1e-07) expect_equal(x7b$stageLevels, c(0.016665509, 0.075462674, 0.077749297, 0.10426357), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7b), NA))) expect_output(print(x7b)$show()) invisible(capture.output(expect_error(summary(x7b), NA))) expect_output(summary(x7b)$show()) x7bCodeBased <- eval(parse(text = getObjectRCode(x7b, stringWrapParagraphWidth = NULL))) expect_equal(x7bCodeBased$power, x7b$power, tolerance = 1e-07) 
expect_equal(x7bCodeBased$futilityBounds, x7b$futilityBounds, tolerance = 1e-07) expect_equal(x7bCodeBased$alphaSpent, x7b$alphaSpent, tolerance = 1e-07) expect_equal(x7bCodeBased$betaSpent, x7b$betaSpent, tolerance = 1e-07) expect_equal(x7bCodeBased$criticalValues, x7b$criticalValues, tolerance = 1e-07) expect_equal(x7bCodeBased$stageLevels, x7b$stageLevels, tolerance = 1e-07) expect_type(names(x7b), "character") df <- as.data.frame(x7b) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7b) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsKD' and binding futility bounds, two-sided (kMax = 3)", { # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingKimDeMets} # @refFS[Formula]{fs:betaSpendingApproachTwoSided} # @refFS[Formula]{fs:betaSpendingAdjustment} suppressWarnings(x7c <- getDesignGroupSequential( kMax = 3, alpha = 0.09, beta = 0.11, sided = 2, typeOfDesign = "asOF", typeBetaSpending = "bsKD", informationRates = c(0.2, 0.55, 1), gammaB = 2.5, bindingFutility = TRUE )) ## Comparison of the results of TrialDesignGroupSequential object 'x7c' with expected results expect_equal(x7c$power, c(0.0013105743, 0.39377047, 0.889997), tolerance = 1e-07) expect_equal(x7c$futilityBounds, c(NA_real_, 0.30419861), tolerance = 1e-07) expect_equal(x7c$alphaSpent, c(1.475171e-05, 0.013740227, 0.09), tolerance = 1e-07) expect_equal(x7c$betaSpent, c(0, 0.023123303, 0.11), tolerance = 1e-07) expect_equal(x7c$criticalValues, c(4.3323635, 2.4641251, 1.7013171), tolerance = 1e-07) expect_equal(x7c$stageLevels, c(7.375855e-06, 0.006867409, 0.044441733), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7c), NA))) 
expect_output(print(x7c)$show()) invisible(capture.output(expect_error(summary(x7c), NA))) expect_output(summary(x7c)$show()) suppressWarnings(x7cCodeBased <- eval(parse(text = getObjectRCode(x7c, stringWrapParagraphWidth = NULL)))) expect_equal(x7cCodeBased$power, x7c$power, tolerance = 1e-07) expect_equal(x7cCodeBased$futilityBounds, x7c$futilityBounds, tolerance = 1e-07) expect_equal(x7cCodeBased$alphaSpent, x7c$alphaSpent, tolerance = 1e-07) expect_equal(x7cCodeBased$betaSpent, x7c$betaSpent, tolerance = 1e-07) expect_equal(x7cCodeBased$criticalValues, x7c$criticalValues, tolerance = 1e-07) expect_equal(x7cCodeBased$stageLevels, x7c$stageLevels, tolerance = 1e-07) expect_type(names(x7c), "character") df <- as.data.frame(x7c) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7c) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(x7d <- getDesignGroupSequential( kMax = 3, alpha = 0.05, beta = 0.2, sided = 2, typeOfDesign = "asOF", typeBetaSpending = "bsKD", informationRates = c(0.4, 0.65, 1), gammaB = 1.5, bindingFutility = TRUE )) ## Comparison of the results of TrialDesignGroupSequential object 'x7d' with expected results expect_equal(x7d$power, c(0.063122463, 0.41229849, 0.79999885), tolerance = 1e-07) expect_equal(x7d$futilityBounds, c(0.32391511, 0.9194681), tolerance = 1e-07) expect_equal(x7d$alphaSpent, c(0.00078830351, 0.010867832, 0.04999999), tolerance = 1e-07) expect_equal(x7d$betaSpent, c(0.050596443, 0.10480935, 0.2), tolerance = 1e-07) expect_equal(x7d$criticalValues, c(3.3568694, 2.5549656, 1.9350784), tolerance = 1e-07) expect_equal(x7d$stageLevels, c(0.00039415176, 0.0053099152, 0.026490337), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7d), NA))) expect_output(print(x7d)$show()) invisible(capture.output(expect_error(summary(x7d), NA))) expect_output(summary(x7d)$show()) 
suppressWarnings(x7dCodeBased <- eval(parse(text = getObjectRCode(x7d, stringWrapParagraphWidth = NULL)))) expect_equal(x7dCodeBased$power, x7d$power, tolerance = 1e-07) expect_equal(x7dCodeBased$futilityBounds, x7d$futilityBounds, tolerance = 1e-07) expect_equal(x7dCodeBased$alphaSpent, x7d$alphaSpent, tolerance = 1e-07) expect_equal(x7dCodeBased$betaSpent, x7d$betaSpent, tolerance = 1e-07) expect_equal(x7dCodeBased$criticalValues, x7d$criticalValues, tolerance = 1e-07) expect_equal(x7dCodeBased$stageLevels, x7d$stageLevels, tolerance = 1e-07) expect_type(names(x7d), "character") df <- as.data.frame(x7d) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7d) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsKD' and non-binding futility bounds, no betaAdjustment, two-sided (kMax = 3)", { # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingKimDeMets} # @refFS[Formula]{fs:betaSpendingApproachTwoSided} # @refFS[Formula]{fs:betaSpendingAdjustment} suppressWarnings(x7e <- getDesignGroupSequential( kMax = 3, alpha = 0.09, beta = 0.11, sided = 2, typeOfDesign = "asOF", typeBetaSpending = "bsKD", informationRates = c(0.4, 0.65, 1), betaAdjustment = FALSE, gammaB = 2.5, bindingFutility = FALSE )) ## Comparison of the results of TrialDesignGroupSequential object 'x7e' with expected results expect_equal(x7e$power, c(0.14268064, 0.57037981, 0.88999701), tolerance = 1e-07) expect_equal(x7e$futilityBounds, c(NA_real_, 0.64692592), tolerance = 1e-07) expect_equal(x7e$alphaSpent, c(0.0030525896, 0.025803646, 0.09), tolerance = 1e-07) expect_equal(x7e$betaSpent, c(0, 0.037469343, 0.11), tolerance = 1e-07) expect_equal(x7e$criticalValues, c(2.9623919, 2.2442359, 1.7391729), 
tolerance = 1e-07) expect_equal(x7e$stageLevels, c(0.0015262948, 0.012408614, 0.041002179), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7e), NA))) expect_output(print(x7e)$show()) invisible(capture.output(expect_error(summary(x7e), NA))) expect_output(summary(x7e)$show()) suppressWarnings(x7eCodeBased <- eval(parse(text = getObjectRCode(x7e, stringWrapParagraphWidth = NULL)))) expect_equal(x7eCodeBased$power, x7e$power, tolerance = 1e-07) expect_equal(x7eCodeBased$futilityBounds, x7e$futilityBounds, tolerance = 1e-07) expect_equal(x7eCodeBased$alphaSpent, x7e$alphaSpent, tolerance = 1e-07) expect_equal(x7eCodeBased$betaSpent, x7e$betaSpent, tolerance = 1e-07) expect_equal(x7eCodeBased$criticalValues, x7e$criticalValues, tolerance = 1e-07) expect_equal(x7eCodeBased$stageLevels, x7e$stageLevels, tolerance = 1e-07) expect_type(names(x7e), "character") df <- as.data.frame(x7e) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7e) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asOF' and 'bsOF', binding futility bounds and delayed response (kMax = 3)", { # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingOBrienFleming} # @refFS[Formula]{fs:delayedResponseCondition1} # @refFS[Formula]{fs:delayedResponseCondition2} # @refFS[Formula]{fs:delayedResponsePower} suppressWarnings(dl1 <- getDesignGroupSequential( kMax = 3, alpha = 0.05, beta = 0.1, sided = 1, typeOfDesign = "asOF", typeBetaSpending = "bsOF", informationRates = c(0.4, 0.65, 1), bindingFutility = TRUE, delayedInformation = c(0.1, 0.2) )) ## Comparison of the results of TrialDesignGroupSequential object 'dl1' with expected results 
expect_equal(dl1$power, c(0.15998522, 0.59313184, 0.9), tolerance = 1e-07) expect_equal(dl1$futilityBounds, c(-0.46043472, 0.64445014), tolerance = 1e-07) expect_equal(dl1$alphaSpent, c(0.001941913, 0.015055713, 0.05), tolerance = 1e-07) expect_equal(dl1$betaSpent, c(0.00930224, 0.041331422, 0.1), tolerance = 1e-07) expect_equal(dl1$criticalValues, c(2.8874465, 2.1853011, 1.6575593), tolerance = 1e-07) expect_equal(dl1$stageLevels, c(0.001941913, 0.014433388, 0.048703222), tolerance = 1e-07) expect_equal(dl1$decisionCriticalValues, c(1.3388855, 1.5378695, 1.6575593), tolerance = 1e-07) expect_equal(dl1$reversalProbabilities, c(1.7563249e-06, 0.0014674026), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dl1), NA))) expect_output(print(dl1)$show()) invisible(capture.output(expect_error(summary(dl1), NA))) expect_output(summary(dl1)$show()) suppressWarnings(dl1CodeBased <- eval(parse(text = getObjectRCode(dl1, stringWrapParagraphWidth = NULL)))) expect_equal(dl1CodeBased$power, dl1$power, tolerance = 1e-07) expect_equal(dl1CodeBased$futilityBounds, dl1$futilityBounds, tolerance = 1e-07) expect_equal(dl1CodeBased$alphaSpent, dl1$alphaSpent, tolerance = 1e-07) expect_equal(dl1CodeBased$betaSpent, dl1$betaSpent, tolerance = 1e-07) expect_equal(dl1CodeBased$criticalValues, dl1$criticalValues, tolerance = 1e-07) expect_equal(dl1CodeBased$stageLevels, dl1$stageLevels, tolerance = 1e-07) expect_equal(dl1CodeBased$decisionCriticalValues, dl1$decisionCriticalValues, tolerance = 1e-07) expect_equal(dl1CodeBased$reversalProbabilities, dl1$reversalProbabilities, tolerance = 1e-07) expect_type(names(dl1), "character") df <- as.data.frame(dl1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dl1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } dl2 <- getDesignCharacteristics(dl1) ## Comparison of the results of TrialDesignCharacteristics object 'dl2' 
with expected results expect_equal(dl2$nFixed, 8.5638474, tolerance = 1e-07) expect_equal(dl2$shift, 8.8633082, tolerance = 1e-07) expect_equal(dl2$inflationFactor, 1.034968, tolerance = 1e-07) expect_equal(dl2$information, c(3.5453233, 5.7611503, 8.8633082), tolerance = 1e-07) expect_equal(dl2$power, c(0.15755984, 0.59089729, 0.9), tolerance = 1e-07) expect_equal(dl2$rejectionProbabilities, c(0.15755984, 0.43333745, 0.30910271), tolerance = 1e-07) expect_equal(dl2$futilityProbabilities, c(0.0095560402, 0.032904105), tolerance = 1e-07) expect_equal(dl2$averageSampleNumber1, 0.87652961, tolerance = 1e-07) expect_equal(dl2$averageSampleNumber01, 0.92477729, tolerance = 1e-07) expect_equal(dl2$averageSampleNumber0, 0.79932679, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dl2), NA))) expect_output(print(dl2)$show()) invisible(capture.output(expect_error(summary(dl2), NA))) expect_output(summary(dl2)$show()) suppressWarnings(dl2CodeBased <- eval(parse(text = getObjectRCode(dl2, stringWrapParagraphWidth = NULL)))) expect_equal(dl2CodeBased$nFixed, dl2$nFixed, tolerance = 1e-07) expect_equal(dl2CodeBased$shift, dl2$shift, tolerance = 1e-07) expect_equal(dl2CodeBased$inflationFactor, dl2$inflationFactor, tolerance = 1e-07) expect_equal(dl2CodeBased$information, dl2$information, tolerance = 1e-07) expect_equal(dl2CodeBased$power, dl2$power, tolerance = 1e-07) expect_equal(dl2CodeBased$rejectionProbabilities, dl2$rejectionProbabilities, tolerance = 1e-07) expect_equal(dl2CodeBased$futilityProbabilities, dl2$futilityProbabilities, tolerance = 1e-07) expect_equal(dl2CodeBased$averageSampleNumber1, dl2$averageSampleNumber1, tolerance = 1e-07) expect_equal(dl2CodeBased$averageSampleNumber01, dl2$averageSampleNumber01, tolerance = 1e-07) expect_equal(dl2CodeBased$averageSampleNumber0, dl2$averageSampleNumber0, tolerance = 1e-07) expect_type(names(dl2), "character") df <- as.data.frame(dl2) expect_s3_class(df, 
"data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dl2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(dl3 <- getDesignGroupSequential( kMax = 3, alpha = 0.05, beta = 0.1, sided = 1, typeOfDesign = "asOF", typeBetaSpending = "bsOF", informationRates = c(0.4, 0.65, 1), bindingFutility = TRUE, delayedInformation = c(0, 0.2) )) ## Comparison of the results of TrialDesignGroupSequential object 'dl3' with expected results expect_equal(dl3$power, c(0.15998522, 0.59313184, 0.9), tolerance = 1e-07) expect_equal(dl3$futilityBounds, c(-0.46043472, 0.64445014), tolerance = 1e-07) expect_equal(dl3$alphaSpent, c(0.001941913, 0.015055713, 0.05), tolerance = 1e-07) expect_equal(dl3$betaSpent, c(0.00930224, 0.041331422, 0.1), tolerance = 1e-07) expect_equal(dl3$criticalValues, c(2.8874465, 2.1853011, 1.6575593), tolerance = 1e-07) expect_equal(dl3$stageLevels, c(0.001941913, 0.014433388, 0.048703222), tolerance = 1e-07) expect_equal(dl3$decisionCriticalValues, c(NA_real_, 1.5378695, 1.6575593), tolerance = 1e-07) expect_equal(dl3$reversalProbabilities, c(NA_real_, 0.0014674026), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dl3), NA))) expect_output(print(dl3)$show()) invisible(capture.output(expect_error(summary(dl3), NA))) expect_output(summary(dl3)$show()) suppressWarnings(dl3CodeBased <- eval(parse(text = getObjectRCode(dl3, stringWrapParagraphWidth = NULL)))) expect_equal(dl3CodeBased$power, dl3$power, tolerance = 1e-07) expect_equal(dl3CodeBased$futilityBounds, dl3$futilityBounds, tolerance = 1e-07) expect_equal(dl3CodeBased$alphaSpent, dl3$alphaSpent, tolerance = 1e-07) expect_equal(dl3CodeBased$betaSpent, dl3$betaSpent, tolerance = 1e-07) expect_equal(dl3CodeBased$criticalValues, dl3$criticalValues, tolerance = 1e-07) expect_equal(dl3CodeBased$stageLevels, dl3$stageLevels, tolerance = 1e-07) expect_equal(dl3CodeBased$decisionCriticalValues, 
dl3$decisionCriticalValues, tolerance = 1e-07) expect_equal(dl3CodeBased$reversalProbabilities, dl3$reversalProbabilities, tolerance = 1e-07) expect_type(names(dl3), "character") df <- as.data.frame(dl3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dl3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } dl4 <- getDesignCharacteristics(dl3) ## Comparison of the results of TrialDesignCharacteristics object 'dl4' with expected results expect_equal(dl4$nFixed, 8.5638474, tolerance = 1e-07) expect_equal(dl4$shift, 8.8633608, tolerance = 1e-07) expect_equal(dl4$inflationFactor, 1.0349742, tolerance = 1e-07) expect_equal(dl4$information, c(3.5453443, 5.7611845, 8.8633608), tolerance = 1e-07) expect_equal(dl4$power, c(0.15755967, 0.59089852, 0.9), tolerance = 1e-07) expect_equal(dl4$rejectionProbabilities, c(0.15755967, 0.43333886, 0.30910148), tolerance = 1e-07) expect_equal(dl4$futilityProbabilities, c(0.0095558971, 0.032903612), tolerance = 1e-07) expect_equal(dl4$averageSampleNumber1, 0.85923802, tolerance = 1e-07) expect_equal(dl4$averageSampleNumber01, 0.91378094, tolerance = 1e-07) expect_equal(dl4$averageSampleNumber0, 0.76574207, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dl4), NA))) expect_output(print(dl4)$show()) invisible(capture.output(expect_error(summary(dl4), NA))) expect_output(summary(dl4)$show()) suppressWarnings(dl4CodeBased <- eval(parse(text = getObjectRCode(dl4, stringWrapParagraphWidth = NULL)))) expect_equal(dl4CodeBased$nFixed, dl4$nFixed, tolerance = 1e-07) expect_equal(dl4CodeBased$shift, dl4$shift, tolerance = 1e-07) expect_equal(dl4CodeBased$inflationFactor, dl4$inflationFactor, tolerance = 1e-07) expect_equal(dl4CodeBased$information, dl4$information, tolerance = 1e-07) expect_equal(dl4CodeBased$power, dl4$power, tolerance = 1e-07) expect_equal(dl4CodeBased$rejectionProbabilities, 
dl4$rejectionProbabilities, tolerance = 1e-07) expect_equal(dl4CodeBased$futilityProbabilities, dl4$futilityProbabilities, tolerance = 1e-07) expect_equal(dl4CodeBased$averageSampleNumber1, dl4$averageSampleNumber1, tolerance = 1e-07) expect_equal(dl4CodeBased$averageSampleNumber01, dl4$averageSampleNumber01, tolerance = 1e-07) expect_equal(dl4CodeBased$averageSampleNumber0, dl4$averageSampleNumber0, tolerance = 1e-07) expect_type(names(dl4), "character") df <- as.data.frame(dl4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dl4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(dl5 <- getDesignGroupSequential( kMax = 3, alpha = 0.05, beta = 0.1, sided = 1, typeOfDesign = "asOF", typeBetaSpending = "bsOF", informationRates = c(0.4, 0.65, 1), bindingFutility = TRUE, delayedInformation = 0.3 )) ## Comparison of the results of TrialDesignGroupSequential object 'dl5' with expected results expect_equal(dl5$power, c(0.15998522, 0.59313184, 0.9), tolerance = 1e-07) expect_equal(dl5$futilityBounds, c(-0.46043472, 0.64445014), tolerance = 1e-07) expect_equal(dl5$alphaSpent, c(0.001941913, 0.015055713, 0.05), tolerance = 1e-07) expect_equal(dl5$betaSpent, c(0.00930224, 0.041331422, 0.1), tolerance = 1e-07) expect_equal(dl5$criticalValues, c(2.8874465, 2.1853011, 1.6575593), tolerance = 1e-07) expect_equal(dl5$stageLevels, c(0.001941913, 0.014433388, 0.048703222), tolerance = 1e-07) expect_equal(dl5$decisionCriticalValues, c(1.505831, 1.5735979, 1.6575593), tolerance = 1e-07) expect_equal(dl5$reversalProbabilities, c(0.00018341474, 0.0027022502), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dl5), NA))) expect_output(print(dl5)$show()) invisible(capture.output(expect_error(summary(dl5), NA))) expect_output(summary(dl5)$show()) suppressWarnings(dl5CodeBased <- eval(parse(text = getObjectRCode(dl5, stringWrapParagraphWidth = 
NULL)))) expect_equal(dl5CodeBased$power, dl5$power, tolerance = 1e-07) expect_equal(dl5CodeBased$futilityBounds, dl5$futilityBounds, tolerance = 1e-07) expect_equal(dl5CodeBased$alphaSpent, dl5$alphaSpent, tolerance = 1e-07) expect_equal(dl5CodeBased$betaSpent, dl5$betaSpent, tolerance = 1e-07) expect_equal(dl5CodeBased$criticalValues, dl5$criticalValues, tolerance = 1e-07) expect_equal(dl5CodeBased$stageLevels, dl5$stageLevels, tolerance = 1e-07) expect_equal(dl5CodeBased$decisionCriticalValues, dl5$decisionCriticalValues, tolerance = 1e-07) expect_equal(dl5CodeBased$reversalProbabilities, dl5$reversalProbabilities, tolerance = 1e-07) expect_type(names(dl5), "character") df <- as.data.frame(dl5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dl5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } dl6 <- getDesignCharacteristics(dl5) ## Comparison of the results of TrialDesignCharacteristics object 'dl6' with expected results expect_equal(dl6$nFixed, 8.5638474, tolerance = 1e-07) expect_equal(dl6$shift, 8.7180222, tolerance = 1e-07) expect_equal(dl6$inflationFactor, 1.018003, tolerance = 1e-07) expect_equal(dl6$information, c(3.4872089, 5.6667144, 8.7180222), tolerance = 1e-07) expect_equal(dl6$power, c(0.15429254, 0.58752252, 0.9), tolerance = 1e-07) expect_equal(dl6$rejectionProbabilities, c(0.15429254, 0.43322998, 0.31247748), tolerance = 1e-07) expect_equal(dl6$futilityProbabilities, c(0.0099602552, 0.03429374), tolerance = 1e-07) expect_equal(dl6$averageSampleNumber1, 0.94451255, tolerance = 1e-07) expect_equal(dl6$averageSampleNumber01, 0.96721799, tolerance = 1e-07) expect_equal(dl6$averageSampleNumber0, 0.89669187, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(dl6), NA))) expect_output(print(dl6)$show()) invisible(capture.output(expect_error(summary(dl6), NA))) expect_output(summary(dl6)$show()) suppressWarnings(dl6CodeBased 
<- eval(parse(text = getObjectRCode(dl6, stringWrapParagraphWidth = NULL)))) expect_equal(dl6CodeBased$nFixed, dl6$nFixed, tolerance = 1e-07) expect_equal(dl6CodeBased$shift, dl6$shift, tolerance = 1e-07) expect_equal(dl6CodeBased$inflationFactor, dl6$inflationFactor, tolerance = 1e-07) expect_equal(dl6CodeBased$information, dl6$information, tolerance = 1e-07) expect_equal(dl6CodeBased$power, dl6$power, tolerance = 1e-07) expect_equal(dl6CodeBased$rejectionProbabilities, dl6$rejectionProbabilities, tolerance = 1e-07) expect_equal(dl6CodeBased$futilityProbabilities, dl6$futilityProbabilities, tolerance = 1e-07) expect_equal(dl6CodeBased$averageSampleNumber1, dl6$averageSampleNumber1, tolerance = 1e-07) expect_equal(dl6CodeBased$averageSampleNumber01, dl6$averageSampleNumber01, tolerance = 1e-07) expect_equal(dl6CodeBased$averageSampleNumber0, dl6$averageSampleNumber0, tolerance = 1e-07) expect_type(names(dl6), "character") df <- as.data.frame(dl6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(dl6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'asP' and 'bsP', non-binding futility bounds and delayed response (kMax = 3)", { # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:alphaSpendingConcept} # @refFS[Formula]{fs:alphaSpendingOBrienFleming} # @refFS[Formula]{fs:betaSpendingApproach} # @refFS[Formula]{fs:betaSpendingOBrienFleming} # @refFS[Formula]{fs:delayedResponseCondition1} # @refFS[Formula]{fs:delayedResponseCondition2} # @refFS[Formula]{fs:delayedResponsePower} suppressWarnings(dl1 <- getDesignGroupSequential( kMax = 3, alpha = 0.05, beta = 0.1, sided = 1, typeOfDesign = "asP", typeBetaSpending = "bsP", informationRates = c(0.4, 0.65, 1), bindingFutility = FALSE, delayedInformation = c(0.1, 0.2) )) ## Comparison of the results of TrialDesignGroupSequential object 'dl1' with expected results 
# dl1: asP/bsP design with non-binding futility and delayed information c(0.1, 0.2).
# All expected values below are hard-coded regression references; compare with
# a tight tolerance so any numerical drift in the algorithms is detected.
expect_equal(dl1$power, c(0.58983431, 0.79279807, 0.9), tolerance = 1e-07)
expect_equal(dl1$futilityBounds, c(0.5448398, 1.0899149), tolerance = 1e-07)
expect_equal(dl1$alphaSpent, c(0.026156858, 0.037497241, 0.05), tolerance = 1e-07)
expect_equal(dl1$betaSpent, c(0.052313716, 0.074994481, 0.099999999), tolerance = 1e-07)
expect_equal(dl1$criticalValues, c(1.9405431, 2.0327662, 1.9734104), tolerance = 1e-07)
expect_equal(dl1$stageLevels, c(0.026156858, 0.021038075, 0.02422441), tolerance = 1e-07)
expect_equal(dl1$decisionCriticalValues, c(1.3362296, 1.657468, 1.9734104), tolerance = 1e-07)
expect_equal(dl1$reversalProbabilities, c(0.0020439695, 0.0026967589), tolerance = 1e-07)
# Extended checks (print/summary output, code-based round trip, data.frame/matrix
# conversion) run only when the complete unit test set is enabled.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(dl1), NA)))
    expect_output(print(dl1)$show())
    invisible(capture.output(expect_error(summary(dl1), NA)))
    expect_output(summary(dl1)$show())
    # Round trip: rebuild the design from its auto-generated R code and re-check
    # that the regenerated object reproduces every result of the original.
    suppressWarnings(dl1CodeBased <- eval(parse(text = getObjectRCode(dl1, stringWrapParagraphWidth = NULL))))
    expect_equal(dl1CodeBased$power, dl1$power, tolerance = 1e-07)
    expect_equal(dl1CodeBased$futilityBounds, dl1$futilityBounds, tolerance = 1e-07)
    expect_equal(dl1CodeBased$alphaSpent, dl1$alphaSpent, tolerance = 1e-07)
    expect_equal(dl1CodeBased$betaSpent, dl1$betaSpent, tolerance = 1e-07)
    expect_equal(dl1CodeBased$criticalValues, dl1$criticalValues, tolerance = 1e-07)
    expect_equal(dl1CodeBased$stageLevels, dl1$stageLevels, tolerance = 1e-07)
    expect_equal(dl1CodeBased$decisionCriticalValues, dl1$decisionCriticalValues, tolerance = 1e-07)
    expect_equal(dl1CodeBased$reversalProbabilities, dl1$reversalProbabilities, tolerance = 1e-07)
    expect_type(names(dl1), "character")
    df <- as.data.frame(dl1)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(dl1)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
dl2 <- getDesignCharacteristics(dl1)
## Comparison of the results of TrialDesignCharacteristics object 'dl2' with expected results
expect_equal(dl2$nFixed, 8.5638474, tolerance = 1e-07)
expect_equal(dl2$shift, 11.345796, tolerance = 1e-07)
expect_equal(dl2$inflationFactor, 1.324848, tolerance = 1e-07)
expect_equal(dl2$information, c(4.5383183, 7.3747672, 11.345796), tolerance = 1e-07)
expect_equal(dl2$power, c(0.57788702, 0.78847934, 0.9), tolerance = 1e-07)
expect_equal(dl2$rejectionProbabilities, c(0.57788702, 0.21059232, 0.11152066), tolerance = 1e-07)
expect_equal(dl2$futilityProbabilities, c(0.056427171, 0.024888086), tolerance = 1e-07)
expect_equal(dl2$averageSampleNumber1, 0.86088771, tolerance = 1e-07)
expect_equal(dl2$averageSampleNumber01, 0.9483049, tolerance = 1e-07)
expect_equal(dl2$averageSampleNumber0, 0.80259202, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(dl2), NA)))
    expect_output(print(dl2)$show())
    invisible(capture.output(expect_error(summary(dl2), NA)))
    expect_output(summary(dl2)$show())
    suppressWarnings(dl2CodeBased <- eval(parse(text = getObjectRCode(dl2, stringWrapParagraphWidth = NULL))))
    expect_equal(dl2CodeBased$nFixed, dl2$nFixed, tolerance = 1e-07)
    expect_equal(dl2CodeBased$shift, dl2$shift, tolerance = 1e-07)
    expect_equal(dl2CodeBased$inflationFactor, dl2$inflationFactor, tolerance = 1e-07)
    expect_equal(dl2CodeBased$information, dl2$information, tolerance = 1e-07)
    expect_equal(dl2CodeBased$power, dl2$power, tolerance = 1e-07)
    expect_equal(dl2CodeBased$rejectionProbabilities, dl2$rejectionProbabilities, tolerance = 1e-07)
    expect_equal(dl2CodeBased$futilityProbabilities, dl2$futilityProbabilities, tolerance = 1e-07)
    expect_equal(dl2CodeBased$averageSampleNumber1, dl2$averageSampleNumber1, tolerance = 1e-07)
    expect_equal(dl2CodeBased$averageSampleNumber01, dl2$averageSampleNumber01, tolerance = 1e-07)
    expect_equal(dl2CodeBased$averageSampleNumber0, dl2$averageSampleNumber0, tolerance = 1e-07)
    expect_type(names(dl2), "character")
    df <- as.data.frame(dl2)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(dl2)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# dl3: same design as dl1 but with delayedInformation = c(0, 0.2), i.e. no delay
# at the first stage; the first-stage decision critical value and reversal
# probability are therefore expected to be NA below.
suppressWarnings(dl3 <- getDesignGroupSequential(
    kMax = 3, alpha = 0.05, beta = 0.1, sided = 1,
    typeOfDesign = "asP", typeBetaSpending = "bsP",
    informationRates = c(0.4, 0.65, 1), bindingFutility = FALSE,
    delayedInformation = c(0, 0.2)
))
## Comparison of the results of TrialDesignGroupSequential object 'dl3' with expected results
expect_equal(dl3$power, c(0.58983431, 0.79279807, 0.9), tolerance = 1e-07)
expect_equal(dl3$futilityBounds, c(0.5448398, 1.0899149), tolerance = 1e-07)
expect_equal(dl3$alphaSpent, c(0.026156858, 0.037497241, 0.05), tolerance = 1e-07)
expect_equal(dl3$betaSpent, c(0.052313716, 0.074994481, 0.099999999), tolerance = 1e-07)
expect_equal(dl3$criticalValues, c(1.9405431, 2.0327662, 1.9734104), tolerance = 1e-07)
expect_equal(dl3$stageLevels, c(0.026156858, 0.021038075, 0.02422441), tolerance = 1e-07)
expect_equal(dl3$decisionCriticalValues, c(NA_real_, 1.657468, 1.9734104), tolerance = 1e-07)
expect_equal(dl3$reversalProbabilities, c(NA_real_, 0.0026967589), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(dl3), NA)))
    expect_output(print(dl3)$show())
    invisible(capture.output(expect_error(summary(dl3), NA)))
    expect_output(summary(dl3)$show())
    suppressWarnings(dl3CodeBased <- eval(parse(text = getObjectRCode(dl3, stringWrapParagraphWidth = NULL))))
    expect_equal(dl3CodeBased$power, dl3$power, tolerance = 1e-07)
    expect_equal(dl3CodeBased$futilityBounds, dl3$futilityBounds, tolerance = 1e-07)
    expect_equal(dl3CodeBased$alphaSpent, dl3$alphaSpent, tolerance = 1e-07)
    expect_equal(dl3CodeBased$betaSpent, dl3$betaSpent, tolerance = 1e-07)
    expect_equal(dl3CodeBased$criticalValues, dl3$criticalValues, tolerance = 1e-07)
    expect_equal(dl3CodeBased$stageLevels, dl3$stageLevels, tolerance = 1e-07)
    expect_equal(dl3CodeBased$decisionCriticalValues, dl3$decisionCriticalValues, tolerance = 1e-07)
    expect_equal(dl3CodeBased$reversalProbabilities, dl3$reversalProbabilities, tolerance = 1e-07)
    expect_type(names(dl3), "character")
    df <- as.data.frame(dl3)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(dl3)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
dl4 <- getDesignCharacteristics(dl3)
## Comparison of the results of TrialDesignCharacteristics object 'dl4' with expected results
expect_equal(dl4$nFixed, 8.5638474, tolerance = 1e-07)
expect_equal(dl4$shift, 11.462579, tolerance = 1e-07)
expect_equal(dl4$inflationFactor, 1.3384848, tolerance = 1e-07)
expect_equal(dl4$information, c(4.5850317, 7.4506765, 11.462579), tolerance = 1e-07)
expect_equal(dl4$power, c(0.57954342, 0.78973163, 0.9), tolerance = 1e-07)
expect_equal(dl4$rejectionProbabilities, c(0.57954342, 0.21018821, 0.11026837), tolerance = 1e-07)
expect_equal(dl4$futilityProbabilities, c(0.055196532, 0.024225352), tolerance = 1e-07)
expect_equal(dl4$averageSampleNumber1, 0.7829433, tolerance = 1e-07)
expect_equal(dl4$averageSampleNumber01, 0.89251343, tolerance = 1e-07)
expect_equal(dl4$averageSampleNumber0, 0.71271214, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(dl4), NA)))
    expect_output(print(dl4)$show())
    invisible(capture.output(expect_error(summary(dl4), NA)))
    expect_output(summary(dl4)$show())
    suppressWarnings(dl4CodeBased <- eval(parse(text = getObjectRCode(dl4, stringWrapParagraphWidth = NULL))))
    expect_equal(dl4CodeBased$nFixed, dl4$nFixed, tolerance = 1e-07)
    expect_equal(dl4CodeBased$shift, dl4$shift, tolerance = 1e-07)
    expect_equal(dl4CodeBased$inflationFactor, dl4$inflationFactor, tolerance = 1e-07)
    expect_equal(dl4CodeBased$information, dl4$information, tolerance = 1e-07)
    expect_equal(dl4CodeBased$power, dl4$power, tolerance = 1e-07)
    expect_equal(dl4CodeBased$rejectionProbabilities, dl4$rejectionProbabilities, tolerance = 1e-07)
    expect_equal(dl4CodeBased$futilityProbabilities, dl4$futilityProbabilities, tolerance = 1e-07)
    expect_equal(dl4CodeBased$averageSampleNumber1, dl4$averageSampleNumber1, tolerance = 1e-07)
    expect_equal(dl4CodeBased$averageSampleNumber01, dl4$averageSampleNumber01, tolerance = 1e-07)
    expect_equal(dl4CodeBased$averageSampleNumber0, dl4$averageSampleNumber0, tolerance = 1e-07)
    expect_type(names(dl4), "character")
    df <- as.data.frame(dl4)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(dl4)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# dl5: delayedInformation = 0 is expected to raise a warning; the design then has
# no delayed-response quantities, so no decisionCriticalValues/reversalProbabilities
# are checked for this object.
expect_warning(
    dl5 <- getDesignGroupSequential(
        kMax = 3, alpha = 0.05, beta = 0.1, sided = 1,
        typeOfDesign = "asP", typeBetaSpending = "bsP",
        informationRates = c(0.4, 0.65, 1), bindingFutility = FALSE,
        delayedInformation = 0
    )
)
## Comparison of the results of TrialDesignGroupSequential object 'dl5' with expected results
expect_equal(dl5$power, c(0.58983431, 0.79279807, 0.9), tolerance = 1e-07)
expect_equal(dl5$futilityBounds, c(0.5448398, 1.0899149), tolerance = 1e-07)
expect_equal(dl5$alphaSpent, c(0.026156858, 0.037497241, 0.05), tolerance = 1e-07)
expect_equal(dl5$betaSpent, c(0.052313716, 0.074994481, 0.099999999), tolerance = 1e-07)
expect_equal(dl5$criticalValues, c(1.9405431, 2.0327662, 1.9734104), tolerance = 1e-07)
expect_equal(dl5$stageLevels, c(0.026156858, 0.021038075, 0.02422441), tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(dl5), NA)))
    expect_output(print(dl5)$show())
    invisible(capture.output(expect_error(summary(dl5), NA)))
    expect_output(summary(dl5)$show())
    suppressWarnings(dl5CodeBased <- eval(parse(text = getObjectRCode(dl5, stringWrapParagraphWidth = NULL))))
    expect_equal(dl5CodeBased$power, dl5$power, tolerance = 1e-07)
    expect_equal(dl5CodeBased$futilityBounds, dl5$futilityBounds, tolerance = 1e-07)
    expect_equal(dl5CodeBased$alphaSpent, dl5$alphaSpent, tolerance = 1e-07)
    expect_equal(dl5CodeBased$betaSpent, dl5$betaSpent, tolerance = 1e-07)
    expect_equal(dl5CodeBased$criticalValues, dl5$criticalValues, tolerance = 1e-07)
    expect_equal(dl5CodeBased$stageLevels, dl5$stageLevels, tolerance = 1e-07)
    expect_type(names(dl5), "character")
    df <- as.data.frame(dl5)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(dl5)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
dl6 <- getDesignCharacteristics(dl5)
## Comparison of the results of TrialDesignCharacteristics object 'dl6' with expected results
expect_equal(dl6$nFixed, 8.5638474, tolerance = 1e-07)
expect_equal(dl6$shift, 11.746896, tolerance = 1e-07)
expect_equal(dl6$inflationFactor, 1.3716844, tolerance = 1e-07)
expect_equal(dl6$information, c(4.6987583, 7.6354822, 11.746896), tolerance = 1e-07)
expect_equal(dl6$power, c(0.58983431, 0.79279807, 0.9), tolerance = 1e-07)
expect_equal(dl6$rejectionProbabilities, c(0.58983431, 0.20296375, 0.10720193), tolerance = 1e-07)
expect_equal(dl6$futilityProbabilities, c(0.052313716, 0.022680765), tolerance = 1e-07)
expect_equal(dl6$averageSampleNumber1, 0.73486016, tolerance = 1e-07)
expect_equal(dl6$averageSampleNumber01, 0.8455149, tolerance = 1e-07)
expect_equal(dl6$averageSampleNumber0, 0.67993383, tolerance = 1e-07)
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(dl6), NA)))
    expect_output(print(dl6)$show())
    invisible(capture.output(expect_error(summary(dl6), NA)))
    expect_output(summary(dl6)$show())
    suppressWarnings(dl6CodeBased <- eval(parse(text = getObjectRCode(dl6, stringWrapParagraphWidth = NULL))))
    expect_equal(dl6CodeBased$nFixed, dl6$nFixed, tolerance = 1e-07)
    expect_equal(dl6CodeBased$shift, dl6$shift, tolerance = 1e-07)
    expect_equal(dl6CodeBased$inflationFactor, dl6$inflationFactor, tolerance = 1e-07)
    expect_equal(dl6CodeBased$information, dl6$information, tolerance = 1e-07)
    expect_equal(dl6CodeBased$power,
dl6$power, tolerance = 1e-07)
    expect_equal(dl6CodeBased$rejectionProbabilities, dl6$rejectionProbabilities, tolerance = 1e-07)
    expect_equal(dl6CodeBased$futilityProbabilities, dl6$futilityProbabilities, tolerance = 1e-07)
    expect_equal(dl6CodeBased$averageSampleNumber1, dl6$averageSampleNumber1, tolerance = 1e-07)
    expect_equal(dl6CodeBased$averageSampleNumber01, dl6$averageSampleNumber01, tolerance = 1e-07)
    expect_equal(dl6CodeBased$averageSampleNumber0, dl6$averageSampleNumber0, tolerance = 1e-07)
    expect_type(names(dl6), "character")
    df <- as.data.frame(dl6)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(dl6)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

# Regression tests for Wang-Tsiatis type designs with binding futility bounds;
# expected values are hard-coded references checked with a tight tolerance.
test_that("'getDesignGroupSequential' with binding futility bounds", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential}
    # @refFS[Formula]{fs:criticalValuesWithFutility}
    # @refFS[Formula]{fs:criticalValuesWangTiatis}
    # x8a: one-sided WT design with constant binding futility bounds.
    x8a <- getDesignGroupSequential(
        kMax = 4, alpha = 0.025, futilityBounds = rep(0.5244, 3),
        bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.4
    )
    ## Comparison of the results of TrialDesignGroupSequential object 'x8a' with expected results
    expect_equal(x8a$alphaSpent, c(0.0062828133, 0.013876673, 0.02015684, 0.02499999), tolerance = 1e-07)
    expect_equal(x8a$criticalValues, c(2.4958485, 2.328709, 2.2361766, 2.1727623), tolerance = 1e-07)
    expect_equal(x8a$stageLevels, c(0.0062828133, 0.0099372444, 0.012670104, 0.014899106), tolerance = 1e-07)
    # Extended checks (print/summary, code-based round trip, conversions) only
    # when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x8a), NA)))
        expect_output(print(x8a)$show())
        invisible(capture.output(expect_error(summary(x8a), NA)))
        expect_output(summary(x8a)$show())
        x8aCodeBased <- eval(parse(text = getObjectRCode(x8a, stringWrapParagraphWidth = NULL)))
        expect_equal(x8aCodeBased$alphaSpent, x8a$alphaSpent, tolerance = 1e-07)
        expect_equal(x8aCodeBased$criticalValues, x8a$criticalValues, tolerance = 1e-07)
        expect_equal(x8aCodeBased$stageLevels, x8a$stageLevels, tolerance = 1e-07)
        expect_type(names(x8a), "character")
        df <- as.data.frame(x8a)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x8a)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential}
    # @refFS[Formula]{fs:criticalValuesWangTiatis}
    # x8b: two-sided WT design with unequal information rates.
    x8b <- getDesignGroupSequential(
        kMax = 3, alpha = 0.025, sided = 2, informationRates = c(0.3, 0.8, 1),
        typeOfDesign = "WT", deltaWT = 0.24
    )
    ## Comparison of the results of TrialDesignGroupSequential object 'x8b' with expected results
    expect_equal(x8b$alphaSpent, c(0.0013603353, 0.013978861, 0.02499999), tolerance = 1e-07)
    expect_equal(x8b$criticalValues, c(3.2029374, 2.4819703, 2.3420706), tolerance = 1e-07)
    expect_equal(x8b$stageLevels, c(0.00068016766, 0.0065329078, 0.0095885436), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x8b), NA)))
        expect_output(print(x8b)$show())
        invisible(capture.output(expect_error(summary(x8b), NA)))
        expect_output(summary(x8b)$show())
        x8bCodeBased <- eval(parse(text = getObjectRCode(x8b, stringWrapParagraphWidth = NULL)))
        expect_equal(x8bCodeBased$alphaSpent, x8b$alphaSpent, tolerance = 1e-07)
        expect_equal(x8bCodeBased$criticalValues, x8b$criticalValues, tolerance = 1e-07)
        expect_equal(x8bCodeBased$stageLevels, x8b$stageLevels, tolerance = 1e-07)
        expect_type(names(x8b), "character")
        df <- as.data.frame(x8b)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x8b)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential}
    # @refFS[Formula]{fs:criticalValuesWangTiatis}
    # x8c: optimum WT design; the optimized deltaWT is part of the expected output.
    x8c <- getDesignGroupSequential(
        kMax = 3, alpha = 0.025, sided = 1, informationRates = c(0.3, 0.8, 1),
        typeOfDesign = "WToptimum", beta = 0.23
    )
    ## Comparison of the results of TrialDesignGroupSequential object 'x8c' with expected results
    expect_equal(x8c$power, c(0.17785982, 0.63184407, 0.77), tolerance = 1e-07)
    expect_equal(x8c$deltaWT, 0.393, tolerance = 1e-07)
    expect_equal(x8c$alphaSpent, c(0.0067542296, 0.01805085, 0.025), tolerance = 1e-07)
    expect_equal(x8c$criticalValues, c(2.4700754, 2.2239834, 2.1715117), tolerance = 1e-07)
    expect_equal(x8c$stageLevels, c(0.0067542296, 0.013074779, 0.014946256), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x8c), NA)))
        expect_output(print(x8c)$show())
        invisible(capture.output(expect_error(summary(x8c), NA)))
        expect_output(summary(x8c)$show())
        x8cCodeBased <- eval(parse(text = getObjectRCode(x8c, stringWrapParagraphWidth = NULL)))
        expect_equal(x8cCodeBased$power, x8c$power, tolerance = 1e-07)
        expect_equal(x8cCodeBased$deltaWT, x8c$deltaWT, tolerance = 1e-07)
        expect_equal(x8cCodeBased$alphaSpent, x8c$alphaSpent, tolerance = 1e-07)
        expect_equal(x8cCodeBased$criticalValues, x8c$criticalValues, tolerance = 1e-07)
        expect_equal(x8cCodeBased$stageLevels, x8c$stageLevels, tolerance = 1e-07)
        expect_type(names(x8c), "character")
        df <- as.data.frame(x8c)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x8c)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential}
    # @refFS[Formula]{fs:criticalValuesWangTiatis}
    # x8d: optimum WT design with optimizationCriterion = "ASNH1".
    x8d <- getDesignGroupSequential(
        kMax = 4, alpha = 0.025, sided = 2, informationRates = c(0.3, 0.6, 0.8, 1),
        typeOfDesign = "WToptimum", beta = 0.1, optimizationCriterion = "ASNH1"
    )
    ## Comparison of the results of TrialDesignGroupSequential object 'x8d' with expected results
    expect_equal(x8d$power, c(0.27905065, 0.63899817, 0.80432197, 0.9), tolerance = 1e-07)
    expect_equal(x8d$deltaWT, 0.479, tolerance = 1e-07)
    expect_equal(x8d$alphaSpent, c(0.0082066211, 0.015417447, 0.020576899, 0.025), tolerance = 1e-07)
    expect_equal(x8d$criticalValues, c(2.6434487, 2.6052491, 2.5895574, 2.577451), tolerance = 1e-07)
    expect_equal(x8d$stageLevels, c(0.0041033106, 0.0045903747, 0.0048049705, 0.0049765989), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x8d), NA)))
        expect_output(print(x8d)$show())
        invisible(capture.output(expect_error(summary(x8d), NA)))
        expect_output(summary(x8d)$show())
        x8dCodeBased <- eval(parse(text = getObjectRCode(x8d, stringWrapParagraphWidth = NULL)))
        expect_equal(x8dCodeBased$power, x8d$power, tolerance = 1e-07)
        expect_equal(x8dCodeBased$deltaWT, x8d$deltaWT, tolerance = 1e-07)
        expect_equal(x8dCodeBased$alphaSpent, x8d$alphaSpent, tolerance = 1e-07)
        expect_equal(x8dCodeBased$criticalValues, x8d$criticalValues, tolerance = 1e-07)
        expect_equal(x8dCodeBased$stageLevels, x8d$stageLevels, tolerance = 1e-07)
        expect_type(names(x8d), "character")
        df <- as.data.frame(x8d)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x8d)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential}
    # @refFS[Formula]{fs:criticalValuesWangTiatis}
    # x8e: optimum WT design with optimizationCriterion = "ASNsum".
    x8e <- getDesignGroupSequential(
        kMax = 4, alpha = 0.025, sided = 2, informationRates = c(0.3, 0.6, 0.8, 1),
        typeOfDesign = "WToptimum", beta = 0.1, optimizationCriterion = "ASNsum"
    )
    ## Comparison of the results of TrialDesignGroupSequential object 'x8e' with expected results
    expect_equal(x8e$power, c(0.068425642, 0.50677837, 0.76253381, 0.9), tolerance = 1e-07)
    expect_equal(x8e$deltaWT, 0.181, tolerance = 1e-07)
    expect_equal(x8e$alphaSpent, c(0.00055484217, 0.0059655413, 0.01417086, 0.02499999), tolerance = 1e-07)
    expect_equal(x8e$criticalValues, c(3.4527796, 2.7678356, 2.5251363, 2.3516384), tolerance = 1e-07)
    expect_equal(x8e$stageLevels, c(0.00027742108, 0.0028214959, 0.0057826708, 0.0093454685), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
invisible(capture.output(expect_error(print(x8e), NA)))
        expect_output(print(x8e)$show())
        invisible(capture.output(expect_error(summary(x8e), NA)))
        expect_output(summary(x8e)$show())
        # Round trip: rebuild x8e from its auto-generated R code and re-check results.
        x8eCodeBased <- eval(parse(text = getObjectRCode(x8e, stringWrapParagraphWidth = NULL)))
        expect_equal(x8eCodeBased$power, x8e$power, tolerance = 1e-07)
        expect_equal(x8eCodeBased$deltaWT, x8e$deltaWT, tolerance = 1e-07)
        expect_equal(x8eCodeBased$alphaSpent, x8e$alphaSpent, tolerance = 1e-07)
        expect_equal(x8eCodeBased$criticalValues, x8e$criticalValues, tolerance = 1e-07)
        expect_equal(x8eCodeBased$stageLevels, x8e$stageLevels, tolerance = 1e-07)
        expect_type(names(x8e), "character")
        df <- as.data.frame(x8e)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x8e)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test for the Haybittle-Peto design: constant interim critical value
# of 3 with the remaining alpha spent at the final stage.
test_that("'getDesignGroupSequential' with Haybittle Peto boundaries", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential}
    # @refFS[Formula]{fs:criticalValuesHaybittlePeto}
    x9 <- getDesignGroupSequential(kMax = 4, alpha = 0.025, typeOfDesign = "HP")
    ## Comparison of the results of TrialDesignGroupSequential object 'x9' with expected results
    expect_equal(x9$alphaSpent, c(0.001349898, 0.0024617416, 0.0033695882, 0.025), tolerance = 1e-07)
    expect_equal(x9$criticalValues, c(3, 3, 3, 1.9827514), tolerance = 1e-07)
    expect_equal(x9$stageLevels, c(0.001349898, 0.001349898, 0.001349898, 0.023697604), tolerance = 1e-07)
    # Extended checks only when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x9), NA)))
        expect_output(print(x9)$show())
        invisible(capture.output(expect_error(summary(x9), NA)))
        expect_output(summary(x9)$show())
        x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL)))
        expect_equal(x9CodeBased$alphaSpent, x9$alphaSpent, tolerance = 1e-07)
        expect_equal(x9CodeBased$criticalValues, x9$criticalValues, tolerance = 1e-07)
        expect_equal(x9CodeBased$stageLevels, x9$stageLevels, tolerance = 1e-07)
        expect_type(names(x9), "character")
        df <- as.data.frame(x9)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x9)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression tests for Pampallona-Tsiatis designs with binding and non-binding
# futility bounds.
test_that("'getDesignGroupSequential' with Pampallona Tsiatis boundaries, binding and non-binding futility bounds", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential}
    # @refFS[Formula]{fs:criticalValuesPampallonaTiatis}
    # x10: one-sided PT design with binding futility bounds.
    x10 <- getDesignGroupSequential(
        kMax = 3, alpha = 0.035, beta = 0.1, informationRates = c(0.3, 0.8, 1),
        typeOfDesign = "PT", sided = 1, bindingFutility = TRUE,
        deltaPT1 = 0.2, deltaPT0 = 0.3
    )
    ## Comparison of the results of TrialDesignGroupSequential object 'x10' with expected results
    expect_equal(x10$power, c(0.19834666, 0.83001122, 0.9), tolerance = 1e-07)
    expect_equal(x10$futilityBounds, c(-0.042079545, 1.4407359), tolerance = 1e-07)
    expect_equal(x10$alphaSpent, c(0.0038332428, 0.024917169, 0.035), tolerance = 1e-07)
    expect_equal(x10$betaSpent, c(0.031375368, 0.080734151, 0.1), tolerance = 1e-07)
    expect_equal(x10$criticalValues, c(2.6664156, 1.9867225, 1.8580792), tolerance = 1e-07)
    expect_equal(x10$stageLevels, c(0.0038332428, 0.023476576, 0.031578886), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x10), NA)))
        expect_output(print(x10)$show())
        invisible(capture.output(expect_error(summary(x10), NA)))
        expect_output(summary(x10)$show())
        x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL)))
        expect_equal(x10CodeBased$power, x10$power, tolerance = 1e-07)
        expect_equal(x10CodeBased$futilityBounds, x10$futilityBounds, tolerance = 1e-07)
        expect_equal(x10CodeBased$alphaSpent, x10$alphaSpent, tolerance = 1e-07)
        expect_equal(x10CodeBased$betaSpent, x10$betaSpent, tolerance = 1e-07)
        expect_equal(x10CodeBased$criticalValues,
x10$criticalValues, tolerance = 1e-07) expect_equal(x10CodeBased$stageLevels, x10$stageLevels, tolerance = 1e-07) expect_type(names(x10), "character") df <- as.data.frame(x10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:criticalValuesPampallonaTiatis} x11 <- getDesignGroupSequential( kMax = 3, alpha = 0.035, beta = 0.05, informationRates = c(0.3, 0.8, 1), typeOfDesign = "PT", sided = 2, bindingFutility = TRUE, deltaPT1 = 0.2, deltaPT0 = 0.3 ) ## Comparison of the results of TrialDesignGroupSequential object 'x11' with expected results expect_equal(x11$power, c(0.16615376, 0.88013007, 0.94999991), tolerance = 1e-07) expect_equal(x11$futilityBounds, c(NA_real_, 1.671433), tolerance = 1e-07) expect_equal(x11$alphaSpent, c(0.0019236202, 0.022017713, 0.035), tolerance = 1e-07) expect_equal(x11$betaSpent, c(0, 0.035025978, 0.05), tolerance = 1e-07) expect_equal(x11$criticalValues, c(3.1017782, 2.3111074, 2.1614596), tolerance = 1e-07) expect_equal(x11$stageLevels, c(0.00096181012, 0.010413463, 0.015329928), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x11), NA))) expect_output(print(x11)$show()) invisible(capture.output(expect_error(summary(x11), NA))) expect_output(summary(x11)$show()) x11CodeBased <- eval(parse(text = getObjectRCode(x11, stringWrapParagraphWidth = NULL))) expect_equal(x11CodeBased$power, x11$power, tolerance = 1e-07) expect_equal(x11CodeBased$futilityBounds, x11$futilityBounds, tolerance = 1e-07) expect_equal(x11CodeBased$alphaSpent, x11$alphaSpent, tolerance = 1e-07) expect_equal(x11CodeBased$betaSpent, x11$betaSpent, tolerance = 1e-07) expect_equal(x11CodeBased$criticalValues, x11$criticalValues, tolerance = 1e-07) expect_equal(x11CodeBased$stageLevels, x11$stageLevels, tolerance = 
1e-07) expect_type(names(x11), "character") df <- as.data.frame(x11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:criticalValuesPampallonaTiatis} x12 <- getDesignGroupSequential( kMax = 3, alpha = 0.035, beta = 0.05, informationRates = c(0.3, 0.8, 1), typeOfDesign = "PT", sided = 2, bindingFutility = FALSE, deltaPT1 = 0.2, deltaPT0 = 0.3 ) ## Comparison of the results of TrialDesignGroupSequential object 'x12' with expected results expect_equal(x12$power, c(0.15712278, 0.87874666, 0.94999994), tolerance = 1e-07) expect_equal(x12$futilityBounds, c(NA_real_, 1.7090472), tolerance = 1e-07) expect_equal(x12$alphaSpent, c(0.0015647742, 0.019435851, 0.035), tolerance = 1e-07) expect_equal(x12$betaSpent, c(0, 0.034947415, 0.05), tolerance = 1e-07) expect_equal(x12$criticalValues, c(3.1623945, 2.356272, 2.2036998), tolerance = 1e-07) expect_equal(x12$stageLevels, c(0.00078238709, 0.0092296971, 0.013772733), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x12), NA))) expect_output(print(x12)$show()) invisible(capture.output(expect_error(summary(x12), NA))) expect_output(summary(x12)$show()) x12CodeBased <- eval(parse(text = getObjectRCode(x12, stringWrapParagraphWidth = NULL))) expect_equal(x12CodeBased$power, x12$power, tolerance = 1e-07) expect_equal(x12CodeBased$futilityBounds, x12$futilityBounds, tolerance = 1e-07) expect_equal(x12CodeBased$alphaSpent, x12$alphaSpent, tolerance = 1e-07) expect_equal(x12CodeBased$betaSpent, x12$betaSpent, tolerance = 1e-07) expect_equal(x12CodeBased$criticalValues, x12$criticalValues, tolerance = 1e-07) expect_equal(x12CodeBased$stageLevels, x12$stageLevels, tolerance = 1e-07) expect_type(names(x12), "character") df <- as.data.frame(x12) expect_s3_class(df, "data.frame") 
expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x12) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:criticalValuesPampallonaTiatis} x13 <- getDesignGroupSequential( kMax = 4, alpha = 0.035, beta = 0.05, informationRates = c(0.2, 0.4, 0.8, 1), typeOfDesign = "PT", sided = 1, bindingFutility = FALSE, deltaPT1 = 0.1, deltaPT0 = 0.45 ) ## Comparison of the results of TrialDesignGroupSequential object 'x13' with expected results expect_equal(x13$power, c(0.029518378, 0.38853658, 0.90760886, 0.95), tolerance = 1e-07) expect_equal(x13$futilityBounds, c(-0.41499566, 0.38106631, 1.4738957), tolerance = 1e-07) expect_equal(x13$alphaSpent, c(0.00014050218, 0.0030266381, 0.0199021, 0.035), tolerance = 1e-07) expect_equal(x13$betaSpent, c(0.015413989, 0.028721092, 0.043215976, 0.049999999), tolerance = 1e-07) expect_equal(x13$criticalValues, c(3.6322099, 2.7527004, 2.0861568, 1.9080201), tolerance = 1e-07) expect_equal(x13$stageLevels, c(0.00014050218, 0.002955298, 0.018482211, 0.02819431), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x13), NA))) expect_output(print(x13)$show()) invisible(capture.output(expect_error(summary(x13), NA))) expect_output(summary(x13)$show()) x13CodeBased <- eval(parse(text = getObjectRCode(x13, stringWrapParagraphWidth = NULL))) expect_equal(x13CodeBased$power, x13$power, tolerance = 1e-07) expect_equal(x13CodeBased$futilityBounds, x13$futilityBounds, tolerance = 1e-07) expect_equal(x13CodeBased$alphaSpent, x13$alphaSpent, tolerance = 1e-07) expect_equal(x13CodeBased$betaSpent, x13$betaSpent, tolerance = 1e-07) expect_equal(x13CodeBased$criticalValues, x13$criticalValues, tolerance = 1e-07) expect_equal(x13CodeBased$stageLevels, x13$stageLevels, tolerance = 1e-07) expect_type(names(x13), "character") df <- as.data.frame(x13) expect_s3_class(df, "data.frame") 
expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x13) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignGroupSequential} # @refFS[Formula]{fs:criticalValuesPampallonaTiatis} x14 <- getDesignGroupSequential( kMax = 6, alpha = 0.25, beta = 0.01, typeOfDesign = "PT", sided = 2, bindingFutility = TRUE, deltaPT1 = 0.02, deltaPT0 = 0.49, twoSidedPower = TRUE ) ## Comparison of the results of TrialDesignGroupSequential object 'x14' with expected results expect_equal(x14$power, c(0.076493626, 0.52863814, 0.83456395, 0.94950066, 0.98346861, 0.99), tolerance = 1e-07) expect_equal(x14$futilityBounds, c(NA_real_, NA_real_, 0.12661836, 0.55308248, 0.92800873), tolerance = 1e-07) expect_equal(x14$alphaSpent, c(0.0027626806, 0.03301126, 0.088857236, 0.15440485, 0.2156594, 0.25), tolerance = 1e-07) expect_equal(x14$betaSpent, c(0, 0, 0.0026196847, 0.0066701045, 0.008949341, 0.01), tolerance = 1e-07) expect_equal(x14$criticalValues, c(2.9929798, 2.1458995, 1.7663859, 1.5385619, 1.3822869, 1.2664591), tolerance = 1e-07) expect_equal(x14$stageLevels, c(0.0013813403, 0.015940498, 0.038665568, 0.061955638, 0.08344182, 0.10267438), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x14), NA))) expect_output(print(x14)$show()) invisible(capture.output(expect_error(summary(x14), NA))) expect_output(summary(x14)$show()) x14CodeBased <- eval(parse(text = getObjectRCode(x14, stringWrapParagraphWidth = NULL))) expect_equal(x14CodeBased$power, x14$power, tolerance = 1e-07) expect_equal(x14CodeBased$futilityBounds, x14$futilityBounds, tolerance = 1e-07) expect_equal(x14CodeBased$alphaSpent, x14$alphaSpent, tolerance = 1e-07) expect_equal(x14CodeBased$betaSpent, x14$betaSpent, tolerance = 1e-07) expect_equal(x14CodeBased$criticalValues, x14$criticalValues, tolerance = 1e-07) expect_equal(x14CodeBased$stageLevels, x14$stageLevels, tolerance = 1e-07) 
expect_type(names(x14), "character") df <- as.data.frame(x14) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x14) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignGroupSequential' with type of design = 'noEarlyEfficacy'", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getDesignInverseNormal} # @refFS[Formula]{fs:alphaSpendingConcept} x15 <- getDesignGroupSequential( typeOfDesign = "noEarlyEfficacy", futilityBounds = c(0, 0.5) ) ## Comparison of the results of TrialDesignGroupSequential object 'x15' with expected results expect_equal(x15$alphaSpent, c(6.6613381e-16, -1.3145041e-13, 0.025), tolerance = 1e-07) expect_equal(x15$criticalValues, c(Inf, Inf, 1.959964), tolerance = 1e-07) expect_equal(x15$stageLevels, c(0, 0, 0.025), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x15), NA))) expect_output(print(x15)$show()) invisible(capture.output(expect_error(summary(x15), NA))) expect_output(summary(x15)$show()) x15CodeBased <- eval(parse(text = getObjectRCode(x15, stringWrapParagraphWidth = NULL))) expect_equal(x15CodeBased$alphaSpent, x15$alphaSpent, tolerance = 1e-07) expect_equal(x15CodeBased$criticalValues, x15$criticalValues, tolerance = 1e-07) expect_equal(x15CodeBased$stageLevels, x15$stageLevels, tolerance = 1e-07) expect_type(names(x15), "character") df <- as.data.frame(x15) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x15) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x16 <- getDesignGroupSequential( typeOfDesign = "noEarlyEfficacy", futilityBounds = c(0, 0.5, 1), bindingFutility = TRUE ) ## Comparison of the results of TrialDesignGroupSequential object 'x16' with expected results expect_equal(x16$alphaSpent, c(6.6613381e-16, 1.110223e-15, 4.8067383e-11, 0.02499999), tolerance = 1e-07) expect_equal(x16$criticalValues, 
c(Inf, Inf, Inf, 1.8848634), tolerance = 1e-07) expect_equal(x16$stageLevels, c(0, 0, 0, 0.029724142), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x16), NA))) expect_output(print(x16)$show()) invisible(capture.output(expect_error(summary(x16), NA))) expect_output(summary(x16)$show()) x16CodeBased <- eval(parse(text = getObjectRCode(x16, stringWrapParagraphWidth = NULL))) expect_equal(x16CodeBased$alphaSpent, x16$alphaSpent, tolerance = 1e-07) expect_equal(x16CodeBased$criticalValues, x16$criticalValues, tolerance = 1e-07) expect_equal(x16CodeBased$stageLevels, x16$stageLevels, tolerance = 1e-07) expect_type(names(x16), "character") df <- as.data.frame(x16) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x16) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignInverseNormal': illegal arguments throw exceptions as expected", { expect_error(getDesignInverseNormal( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4 ), paste0( "Conflicting arguments: length of 'userAlphaSpending' (5) ", "must be equal to 'kMax' (4)" ), fixed = TRUE ) expect_error(getDesignInverseNormal( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021) ), paste0( "'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_5 <= alpha = 0.021" ), fixed = TRUE ) expect_error(getDesignInverseNormal( typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02 ), paste0( "'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. 
<= alpha_3 <= alpha = 0.02" ), fixed = TRUE ) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = NA_real_), "Missing argument: parameter 'deltaWT' must be specified in design", fixed = TRUE ) expect_error(getDesignInverseNormal( typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM, optimizationCriterion = "x" ), "Illegal argument: optimization criterion must be one of the following: 'ASNH1', 'ASNIFH1', 'ASNsum'", fixed = TRUE ) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_KD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE ) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_HSD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE ) expect_error(getDesignInverseNormal(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER), "Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE ) expect_error(getDesignInverseNormal( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = "x" ), "Illegal argument: type of beta spending must be one of the following: 'none', 'bsP', 'bsOF', 'bsKD', 'bsHSD', 'bsUser'", fixed = TRUE ) expect_error(getDesignInverseNormal( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER ), "Missing argument: parameter 'userBetaSpending' must be specified in design", fixed = TRUE ) expect_error(getDesignInverseNormal( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2) ), paste0( "Conflicting arguments: length of 'userBetaSpending' (2) must ", "be equal to length of 'informationRates' (3)" ), fixed = TRUE ) expect_error(getDesignInverseNormal( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending 
= C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.2, 0.1, 0.05) ), paste0( "'userBetaSpending' = c(0.2, 0.1, 0.05) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.05" ), fixed = TRUE ) expect_error(getDesignInverseNormal( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2, 0.3), beta = 0.2 ), paste0( "'userBetaSpending' = c(0.1, 0.2, 0.3) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.2" ), fixed = TRUE ) expect_error(getDesignInverseNormal(kMax = Inf), paste0( "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]" ), fixed = TRUE ) expect_error(getDesignInverseNormal(kMax = -Inf), paste0( "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]" ), fixed = TRUE ) expect_error(getDesignInverseNormal(kMax = -Inf), "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -10), "Argument out of bounds: 'kMax' (-10) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -9), "Argument out of bounds: 'kMax' (-9) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -8), "Argument out of bounds: 'kMax' (-8) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -7), "Argument out of bounds: 'kMax' (-7) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -6), "Argument out of bounds: 'kMax' (-6) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -4), "Argument out of bounds: 'kMax' (-4) is out of bounds [1; 20]", fixed = TRUE) 
expect_error(getDesignInverseNormal(kMax = -3), "Argument out of bounds: 'kMax' (-3) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -2), "Argument out of bounds: 'kMax' (-2) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = -1), "Argument out of bounds: 'kMax' (-1) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 0), "Argument out of bounds: 'kMax' (0) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 21), "Argument out of bounds: 'kMax' (21) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 22), "Argument out of bounds: 'kMax' (22) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 23), "Argument out of bounds: 'kMax' (23) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 24), "Argument out of bounds: 'kMax' (24) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 25), "Argument out of bounds: 'kMax' (25) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 26), "Argument out of bounds: 'kMax' (26) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 27), "Argument out of bounds: 'kMax' (27) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 28), "Argument out of bounds: 'kMax' (28) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 29), "Argument out of bounds: 'kMax' (29) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 30), "Argument out of bounds: 'kMax' (30) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = Inf), "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 2, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length 
of 'futilityBounds' (4) must be equal to 'kMax' (2) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 3, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (3) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 4, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (4) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 6, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (6) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 7, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (7) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 8, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (8) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 9, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (9) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 10, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (10) - 1", fixed = TRUE) expect_warning(expect_error(getDesignInverseNormal(kMax = 11, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (11) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 12, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (12) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 13, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (13) - 1", fixed = TRUE)) 
expect_warning(expect_error(getDesignInverseNormal(kMax = 14, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (14) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 15, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (15) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 16, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (16) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 17, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (17) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 18, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (18) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 19, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (19) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 20, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (20) - 1", fixed = TRUE)) expect_error(getDesignInverseNormal(futilityBounds = c(-7, 5)), "Illegal argument: 'futilityBounds' (-7, 5) too extreme for this situation", fixed = TRUE ) expect_error(getDesignInverseNormal(futilityBounds = c(1, 7)), "Argument out of bounds: 'futilityBounds' (1, 7) is out of bounds [-Inf; 6]", fixed = TRUE ) }) test_that("'getDesignGroupSequential': illegal arguments throw exceptions as expected", { expect_error(getDesignGroupSequential( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4 ), paste0( "Conflicting arguments: length of 
'userAlphaSpending' (5) ", "must be equal to 'kMax' (4)" ), fixed = TRUE ) expect_error(getDesignGroupSequential( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021) ), paste0( "'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_5 <= alpha = 0.021" ), fixed = TRUE ) expect_error(getDesignGroupSequential( typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02 ), paste0( "'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ", "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_3 <= alpha = 0.02" ), fixed = TRUE ) expect_equal(getDesignGroupSequential( typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.02, 0.023) )$alpha, 0.023) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_WT, deltaWT = NA_real_), "Missing argument: parameter 'deltaWT' must be specified in design", fixed = TRUE ) expect_error(getDesignGroupSequential( typeOfDesign = C_TYPE_OF_DESIGN_WT_OPTIMUM, optimizationCriterion = "x" ), "Illegal argument: optimization criterion must be one of the following: 'ASNH1', 'ASNIFH1', 'ASNsum'", fixed = TRUE ) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_KD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE ) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_HSD, gammaA = NA_real_), "Missing argument: parameter 'gammaA' must be specified in design", fixed = TRUE ) expect_error(getDesignGroupSequential(typeOfDesign = C_TYPE_OF_DESIGN_AS_USER), "Missing argument: parameter 'userAlphaSpending' must be specified in design", fixed = TRUE ) expect_error(getDesignGroupSequential( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = "x" ), paste0( "Illegal argument: type of beta spending must be one of the following: 
", "'none', 'bsP', 'bsOF', 'bsKD', 'bsHSD', 'bsUser'" ), fixed = TRUE ) expect_error(getDesignGroupSequential( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER ), "Missing argument: parameter 'userBetaSpending' must be specified in design", fixed = TRUE ) expect_error(getDesignGroupSequential( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2) ), paste0( "Conflicting arguments: length of 'userBetaSpending' (2) must ", "be equal to length of 'informationRates' (3)" ), fixed = TRUE ) expect_error(getDesignGroupSequential( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.2, 0.1, 0.05) ), paste0( "'userBetaSpending' = c(0.2, 0.1, 0.05) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. <= beta_3 <= beta = 0.05" ), fixed = TRUE ) expect_error(getDesignGroupSequential( typeOfDesign = C_TYPE_OF_DESIGN_AS_USER, userAlphaSpending = c(0.01, 0.02, 0.025), typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER, userBetaSpending = c(0.1, 0.2, 0.3), beta = 0.2 ), paste0( "'userBetaSpending' = c(0.1, 0.2, 0.3) must be a vector that satisfies the ", "following condition: 0 <= beta_1 <= .. 
<= beta_3 <= beta = 0.2" ), fixed = TRUE ) expect_error(getDesignGroupSequential(kMax = Inf), paste0( "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]" ), fixed = TRUE ) expect_error(getDesignGroupSequential(kMax = -Inf), paste0( "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ", C_KMAX_UPPER_BOUND, "]" ), fixed = TRUE ) expect_error(getDesignInverseNormal(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 0), "Argument out of bounds: 'kMax' (0) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 21), "Argument out of bounds: 'kMax' (21) is out of bounds [1; 20]", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 2, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (2) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 3, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (3) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 4, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (4) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 6, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (6) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 7, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (7) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 8, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (8) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 9, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' 
(9) - 1", fixed = TRUE) expect_error(getDesignInverseNormal(kMax = 10, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (10) - 1", fixed = TRUE) expect_warning(expect_error(getDesignInverseNormal(kMax = 11, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (11) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 12, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (12) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 13, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (13) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 14, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (14) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 15, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (15) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 16, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (16) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 17, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (17) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 18, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (18) - 1", fixed = TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 19, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (19) - 1", fixed 
= TRUE)) expect_warning(expect_error(getDesignInverseNormal(kMax = 20, futilityBounds = c(0, 0, 1, 2)), "Conflicting arguments: length of 'futilityBounds' (4) must be equal to 'kMax' (20) - 1", fixed = TRUE)) expect_error(getDesignGroupSequential(futilityBounds = c(-7, 5)), "Illegal argument: 'futilityBounds' (-7, 5) too extreme for this situation", fixed = TRUE ) expect_error(getDesignGroupSequential(futilityBounds = c(1, 7)), "Argument out of bounds: 'futilityBounds' (1, 7) is out of bounds [-Inf; 6]", fixed = TRUE ) }) rpact/tests/testthat/test-f_simulation_performance_score.R0000644000176200001440000001361314450463134023742 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_simulation_multiarm_survival.R ## | Creation date: 06 February 2023, 12:14:51 ## | File version: $Revision: 7147 $ ## | Last changed: $Date: 2023-07-03 08:10:31 +0200 (Mo, 03 Jul 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Performance Score Functions") test_that("Simulation performance score functions throw errors when arguments are missing or wrong", { expect_error(getPerformanceScore()) }) # Mock a correct SimulationResult object createCorrectSimulationResultObject <- function() { simulationResult <- list( .design = list( bindingFutility = TRUE, kMax = 2, alpha = 0.05, beta = 0.2 ), show = function(...) {}, .show = function(...) 
{}, alternative = c(0.5, 1, 1.5), plannedSubjects = c(30, 60), maxNumberOfSubjectsPerStage = c(30, 60), maxNumberOfIterations = 1000, normalApproximation = TRUE, groups = 2, stDev = 1, conditionalPower = NA, .data = data.frame( alternative = rep(c(0.5, 1, 1.5), times = 1000), stageNumber = sample(c(1, 2), size = 3000, replace = TRUE), numberOfCumulatedSubjects = sample(c(30, 60), size = 3000, replace = TRUE), conditionalPowerAchieved = runif(3000, 0, 1) ) ) class(simulationResult) <- c("SimulationResults", class(simulationResult)) return(simulationResult) } .assertIsSimulationResults <- function(simulationResult) { if (!inherits(simulationResult, "SimulationResults")) { stop("Expected 'simulationResult' to be an object of class 'SimulationResults'") } } createNonMeansSimulationResultObject <- function() { simulationResult <- createCorrectSimulationResultObject() class(simulationResult) <- c("SimulationResultsNonMeans", class(simulationResult)) return(simulationResult) } test_that("getPerformanceScore handles SimulationResultsMeans", { simulationResult <- createNonMeansSimulationResultObject() expect_error( getPerformanceScore(simulationResult), "Illegal argument: performance score so far implemented only for single comparisons with continuous endpoints" ) }) createCorrectSimulationResultObject <- function() { simulationResult <- list( .design = list( bindingFutility = TRUE, kMax = 2, alpha = 0.05, beta = 0.2 ), show = function(...) {}, .show = function(...) 
{}, alternative = c(0.5, 1, 1.5), plannedSubjects = c(30, 60), maxNumberOfSubjectsPerStage = c(30, 60), maxNumberOfIterations = 1000, normalApproximation = TRUE, groups = 2, stDev = 1, conditionalPower = NA, .data = data.frame( alternative = rep(c(0.5, 1, 1.5), times = 1000), stageNumber = sample(c(1, 2), size = 3000, replace = TRUE), numberOfCumulatedSubjects = sample(c(30, 60), size = 3000, replace = TRUE), conditionalPowerAchieved = runif(3000, 0, 1) ) ) class(simulationResult) <- c("SimulationResultsMeans", "SimulationResults", class(simulationResult)) return(simulationResult) } # 1. Test for a simulationResult that does not have `bindingFutility = TRUE`. test_that("getPerformanceScore handles non-binding futility", { simulationResult <- createCorrectSimulationResultObject() simulationResult$.design$bindingFutility <- FALSE expect_warning(getPerformanceScore(simulationResult)) }) # 2. Test for a simulationResult that does not have `kMax = 2`. test_that("getPerformanceScore handles non-two-stage designs", { simulationResult <- createCorrectSimulationResultObject() simulationResult$.design$kMax <- 3 expect_error( getPerformanceScore(simulationResult), "Illegal argument: performance score so far implemented only for two-stage designs" ) }) # 3. Test for a simulationResult that has a non-null `conditionalPower`. test_that("getPerformanceScore handles non-null conditionalPower", { simulationResult <- createCorrectSimulationResultObject() simulationResult$conditionalPower <- 0.8 suppressWarnings(expect_type(getPerformanceScore(simulationResult), "S4")) }) # 4. Test to verify the correctness of the performance score calculation. test_that("getPerformanceScore calculates performance score correctly", { simulationResult <- createCorrectSimulationResultObject() suppressWarnings(scores <- getPerformanceScore(simulationResult)) expect_type(scores, "S4") }) # 5. Test to verify that the warning about the function being experimental is issued. 
test_that("getPerformanceScore issues warning", {
    simulationResult <- createCorrectSimulationResultObject()
    expect_warning(
        getPerformanceScore(simulationResult),
        "The performance score function is experimental and hence not fully validated"
    )
})

# 6. Test to check if the correct values are returned in the resultList
#    (only type-checked; the mock data is randomly generated).
test_that("getPerformanceScore returns correct resultList", {
    simulationResult <- createCorrectSimulationResultObject()
    suppressWarnings(result <- getPerformanceScore(simulationResult))
    expect_type(result, "S4")
})
rpact/tests/testthat/test-f_simulation_enrichment_survival.R0000644000176200001440000020106214446750002024330 0ustar liggesusers## |
## | *Unit tests*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | RPACT package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File name: test-f_simulation_enrichment_survival.R
## | Creation date: 27 June 2023, 14:05:57
## | File version: $Revision: 7139 $
## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

test_plan_section("Testing Simulation Enrichment Survival Function")


# Regression test: all expected values below are pins produced with
# seed = 123 and 100 iterations; do not edit the constants by hand.
test_that("'getSimulationEnrichmentSurvival': gMax = 2", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:enrichmentDesigns}
    # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment}
    # @refFS[Sec.]{fs:subsec:adaptiveClosedTestProcedureEnrichment}
    # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment}
    # @refFS[Sec.]{fs:sec:simulationFunctions}
    # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns}
    # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification}
    # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichment}
    # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment}
    # @refFS[Formula]{fs:adjustedPValueSidakEnrichment}
    # @refFS[Formula]{fs:adjustedPValueSimesEnrichment}
    # @refFS[Formula]{fs:adjustedPValueSpiessensDeboisEnrichmentSurvival}
    # @refFS[Formula]{fs:simulationEnrichmentSurvivalEvents}
    # @refFS[Formula]{fs:simulationEnrichmentSurvivalEventsWithControls}
    # @refFS[Formula]{fs:simulationEnrichmentSurvivalLogRanks}
    # @refFS[Formula]{fs:simulationEnrichmentSurvivalAdjustedPrevalances}
    # @refFS[Formula]{fs:simulationEnrichmentSelections}
    # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification}
    # @refFS[Formula]{fs:enrichmentRejectionRule}

    # Hazard ratios: column 1 varies over 7 effect scenarios for subgroup S,
    # column 2 is constant (1.432188) for the remaining population R.
    hazardRatios <- matrix(c(
        1.000000, 1.207775, 1.432188, 1.676140, 1.943358, 2.238755, 2.568980,
        1.432188, 1.432188, 1.432188, 1.432188, 1.432188, 1.432188, 1.432188
    ), ncol = 2)
    effectList1 <- list(
        subGroups = c("S", "R"), prevalences = c(0.4, 0.6),
        hazardRatios = hazardRatios, piControls = c(0.1, 0.3)
    )
    design <- getDesignInverseNormal(
        informationRates = c(0.3, 1),
        typeOfDesign = "asUser", userAlphaSpending = c(0.01, 0.025)
    )

    # Scenario 1: r-best selection (rValue = 2) with Spiessens & Debois
    # intersection test.
    suppressWarnings(simResult1 <- getSimulationEnrichmentSurvival(design,
        plannedEvents = c(40, 120), effectList = effectList1,
        maxNumberOfIterations = 100, allocationRatioPlanned = 1,
        typeOfSelection = "rbest", rValue = 2,
        intersectionTest = "SpiessensDebois", directionUpper = TRUE, seed = 123
    ))

    ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult1' with expected results
    expect_equal(simResult1$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100))
    expect_equal(simResult1$iterations[2, ], c(100, 99, 99, 98, 95, 98, 91))
    expect_equal(simResult1$rejectAtLeastOne, c(0.25, 0.26, 0.44, 0.49, 0.63, 0.52, 0.78), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult1$rejectedPopulationsPerStage)),
        c(0, 0, 0.02, 0.05, 0.04, 0.07, 0.03, 0.14, 0.09, 0.24, 0.06,
        0.36, 0.2, 0.47, 0.08, 0.17, 0.02, 0.21, 0.09, 0.33, 0.1, 0.37,
        0.12, 0.47, 0.09, 0.42, 0.15, 0.52), tolerance = 1e-07)
    expect_equal(simResult1$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(simResult1$earlyStop[1, ], c(0, 0.01, 0.01, 0.02, 0.05, 0.02, 0.09), tolerance = 1e-07)
    expect_equal(simResult1$successPerStage[1, ], c(0, 0.01, 0.01, 0.02, 0.05, 0.02, 0.09), tolerance = 1e-07)
    expect_equal(simResult1$successPerStage[2, ], c(0, 0.03, 0.08, 0.13, 0.24, 0.39, 0.47), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult1$selectedPopulations)),
        c(1, 1, 1, 0.99, 1, 0.99, 1, 0.98, 1, 0.95, 1, 0.98, 1, 0.91,
        1, 1, 1, 0.99, 1, 0.99, 1, 0.98, 1, 0.95, 1, 0.98, 1, 0.91), tolerance = 1e-07)
    expect_equal(simResult1$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2))
    expect_equal(simResult1$numberOfPopulations[2, ], c(2, 2, 2, 2, 2, 2, 2))
    expect_equal(simResult1$expectedNumberOfEvents, c(120, 119.2, 119.2, 118.4, 116, 118.4, 112.8), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult1$singleNumberOfEventsPerStage)),
        c(6.4000002, 12.8, 6.9157979, 13.831596, 7.4434489, 14.886898,
        7.9849789, 15.969958, 8.5428984, 17.085797, 9.1204104, 18.240821,
        9.7216794, 19.443359, 33.6, 67.2, 33.084202, 66.168404, 32.556551,
        65.113102, 32.015021, 64.030042, 31.457102, 62.914203, 30.87959,
        61.759179, 30.278321, 60.556641), tolerance = 1e-07)
    expect_equal(simResult1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(simResult1$conditionalPowerAchieved[2, ],
        c(0.15269538, 0.28989266, 0.28620537, 0.37651752, 0.47283912,
        0.47668973, 0.531968), tolerance = 1e-07)

    # Round-trip checks (print/summary/code regeneration) only run in the
    # complete unit test set.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(simResult1), NA)))
        expect_output(print(simResult1)$show())
        invisible(capture.output(expect_error(summary(simResult1), NA)))
        expect_output(summary(simResult1)$show())
        suppressWarnings(simResult1CodeBased <- eval(parse(text = getObjectRCode(simResult1, stringWrapParagraphWidth = NULL))))
        expect_equal(simResult1CodeBased$eventsPerStage, simResult1$eventsPerStage, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$iterations, simResult1$iterations, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$rejectAtLeastOne, simResult1$rejectAtLeastOne, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$rejectedPopulationsPerStage, simResult1$rejectedPopulationsPerStage, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$futilityPerStage, simResult1$futilityPerStage, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$earlyStop, simResult1$earlyStop, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$successPerStage, simResult1$successPerStage, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$selectedPopulations, simResult1$selectedPopulations, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$numberOfPopulations, simResult1$numberOfPopulations, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$expectedNumberOfEvents, simResult1$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$singleNumberOfEventsPerStage, simResult1$singleNumberOfEventsPerStage, tolerance = 1e-05)
        expect_equal(simResult1CodeBased$conditionalPowerAchieved, simResult1$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(simResult1), "character")
        df <- as.data.frame(simResult1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simResult1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Scenario 2: identical design/effects, but Simes intersection test.
    suppressWarnings(simResult2 <- getSimulationEnrichmentSurvival(design,
        plannedEvents = c(40, 120), effectList = effectList1,
        maxNumberOfIterations = 100, allocationRatioPlanned = 1,
        typeOfSelection = "rbest", rValue = 2,
        intersectionTest = "Simes", directionUpper = TRUE, seed = 123
    ))

    ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult2' with expected results
    expect_equal(simResult2$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100))
    expect_equal(simResult2$iterations[2, ], c(100, 99, 99, 98, 93, 98, 91))
    expect_equal(simResult2$rejectAtLeastOne, c(0.25, 0.24, 0.4, 0.47, 0.62, 0.52, 0.79), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult2$rejectedPopulationsPerStage)),
        c(0, 0.02, 0.02, 0.05, 0.04, 0.07, 0.03, 0.14, 0.11, 0.22, 0.06,
        0.36, 0.2, 0.48, 0.08, 0.16, 0.02, 0.19, 0.09, 0.29, 0.1, 0.35,
        0.14, 0.44, 0.09, 0.41, 0.15, 0.53), tolerance = 1e-07)
    expect_equal(simResult2$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(simResult2$earlyStop[1, ], c(0, 0.01, 0.01, 0.02, 0.07, 0.02, 0.09), tolerance = 1e-07)
    expect_equal(simResult2$successPerStage[1, ], c(0, 0.01, 0.01, 0.02, 0.07, 0.02, 0.09), tolerance = 1e-07)
    expect_equal(simResult2$successPerStage[2, ], c(0.01, 0.03, 0.08, 0.13, 0.22, 0.38, 0.48), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult2$selectedPopulations)),
        c(1, 1, 1, 0.99, 1, 0.99, 1, 0.98, 1, 0.93, 1, 0.98, 1, 0.91,
        1, 1, 1, 0.99, 1, 0.99, 1, 0.98, 1, 0.93, 1, 0.98, 1, 0.91), tolerance = 1e-07)
    expect_equal(simResult2$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2))
    expect_equal(simResult2$numberOfPopulations[2, ], c(2, 2, 2, 2, 2, 2, 2))
    expect_equal(simResult2$expectedNumberOfEvents, c(120, 119.2, 119.2, 118.4, 114.4, 118.4, 112.8), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult2$singleNumberOfEventsPerStage)),
        c(6.4000002, 12.8, 6.9157979, 13.831596, 7.4434489, 14.886898,
        7.9849789, 15.969958, 8.5428984, 17.085797, 9.1204104, 18.240821,
        9.7216794, 19.443359, 33.6, 67.2, 33.084202, 66.168404, 32.556551,
        65.113102, 32.015021, 64.030042, 31.457102, 62.914203, 30.87959,
        61.759179, 30.278321, 60.556641), tolerance = 1e-07)
    expect_equal(simResult2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(simResult2$conditionalPowerAchieved[2, ],
        c(0.15269538, 0.28989266, 0.28620537, 0.37651752, 0.46167737,
        0.47668973, 0.531968), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(simResult2), NA)))
        expect_output(print(simResult2)$show())
        invisible(capture.output(expect_error(summary(simResult2), NA)))
        expect_output(summary(simResult2)$show())
        suppressWarnings(simResult2CodeBased <- eval(parse(text = getObjectRCode(simResult2, stringWrapParagraphWidth = NULL))))
        expect_equal(simResult2CodeBased$eventsPerStage, simResult2$eventsPerStage, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$iterations, simResult2$iterations, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$rejectAtLeastOne, simResult2$rejectAtLeastOne, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$rejectedPopulationsPerStage, simResult2$rejectedPopulationsPerStage, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$futilityPerStage, simResult2$futilityPerStage, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$earlyStop, simResult2$earlyStop, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$successPerStage, simResult2$successPerStage, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$selectedPopulations, simResult2$selectedPopulations, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$numberOfPopulations, simResult2$numberOfPopulations, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$expectedNumberOfEvents, simResult2$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$singleNumberOfEventsPerStage, simResult2$singleNumberOfEventsPerStage, tolerance = 1e-05)
        expect_equal(simResult2CodeBased$conditionalPowerAchieved, simResult2$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(simResult2), "character")
        df <- as.data.frame(simResult2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simResult2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Effect list without piControls (exponential event times derived from
    # hazard ratios only).
    effectList2 <- list(
        subGroups = c("S", "R"), prevalences = c(0.4, 0.6),
        hazardRatios = hazardRatios
    )

    # Scenario 3: "best" selection with Sidak intersection test.
    suppressWarnings(simResult3 <- getSimulationEnrichmentSurvival(design,
        plannedEvents = c(40, 120), effectList = effectList2,
        maxNumberOfIterations = 100, allocationRatioPlanned = 1,
        typeOfSelection = "best",
        intersectionTest = "Sidak", directionUpper = TRUE, seed = 123
    ))

    ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult3' with expected results
    expect_equal(simResult3$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100))
    expect_equal(simResult3$iterations[2, ], c(100, 99, 97, 96, 90, 85, 72))
    expect_equal(simResult3$rejectAtLeastOne, c(0.14, 0.16, 0.4, 0.67, 0.84, 0.92, 0.95), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult3$rejectedPopulationsPerStage)),
        c(0, 0.01, 0.02, 0.04, 0.05, 0.15, 0.04, 0.36, 0.16, 0.46, 0.21,
        0.58, 0.35, 0.45, 0.05, 0.08, 0.01, 0.1, 0.1, 0.13, 0.14, 0.23,
        0.16, 0.17, 0.22, 0.09, 0.33, 0.11), tolerance = 1e-07)
    expect_equal(simResult3$futilityPerStage[1, ], c(0, 0, 0, 0, 0, 0, 0))
    expect_equal(simResult3$earlyStop[1, ], c(0, 0.01, 0.03, 0.04, 0.1, 0.15, 0.28), tolerance = 1e-07)
    expect_equal(simResult3$successPerStage[1, ], c(0, 0.01, 0.03, 0.04, 0.1, 0.15, 0.28), tolerance = 1e-07)
    expect_equal(simResult3$successPerStage[2, ], c(0.11, 0.15, 0.37, 0.62, 0.74, 0.77, 0.67), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult3$selectedPopulations)),
        c(1, 0.32, 1, 0.41, 1, 0.48, 1, 0.49, 1, 0.56, 1, 0.68, 1, 0.52,
        1, 0.68, 1, 0.58, 1, 0.49, 1, 0.47, 1, 0.34, 1, 0.17, 1, 0.2), tolerance = 1e-07)
    expect_equal(simResult3$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2))
    expect_equal(simResult3$numberOfPopulations[2, ], c(1, 1, 1, 1, 1, 1, 1))
    expect_equal(simResult3$expectedNumberOfEvents, c(120, 119.2, 117.6, 116.8, 112, 108, 97.6), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult3$singleNumberOfEventsPerStage)),
        c(14.163599, 44.862494, 15.080284, 50.80114, 16, 55.752577,
        16.925752, 57.406466, 17.861157, 63.272875, 18.810731, 71.524292,
        19.780244, 68.766802, 25.836401, 35.137506, 24.919716, 29.19886,
        24, 24.247423, 23.074248, 22.593534, 22.138843, 16.727125,
        21.189269, 8.4757076, 20.219756, 11.233198), tolerance = 1e-07)
    expect_equal(simResult3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(simResult3$conditionalPowerAchieved[2, ],
        c(0.13647132, 0.29139937, 0.31543636, 0.43117712, 0.58131066,
        0.58903037, 0.64693196), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(simResult3), NA)))
        expect_output(print(simResult3)$show())
        invisible(capture.output(expect_error(summary(simResult3), NA)))
        expect_output(summary(simResult3)$show())
        suppressWarnings(simResult3CodeBased <- eval(parse(text = getObjectRCode(simResult3, stringWrapParagraphWidth = NULL))))
        expect_equal(simResult3CodeBased$eventsPerStage, simResult3$eventsPerStage, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$iterations, simResult3$iterations, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$rejectAtLeastOne, simResult3$rejectAtLeastOne, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$rejectedPopulationsPerStage, simResult3$rejectedPopulationsPerStage, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$futilityPerStage, simResult3$futilityPerStage, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$earlyStop, simResult3$earlyStop, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$successPerStage, simResult3$successPerStage, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$selectedPopulations, simResult3$selectedPopulations, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$numberOfPopulations, simResult3$numberOfPopulations, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$expectedNumberOfEvents, simResult3$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$singleNumberOfEventsPerStage, simResult3$singleNumberOfEventsPerStage, tolerance = 1e-05)
        expect_equal(simResult3CodeBased$conditionalPowerAchieved, simResult3$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(simResult3), "character")
        df <- as.data.frame(simResult3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simResult3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Scenario 4: epsilon-rule selection (epsilonValue = 0.1) with
    # Bonferroni intersection test; note non-zero first-stage futility here.
    suppressWarnings(simResult4 <- getSimulationEnrichmentSurvival(design,
        plannedEvents = c(40, 120), effectList = effectList2,
        maxNumberOfIterations = 100, allocationRatioPlanned = 1,
        typeOfSelection = "epsilon", epsilonValue = 0.1,
        intersectionTest = "Bonferroni", directionUpper = TRUE, seed = 123
    ))

    ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult4' with expected results
    expect_equal(simResult4$iterations[1, ], c(100, 100, 100, 100, 100, 100, 100))
    expect_equal(simResult4$iterations[2, ], c(87, 88, 91, 93, 89, 83, 71))
    expect_equal(simResult4$rejectAtLeastOne, c(0.12, 0.15, 0.39, 0.63, 0.83, 0.91, 0.94), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult4$rejectedPopulationsPerStage)),
        c(0, 0.01, 0.02, 0.04, 0.05, 0.15, 0.04, 0.32, 0.16, 0.48, 0.21,
        0.62, 0.35, 0.46, 0.05, 0.06, 0.01, 0.09, 0.1, 0.14, 0.14, 0.23,
        0.16, 0.17, 0.22, 0.15, 0.33, 0.15), tolerance = 1e-07)
    expect_equal(simResult4$futilityPerStage[1, ], c(0.13, 0.11, 0.06, 0.03, 0.01, 0.02, 0.01), tolerance = 1e-07)
    expect_equal(simResult4$earlyStop[1, ], c(0.13, 0.12, 0.09, 0.07, 0.11, 0.17, 0.29), tolerance = 1e-07)
    expect_equal(simResult4$successPerStage[1, ], c(0, 0.01, 0.03, 0.04, 0.1, 0.15, 0.28), tolerance = 1e-07)
    expect_equal(simResult4$successPerStage[2, ], c(0.09, 0.14, 0.34, 0.55, 0.71, 0.75, 0.65), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult4$selectedPopulations)),
        c(1, 0.33, 1, 0.46, 1, 0.55, 1, 0.54, 1, 0.61, 1, 0.75, 1, 0.56,
        1, 0.64, 1, 0.51, 1, 0.5, 1, 0.52, 1, 0.34, 1, 0.23, 1, 0.24), tolerance = 1e-07)
    expect_equal(simResult4$numberOfPopulations[1, ], c(2, 2, 2, 2, 2, 2, 2))
    expect_equal(simResult4$numberOfPopulations[2, ],
        c(1.1149425, 1.1022727, 1.1538462, 1.1397849, 1.0674157,
        1.1807229, 1.1267606), tolerance = 1e-07)
    expect_equal(simResult4$expectedNumberOfEvents, c(109.6, 110.4, 112.8, 114.4, 111.2, 106.4, 96.8), tolerance = 1e-07)
    expect_equal(unlist(as.list(simResult4$singleNumberOfEventsPerStage)),
        c(14.163599, 41.987823, 15.080284, 51.115783, 16, 53.626374,
        16.925752, 54.19654, 17.861157, 63.084929, 18.810731, 68.25655,
        19.780244, 66.330305, 25.836401, 38.012177, 24.919716, 28.884217,
        24, 26.373626, 23.074248, 25.80346, 22.138843, 16.915071,
        21.189269, 11.74345, 20.219756, 13.669695), tolerance = 1e-07)
    expect_equal(simResult4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(simResult4$conditionalPowerAchieved[2, ],
        c(0.15686358, 0.32782429, 0.33623436, 0.44508606, 0.58784224,
        0.60322388, 0.65604368), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(simResult4), NA)))
        expect_output(print(simResult4)$show())
        invisible(capture.output(expect_error(summary(simResult4), NA)))
        expect_output(summary(simResult4)$show())
        suppressWarnings(simResult4CodeBased <- eval(parse(text = getObjectRCode(simResult4, stringWrapParagraphWidth = NULL))))
        expect_equal(simResult4CodeBased$eventsPerStage, simResult4$eventsPerStage, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$iterations, simResult4$iterations, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$rejectAtLeastOne, simResult4$rejectAtLeastOne, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$rejectedPopulationsPerStage, simResult4$rejectedPopulationsPerStage, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$futilityPerStage, simResult4$futilityPerStage, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$earlyStop, simResult4$earlyStop, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$successPerStage, simResult4$successPerStage, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$selectedPopulations, simResult4$selectedPopulations, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$numberOfPopulations, simResult4$numberOfPopulations, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$expectedNumberOfEvents, simResult4$expectedNumberOfEvents, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$singleNumberOfEventsPerStage, simResult4$singleNumberOfEventsPerStage, tolerance = 1e-05)
        expect_equal(simResult4CodeBased$conditionalPowerAchieved, simResult4$conditionalPowerAchieved, tolerance = 1e-05)
        expect_type(names(simResult4), "character")
        df <- as.data.frame(simResult4)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(simResult4)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

# Regression test for four subgroups (gMax = 3 populations); expected
# values are seed-pinned as above.
test_that("'getSimulationEnrichmentSurvival': gMax = 3", {
    .skipTestIfDisabled()

    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:enrichmentDesigns}
    # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment}
    # @refFS[Sec.]{fs:subsec:adaptiveClosedTestProcedureEnrichment}
    # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment}
    # @refFS[Sec.]{fs:sec:simulationFunctions}
    # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns}
    # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification}
    # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment}
    # @refFS[Formula]{fs:adjustedPValueSidakEnrichment}
    # @refFS[Formula]{fs:adjustedPValueSimesEnrichment}
    # @refFS[Formula]{fs:simulationEnrichmentSurvivalEvents}
    # @refFS[Formula]{fs:simulationEnrichmentSurvivalEventsWithControls}
    # @refFS[Formula]{fs:simulationEnrichmentSurvivalLogRanks}
    # @refFS[Formula]{fs:simulationEnrichmentSurvivalAdjustedPrevalances}
    # @refFS[Formula]{fs:simulationEnrichmentSelections}
    # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification}
    # @refFS[Formula]{fs:enrichmentRejectionRule}

    subGroups <- c("S1", "S12", "S2", "R")
    prevalences <- c(0.20, 0.30, 0.40, 0.1)
    piControls <-
rep(0.2, 4) hazardRatios <- matrix(c( 1.432, 1.432, 1.943, 1.943, 1.432, 1.432, 1.432, 1.432, 1.943, 1.943, 1.943, 1.943, 1.943, 2.569, 1.943, 2.569 ), ncol = 4) effectList1 <- list(subGroups = subGroups, prevalences = prevalences, hazardRatios = hazardRatios) design <- getDesignInverseNormal(informationRates = c(0.4, 0.8, 1), typeOfDesign = "noEarlyEfficacy") suppressWarnings(simResult1 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(20, 40, 50), effectList = effectList1, maxNumberOfIterations = 100, allocationRatioPlanned = 1, typeOfSelection = "best", intersectionTest = "Sidak", directionUpper = TRUE, seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult1' with expected results expect_equal(simResult1$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simResult1$iterations[2, ], c(100, 100, 100, 100)) expect_equal(simResult1$iterations[3, ], c(100, 100, 100, 100)) expect_equal(simResult1$rejectAtLeastOne, c(0.37, 0.35, 0.41, 0.35), tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$rejectedPopulationsPerStage)), c(0, 0, 0.06, 0, 0, 0.06, 0, 0, 0.12, 0, 0, 0.15, 0, 0, 0.27, 0, 0, 0.12, 0, 0, 0.17, 0, 0, 0.12, 0, 0, 0.04, 0, 0, 0.17, 0, 0, 0.12, 0, 0, 0.08), tolerance = 1e-07) expect_equal(simResult1$futilityStop, c(0, 0, 0, 0)) expect_equal(simResult1$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult1$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult1$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(simResult1$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(simResult1$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult1$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult1$successPerStage[3, ], c(0.37, 0.35, 0.41, 0.35), tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$selectedPopulations)), c(1, 0.33, 0.33, 1, 0.21, 0.21, 1, 0.29, 0.29, 1, 0.48, 0.48, 1, 0.49, 0.49, 1, 0.34, 0.34, 1, 0.42, 0.42, 1, 0.29, 0.29, 1, 0.18, 0.18, 1, 0.45, 0.45, 1, 0.29, 0.29, 1, 
0.23, 0.23), tolerance = 1e-07) expect_equal(simResult1$numberOfPopulations[1, ], c(3, 3, 3, 3)) expect_equal(simResult1$numberOfPopulations[2, ], c(1, 1, 1, 1)) expect_equal(simResult1$numberOfPopulations[3, ], c(1, 1, 1, 1)) expect_equal(simResult1$expectedNumberOfEvents, c(50, 50, 50, 50)) expect_equal(unlist(as.list(simResult1$singleNumberOfEventsPerStage)), c(3.6197209, 3.2915498, 1.6457749, 3.5373259, 3.2717967, 1.6358983, 4.2198086, 3.8135488, 1.9067744, 4.1271956, 5.2358276, 2.6179138, 8.7605581, 7.6271207, 3.8135604, 8.5611432, 8.0506265, 4.0253132, 8.4396172, 7.6333921, 3.816696, 8.2543912, 5.4792526, 2.7396263, 5.4295814, 8.6871044, 4.3435522, 5.3059889, 7.509583, 3.7547915, 5.23067, 7.9411869, 3.9705935, 5.1158714, 8.7093352, 4.3546676, 2.1901395, 0.39422512, 0.19711256, 2.595542, 1.1679939, 0.58399695, 2.1099043, 0.61187224, 0.30593612, 2.5025418, 0.57558462, 0.28779231), tolerance = 1e-07) expect_equal(simResult1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult1$conditionalPowerAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simResult1$conditionalPowerAchieved[3, ], c(0.40782052, 0.41157037, 0.44953877, 0.471601), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult1), NA))) expect_output(print(simResult1)$show()) invisible(capture.output(expect_error(summary(simResult1), NA))) expect_output(summary(simResult1)$show()) suppressWarnings(simResult1CodeBased <- eval(parse(text = getObjectRCode(simResult1, stringWrapParagraphWidth = NULL)))) expect_equal(simResult1CodeBased$eventsPerStage, simResult1$eventsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$iterations, simResult1$iterations, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectAtLeastOne, simResult1$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectedPopulationsPerStage, simResult1$rejectedPopulationsPerStage, tolerance = 1e-05) 
expect_equal(simResult1CodeBased$futilityStop, simResult1$futilityStop, tolerance = 1e-05) expect_equal(simResult1CodeBased$futilityPerStage, simResult1$futilityPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$earlyStop, simResult1$earlyStop, tolerance = 1e-05) expect_equal(simResult1CodeBased$successPerStage, simResult1$successPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$selectedPopulations, simResult1$selectedPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$numberOfPopulations, simResult1$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$expectedNumberOfEvents, simResult1$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simResult1CodeBased$singleNumberOfEventsPerStage, simResult1$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$conditionalPowerAchieved, simResult1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult1), "character") df <- as.data.frame(simResult1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult2 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(20, 40, 50), effectList = effectList1, maxNumberOfIterations = 100, allocationRatioPlanned = 1, typeOfSelection = "rbest", rValue = 2, intersectionTest = "Bonferroni", directionUpper = TRUE, seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult2' with expected results expect_equal(simResult2$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simResult2$iterations[2, ], c(85, 86, 87, 88)) expect_equal(simResult2$iterations[3, ], c(79, 81, 84, 85)) expect_equal(simResult2$rejectAtLeastOne, c(0.26, 0.26, 0.31, 0.28), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$rejectedPopulationsPerStage)), c(0, 0, 0.04, 0, 0, 0.06, 0, 0, 0.08, 0, 0, 0.07, 0, 0, 0.22, 0, 0, 
0.16, 0, 0, 0.18, 0, 0, 0.13, 0, 0, 0.17, 0, 0, 0.21, 0, 0, 0.29, 0, 0, 0.25), tolerance = 1e-07) expect_equal(simResult2$futilityStop, c(0.21, 0.19, 0.16, 0.15), tolerance = 1e-07) expect_equal(simResult2$futilityPerStage[1, ], c(0.15, 0.14, 0.13, 0.12), tolerance = 1e-07) expect_equal(simResult2$futilityPerStage[2, ], c(0.06, 0.05, 0.03, 0.03), tolerance = 1e-07) expect_equal(simResult2$earlyStop[1, ], c(0.15, 0.14, 0.13, 0.12), tolerance = 1e-07) expect_equal(simResult2$earlyStop[2, ], c(0.06, 0.05, 0.03, 0.03), tolerance = 1e-07) expect_equal(simResult2$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult2$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult2$successPerStage[3, ], c(0.17, 0.17, 0.24, 0.17), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$selectedPopulations)), c(1, 0.43, 0.4, 1, 0.41, 0.38, 1, 0.4, 0.38, 1, 0.54, 0.51, 1, 0.67, 0.61, 1, 0.61, 0.57, 1, 0.58, 0.56, 1, 0.5, 0.49, 1, 0.6, 0.57, 1, 0.7, 0.67, 1, 0.76, 0.74, 1, 0.72, 0.7), tolerance = 1e-07) expect_equal(simResult2$numberOfPopulations[1, ], c(3, 3, 3, 3)) expect_equal(simResult2$numberOfPopulations[2, ], c(2, 2, 2, 2)) expect_equal(simResult2$numberOfPopulations[3, ], c(2, 2, 2, 2)) expect_equal(simResult2$expectedNumberOfEvents, c(44.9, 45.3, 45.8, 46.1), tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$singleNumberOfEventsPerStage)), c(3.6197209, 3.7506414, 1.8718405, 3.5373259, 3.63547, 1.8142515, 4.2198086, 4.2827326, 2.1395276, 4.1271956, 4.2345201, 2.1156817, 8.7605581, 9.0774157, 4.5302851, 8.5611432, 8.7986745, 4.3909064, 8.4396172, 8.5654651, 4.2790552, 8.2543912, 8.4690401, 4.2313635, 5.4295814, 5.625962, 2.8077608, 5.3059889, 5.453205, 2.7213772, 5.23067, 5.3086675, 2.6520546, 5.1158714, 5.2489056, 2.6224964, 2.1901395, 1.5459808, 0.79011363, 2.595542, 2.1126504, 1.0734649, 2.1099043, 1.8431348, 0.9293626, 2.5025418, 2.0475342, 1.0304584), tolerance = 1e-07) expect_equal(simResult2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, 
NA_real_, NA_real_)) expect_equal(simResult2$conditionalPowerAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simResult2$conditionalPowerAchieved[3, ], c(0.41272973, 0.44447643, 0.46315477, 0.46991584), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult2), NA))) expect_output(print(simResult2)$show()) invisible(capture.output(expect_error(summary(simResult2), NA))) expect_output(summary(simResult2)$show()) suppressWarnings(simResult2CodeBased <- eval(parse(text = getObjectRCode(simResult2, stringWrapParagraphWidth = NULL)))) expect_equal(simResult2CodeBased$eventsPerStage, simResult2$eventsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$iterations, simResult2$iterations, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectAtLeastOne, simResult2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectedPopulationsPerStage, simResult2$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$futilityStop, simResult2$futilityStop, tolerance = 1e-05) expect_equal(simResult2CodeBased$futilityPerStage, simResult2$futilityPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$earlyStop, simResult2$earlyStop, tolerance = 1e-05) expect_equal(simResult2CodeBased$successPerStage, simResult2$successPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$selectedPopulations, simResult2$selectedPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$numberOfPopulations, simResult2$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$expectedNumberOfEvents, simResult2$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simResult2CodeBased$singleNumberOfEventsPerStage, simResult2$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$conditionalPowerAchieved, simResult2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult2), "character") df <- 
as.data.frame(simResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult3 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(20, 40, 50), effectList = effectList1, maxNumberOfIterations = 100, allocationRatioPlanned = 1, typeOfSelection = "epsilon", epsilonValue = 0.2, intersectionTest = "Simes", directionUpper = TRUE, seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult3' with expected results expect_equal(simResult3$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simResult3$iterations[2, ], c(100, 100, 100, 100)) expect_equal(simResult3$iterations[3, ], c(100, 100, 100, 100)) expect_equal(simResult3$rejectAtLeastOne, c(0.36, 0.35, 0.44, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$rejectedPopulationsPerStage)), c(0, 0, 0.07, 0, 0, 0.06, 0, 0, 0.13, 0, 0, 0.17, 0, 0, 0.27, 0, 0, 0.18, 0, 0, 0.17, 0, 0, 0.12, 0, 0, 0.05, 0, 0, 0.17, 0, 0, 0.16, 0, 0, 0.12), tolerance = 1e-07) expect_equal(simResult3$futilityStop, c(0, 0, 0, 0)) expect_equal(simResult3$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult3$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult3$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(simResult3$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(simResult3$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult3$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult3$successPerStage[3, ], c(0.36, 0.34, 0.41, 0.38), tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$selectedPopulations)), c(1, 0.42, 0.32, 1, 0.32, 0.22, 1, 0.36, 0.32, 1, 0.54, 0.5, 1, 0.59, 0.53, 1, 0.52, 0.48, 1, 0.55, 0.48, 1, 0.36, 0.31, 1, 0.38, 0.27, 1, 0.5, 0.45, 1, 0.48, 0.42, 1, 0.36, 0.33), tolerance = 1e-07) expect_equal(simResult3$numberOfPopulations[1, ], c(3, 3, 3, 3)) 
expect_equal(simResult3$numberOfPopulations[2, ], c(1.39, 1.34, 1.39, 1.26), tolerance = 1e-07) expect_equal(simResult3$numberOfPopulations[3, ], c(1.12, 1.15, 1.22, 1.14), tolerance = 1e-07) expect_equal(simResult3$expectedNumberOfEvents, c(50, 50, 50, 50)) expect_equal(unlist(as.list(simResult3$singleNumberOfEventsPerStage)), c(3.6197209, 3.780682, 1.6893108, 3.5373259, 3.2506085, 1.5558983, 4.2198086, 4.1738404, 2.0706939, 4.1271956, 5.1472378, 2.6010146, 8.7605581, 7.5733107, 3.8740056, 8.5611432, 8.2799243, 4.1487871, 8.4396172, 7.5735103, 3.7333396, 8.2543912, 5.8114801, 2.8436611, 5.4295814, 7.8137543, 4.1410147, 5.3059889, 7.1716962, 3.7113176, 5.23067, 7.2398952, 3.7528866, 5.1158714, 8.140367, 4.1424049, 2.1901395, 0.83225302, 0.29566884, 2.595542, 1.297771, 0.58399695, 2.1099043, 1.0127541, 0.4430799, 2.5025418, 0.90091505, 0.4129194), tolerance = 1e-07) expect_equal(simResult3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult3$conditionalPowerAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simResult3$conditionalPowerAchieved[3, ], c(0.38601776, 0.39399786, 0.42333822, 0.45516082), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult3), NA))) expect_output(print(simResult3)$show()) invisible(capture.output(expect_error(summary(simResult3), NA))) expect_output(summary(simResult3)$show()) suppressWarnings(simResult3CodeBased <- eval(parse(text = getObjectRCode(simResult3, stringWrapParagraphWidth = NULL)))) expect_equal(simResult3CodeBased$eventsPerStage, simResult3$eventsPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$iterations, simResult3$iterations, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectAtLeastOne, simResult3$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectedPopulationsPerStage, simResult3$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$futilityStop, 
simResult3$futilityStop, tolerance = 1e-05) expect_equal(simResult3CodeBased$futilityPerStage, simResult3$futilityPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$earlyStop, simResult3$earlyStop, tolerance = 1e-05) expect_equal(simResult3CodeBased$successPerStage, simResult3$successPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$selectedPopulations, simResult3$selectedPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$numberOfPopulations, simResult3$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$expectedNumberOfEvents, simResult3$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simResult3CodeBased$singleNumberOfEventsPerStage, simResult3$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$conditionalPowerAchieved, simResult3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult3), "character") df <- as.data.frame(simResult3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } effectList2 <- list( subGroups = subGroups, prevalences = prevalences, piControls = piControls, hazardRatios = hazardRatios ) suppressWarnings(simResult5 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(20, 40, 50), effectList = effectList2, maxNumberOfIterations = 100, allocationRatioPlanned = 1, typeOfSelection = "epsilon", epsilonValue = 0.2, intersectionTest = "Simes", directionUpper = TRUE, seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult5' with expected results expect_equal(simResult5$iterations[1, ], c(100, 100, 100, 100)) expect_equal(simResult5$iterations[2, ], c(100, 100, 100, 100)) expect_equal(simResult5$iterations[3, ], c(100, 100, 100, 100)) expect_equal(simResult5$rejectAtLeastOne, c(0.36, 0.33, 0.44, 0.4), tolerance = 1e-07) 
expect_equal(unlist(as.list(simResult5$rejectedPopulationsPerStage)), c(0, 0, 0.06, 0, 0, 0.07, 0, 0, 0.13, 0, 0, 0.16, 0, 0, 0.27, 0, 0, 0.19, 0, 0, 0.17, 0, 0, 0.12, 0, 0, 0.05, 0, 0, 0.13, 0, 0, 0.16, 0, 0, 0.13), tolerance = 1e-07) expect_equal(simResult5$futilityStop, c(0, 0, 0, 0)) expect_equal(simResult5$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult5$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult5$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(simResult5$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(simResult5$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(simResult5$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(simResult5$successPerStage[3, ], c(0.36, 0.33, 0.41, 0.38), tolerance = 1e-07) expect_equal(unlist(as.list(simResult5$selectedPopulations)), c(1, 0.4, 0.31, 1, 0.32, 0.22, 1, 0.36, 0.32, 1, 0.54, 0.49, 1, 0.59, 0.52, 1, 0.54, 0.5, 1, 0.55, 0.48, 1, 0.37, 0.32, 1, 0.38, 0.27, 1, 0.51, 0.45, 1, 0.48, 0.42, 1, 0.38, 0.35), tolerance = 1e-07) expect_equal(simResult5$numberOfPopulations[1, ], c(3, 3, 3, 3)) expect_equal(simResult5$numberOfPopulations[2, ], c(1.37, 1.37, 1.39, 1.29), tolerance = 1e-07) expect_equal(simResult5$numberOfPopulations[3, ], c(1.1, 1.17, 1.22, 1.16), tolerance = 1e-07) expect_equal(simResult5$expectedNumberOfEvents, c(50, 50, 50, 50)) expect_equal(unlist(as.list(simResult5$singleNumberOfEventsPerStage)), c(3.6945761, 3.7323515, 1.6794688, 3.6346639, 3.3378849, 1.5777994, 4.1778232, 4.109865, 2.0374383, 4.1120508, 5.0603333, 2.5559297, 8.6108478, 7.5016338, 3.8269345, 8.4712121, 8.1386608, 4.0963486, 8.3556464, 7.4828019, 3.6874504, 8.2241016, 5.8022236, 2.8385927, 5.5418642, 7.9479841, 4.2029806, 5.4519959, 7.2779689, 3.7763732, 5.3776189, 7.4046555, 3.8364399, 5.2929579, 8.236505, 4.1905719, 2.1527119, 0.81803054, 0.29061611, 2.4421281, 1.2454853, 0.54947882, 2.0889116, 1.0026776, 0.43867143, 2.3708897, 0.90093809, 0.4149057), tolerance = 1e-07) expect_equal(simResult5$conditionalPowerAchieved[1, 
], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(simResult5$conditionalPowerAchieved[2, ], c(0, 0, 0, 0)) expect_equal(simResult5$conditionalPowerAchieved[3, ], c(0.38413966, 0.38845505, 0.4197785, 0.45252776), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult5), NA))) expect_output(print(simResult5)$show()) invisible(capture.output(expect_error(summary(simResult5), NA))) expect_output(summary(simResult5)$show()) suppressWarnings(simResult5CodeBased <- eval(parse(text = getObjectRCode(simResult5, stringWrapParagraphWidth = NULL)))) expect_equal(simResult5CodeBased$eventsPerStage, simResult5$eventsPerStage, tolerance = 1e-05) expect_equal(simResult5CodeBased$iterations, simResult5$iterations, tolerance = 1e-05) expect_equal(simResult5CodeBased$rejectAtLeastOne, simResult5$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult5CodeBased$rejectedPopulationsPerStage, simResult5$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult5CodeBased$futilityStop, simResult5$futilityStop, tolerance = 1e-05) expect_equal(simResult5CodeBased$futilityPerStage, simResult5$futilityPerStage, tolerance = 1e-05) expect_equal(simResult5CodeBased$earlyStop, simResult5$earlyStop, tolerance = 1e-05) expect_equal(simResult5CodeBased$successPerStage, simResult5$successPerStage, tolerance = 1e-05) expect_equal(simResult5CodeBased$selectedPopulations, simResult5$selectedPopulations, tolerance = 1e-05) expect_equal(simResult5CodeBased$numberOfPopulations, simResult5$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult5CodeBased$expectedNumberOfEvents, simResult5$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simResult5CodeBased$singleNumberOfEventsPerStage, simResult5$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(simResult5CodeBased$conditionalPowerAchieved, simResult5$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult5), "character") df 
<- as.data.frame(simResult5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationEnrichmentSurvival': gMax = 4", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:subsec:adaptiveClosedTestProcedureEnrichment} # @refFS[Sec.]{fs:subsec:intersectionTestsEnrichment} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:adjustedPValueBonferroniEnrichment} # @refFS[Formula]{fs:adjustedPValueSidakEnrichment} # @refFS[Formula]{fs:adjustedPValueSimesEnrichment} # @refFS[Formula]{fs:simulationEnrichmentSurvivalEvents} # @refFS[Formula]{fs:simulationEnrichmentSurvivalEventsWithControls} # @refFS[Formula]{fs:simulationEnrichmentSurvivalLogRanks} # @refFS[Formula]{fs:simulationEnrichmentSurvivalAdjustedPrevalances} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} subGroups <- c("S1", "S2", "S3", "S12", "S13", "S23", "S123", "R") prevalences <- c(0.1, 0.05, 0.1, 0.15, 0.1, 0.15, 0.3, 0.05) hazardRatios <- matrix(c(seq(1, 1.75, 0.25), seq(1, 1.75, 0.25)), ncol = 8) effectList1 <- list(subGroups = subGroups, prevalences = prevalences, hazardRatios = hazardRatios) design <- getDesignInverseNormal(informationRates = c(0.4, 1), typeOfDesign = "noEarlyEfficacy") suppressWarnings(simResult1 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(100, 200), effectList = effectList1, maxNumberOfIterations = 100, allocationRatioPlanned = 2, typeOfSelection = "epsilon", epsilonValue = 0.15, adaptations = c(T), intersectionTest = "Sidak", seed = 123 )) ## 
Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult1' with expected results expect_equal(simResult1$iterations[1, ], 100) expect_equal(simResult1$iterations[2, ], 100) expect_equal(simResult1$rejectAtLeastOne, 0.57, tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$rejectedPopulationsPerStage)), c(0, 0.16, 0, 0.3, 0, 0.2, 0, 0.12), tolerance = 1e-07) expect_equal(simResult1$futilityPerStage[1, ], 0) expect_equal(simResult1$earlyStop[1, ], 0) expect_equal(simResult1$successPerStage[1, ], 0) expect_equal(simResult1$successPerStage[2, ], 0.51, tolerance = 1e-07) expect_equal(unlist(as.list(simResult1$selectedPopulations)), c(1, 0.45, 1, 0.58, 1, 0.44, 1, 0.44), tolerance = 1e-07) expect_equal(simResult1$numberOfPopulations[1, ], 4) expect_equal(simResult1$numberOfPopulations[2, ], 1.91, tolerance = 1e-07) expect_equal(simResult1$expectedNumberOfEvents, 200) expect_equal(unlist(as.list(simResult1$singleNumberOfEventsPerStage)), c(7.8947368, 5.6940341, 4.6052632, 3.8122818, 10.526316, 7.8604058, 17.763158, 18.4485, 7.8947368, 7.9435029, 13.815789, 14.40569, 31.578947, 39.230322, 5.9210526, 2.6052632), tolerance = 1e-07) expect_equal(simResult1$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult1$conditionalPowerAchieved[2, ], 0.42595791, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult1), NA))) expect_output(print(simResult1)$show()) invisible(capture.output(expect_error(summary(simResult1), NA))) expect_output(summary(simResult1)$show()) suppressWarnings(simResult1CodeBased <- eval(parse(text = getObjectRCode(simResult1, stringWrapParagraphWidth = NULL)))) expect_equal(simResult1CodeBased$eventsPerStage, simResult1$eventsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$iterations, simResult1$iterations, tolerance = 1e-05) expect_equal(simResult1CodeBased$rejectAtLeastOne, simResult1$rejectAtLeastOne, tolerance = 1e-05) 
expect_equal(simResult1CodeBased$rejectedPopulationsPerStage, simResult1$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$futilityPerStage, simResult1$futilityPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$earlyStop, simResult1$earlyStop, tolerance = 1e-05) expect_equal(simResult1CodeBased$successPerStage, simResult1$successPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$selectedPopulations, simResult1$selectedPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$numberOfPopulations, simResult1$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult1CodeBased$expectedNumberOfEvents, simResult1$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simResult1CodeBased$singleNumberOfEventsPerStage, simResult1$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(simResult1CodeBased$conditionalPowerAchieved, simResult1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult1), "character") df <- as.data.frame(simResult1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult2 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(100, 200), effectList = effectList1, maxNumberOfIterations = 100, allocationRatioPlanned = 2, typeOfSelection = "rBest", rValue = 2, adaptations = c(T), intersectionTest = "Bonferroni", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult2' with expected results expect_equal(simResult2$iterations[1, ], 100) expect_equal(simResult2$iterations[2, ], 90) expect_equal(simResult2$rejectAtLeastOne, 0.51, tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$rejectedPopulationsPerStage)), c(0, 0.19, 0, 0.3, 0, 0.22, 0, 0.17), tolerance = 1e-07) expect_equal(simResult2$futilityPerStage[1, ], 0.1, tolerance = 1e-07) 
expect_equal(simResult2$earlyStop[1, ], 0.1, tolerance = 1e-07) expect_equal(simResult2$successPerStage[1, ], 0) expect_equal(simResult2$successPerStage[2, ], 0.37, tolerance = 1e-07) expect_equal(unlist(as.list(simResult2$selectedPopulations)), c(1, 0.41, 1, 0.59, 1, 0.39, 1, 0.41), tolerance = 1e-07) expect_equal(simResult2$numberOfPopulations[1, ], 4) expect_equal(simResult2$numberOfPopulations[2, ], 2) expect_equal(simResult2$expectedNumberOfEvents, 190) expect_equal(unlist(as.list(simResult2$singleNumberOfEventsPerStage)), c(7.8947368, 6.4805425, 4.6052632, 4.5696194, 10.526316, 8.690946, 17.763158, 19.390381, 7.8947368, 8.6179471, 13.815789, 15.081407, 31.578947, 34.471788, 5.9210526, 2.6973684), tolerance = 1e-07) expect_equal(simResult2$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult2$conditionalPowerAchieved[2, ], 0.47328657, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult2), NA))) expect_output(print(simResult2)$show()) invisible(capture.output(expect_error(summary(simResult2), NA))) expect_output(summary(simResult2)$show()) suppressWarnings(simResult2CodeBased <- eval(parse(text = getObjectRCode(simResult2, stringWrapParagraphWidth = NULL)))) expect_equal(simResult2CodeBased$eventsPerStage, simResult2$eventsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$iterations, simResult2$iterations, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectAtLeastOne, simResult2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult2CodeBased$rejectedPopulationsPerStage, simResult2$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$futilityPerStage, simResult2$futilityPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$earlyStop, simResult2$earlyStop, tolerance = 1e-05) expect_equal(simResult2CodeBased$successPerStage, simResult2$successPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$selectedPopulations, 
simResult2$selectedPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$numberOfPopulations, simResult2$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult2CodeBased$expectedNumberOfEvents, simResult2$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simResult2CodeBased$singleNumberOfEventsPerStage, simResult2$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(simResult2CodeBased$conditionalPowerAchieved, simResult2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult2), "character") df <- as.data.frame(simResult2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } piControls <- (1:8) / 10 effectList2 <- list( subGroups = subGroups, prevalences = prevalences, piControls = piControls, hazardRatios = hazardRatios ) suppressWarnings(simResult3 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(100, 200), effectList = effectList2, maxNumberOfIterations = 100, allocationRatioPlanned = 2, typeOfSelection = "rBest", rValue = 2, adaptations = c(T), intersectionTest = "Bonferroni", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult3' with expected results expect_equal(simResult3$iterations[1, ], 100) expect_equal(simResult3$iterations[2, ], 88) expect_equal(simResult3$rejectAtLeastOne, 0.47, tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$rejectedPopulationsPerStage)), c(0, 0.2, 0, 0.29, 0, 0.16, 0, 0.21), tolerance = 1e-07) expect_equal(simResult3$futilityPerStage[1, ], 0.12, tolerance = 1e-07) expect_equal(simResult3$earlyStop[1, ], 0.12, tolerance = 1e-07) expect_equal(simResult3$successPerStage[1, ], 0) expect_equal(simResult3$successPerStage[2, ], 0.39, tolerance = 1e-07) expect_equal(unlist(as.list(simResult3$selectedPopulations)), c(1, 0.44, 1, 0.56, 1, 0.31, 1, 0.45), tolerance = 1e-07) 
expect_equal(simResult3$numberOfPopulations[1, ], 4) expect_equal(simResult3$numberOfPopulations[2, ], 2) expect_equal(simResult3$expectedNumberOfEvents, 188) expect_equal(unlist(as.list(simResult3$singleNumberOfEventsPerStage)), c(1.7600545, 1.5465305, 2.0147151, 1.9724525, 6.6217708, 5.219482, 13.921345, 14.858066, 8.8002727, 9.3924133, 17.28183, 18.444665, 41.737343, 44.545708, 7.8626685, 4.0206827), tolerance = 1e-07) expect_equal(simResult3$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult3$conditionalPowerAchieved[2, ], 0.50609156, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult3), NA))) expect_output(print(simResult3)$show()) invisible(capture.output(expect_error(summary(simResult3), NA))) expect_output(summary(simResult3)$show()) suppressWarnings(simResult3CodeBased <- eval(parse(text = getObjectRCode(simResult3, stringWrapParagraphWidth = NULL)))) expect_equal(simResult3CodeBased$eventsPerStage, simResult3$eventsPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$iterations, simResult3$iterations, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectAtLeastOne, simResult3$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult3CodeBased$rejectedPopulationsPerStage, simResult3$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$futilityPerStage, simResult3$futilityPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$earlyStop, simResult3$earlyStop, tolerance = 1e-05) expect_equal(simResult3CodeBased$successPerStage, simResult3$successPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$selectedPopulations, simResult3$selectedPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$numberOfPopulations, simResult3$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult3CodeBased$expectedNumberOfEvents, simResult3$expectedNumberOfEvents, tolerance = 1e-05) 
expect_equal(simResult3CodeBased$singleNumberOfEventsPerStage, simResult3$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(simResult3CodeBased$conditionalPowerAchieved, simResult3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult3), "character") df <- as.data.frame(simResult3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } suppressWarnings(simResult4 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(100, 200), effectList = effectList2, maxNumberOfIterations = 100, allocationRatioPlanned = 2, typeOfSelection = "rBest", rValue = 2, adaptations = c(T), intersectionTest = "Bonferroni", seed = 123 )) ## Comparison of the results of SimulationResultsEnrichmentSurvival object 'simResult4' with expected results expect_equal(simResult4$iterations[1, ], 100) expect_equal(simResult4$iterations[2, ], 88) expect_equal(simResult4$rejectAtLeastOne, 0.47, tolerance = 1e-07) expect_equal(unlist(as.list(simResult4$rejectedPopulationsPerStage)), c(0, 0.2, 0, 0.29, 0, 0.16, 0, 0.21), tolerance = 1e-07) expect_equal(simResult4$futilityPerStage[1, ], 0.12, tolerance = 1e-07) expect_equal(simResult4$earlyStop[1, ], 0.12, tolerance = 1e-07) expect_equal(simResult4$successPerStage[1, ], 0) expect_equal(simResult4$successPerStage[2, ], 0.39, tolerance = 1e-07) expect_equal(unlist(as.list(simResult4$selectedPopulations)), c(1, 0.44, 1, 0.56, 1, 0.31, 1, 0.45), tolerance = 1e-07) expect_equal(simResult4$numberOfPopulations[1, ], 4) expect_equal(simResult4$numberOfPopulations[2, ], 2) expect_equal(simResult4$expectedNumberOfEvents, 188) expect_equal(unlist(as.list(simResult4$singleNumberOfEventsPerStage)), c(1.7600545, 1.5465305, 2.0147151, 1.9724525, 6.6217708, 5.219482, 13.921345, 14.858066, 8.8002727, 9.3924133, 17.28183, 18.444665, 41.737343, 44.545708, 7.8626685, 4.0206827), tolerance = 1e-07) 
expect_equal(simResult4$conditionalPowerAchieved[1, ], NA_real_) expect_equal(simResult4$conditionalPowerAchieved[2, ], 0.50609156, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(simResult4), NA))) expect_output(print(simResult4)$show()) invisible(capture.output(expect_error(summary(simResult4), NA))) expect_output(summary(simResult4)$show()) suppressWarnings(simResult4CodeBased <- eval(parse(text = getObjectRCode(simResult4, stringWrapParagraphWidth = NULL)))) expect_equal(simResult4CodeBased$eventsPerStage, simResult4$eventsPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$iterations, simResult4$iterations, tolerance = 1e-05) expect_equal(simResult4CodeBased$rejectAtLeastOne, simResult4$rejectAtLeastOne, tolerance = 1e-05) expect_equal(simResult4CodeBased$rejectedPopulationsPerStage, simResult4$rejectedPopulationsPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$futilityPerStage, simResult4$futilityPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$earlyStop, simResult4$earlyStop, tolerance = 1e-05) expect_equal(simResult4CodeBased$successPerStage, simResult4$successPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$selectedPopulations, simResult4$selectedPopulations, tolerance = 1e-05) expect_equal(simResult4CodeBased$numberOfPopulations, simResult4$numberOfPopulations, tolerance = 1e-05) expect_equal(simResult4CodeBased$expectedNumberOfEvents, simResult4$expectedNumberOfEvents, tolerance = 1e-05) expect_equal(simResult4CodeBased$singleNumberOfEventsPerStage, simResult4$singleNumberOfEventsPerStage, tolerance = 1e-05) expect_equal(simResult4CodeBased$conditionalPowerAchieved, simResult4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(simResult4), "character") df <- as.data.frame(simResult4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(simResult4) expect_true(is.matrix(mtx)) 
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationEnrichmentSurvival': comparison of base and enrichment for inverse normal", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:simulationEnrichmentSurvivalEvents} # @refFS[Formula]{fs:simulationEnrichmentSurvivalEventsWithControls} # @refFS[Formula]{fs:simulationEnrichmentSurvivalLogRanks} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} effectList <- list( subGroups = "F", prevalences = 1, stDevs = 1.3, hazardRatios = matrix(seq(0.6, 1, 0.05), ncol = 1) ) design <- getDesignInverseNormal( informationRates = c(0.3, 0.7, 1), typeOfDesign = "asKD", gammaA = 2.4 ) suppressWarnings(x1 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(50, 100, 180), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 1, directionUpper = FALSE, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA, 10, 10), maxNumberOfEventsPerStage = c(NA, 100, 100), seed = 123 )) x2 <- getSimulationSurvival(design, plannedEvents = c(50, 100, 180), hazardRatio = seq(0.6, 1, 0.05), directionUpper = FALSE, maxNumberOfSubjects = 1500, maxNumberOfIterations = 100, allocation1 = 1, allocation2 = 1, longTimeSimulationAllowed = TRUE, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA, 10, 10), maxNumberOfEventsPerStage = c(NA, 100, 100), seed = 123 ) comp1 <- x2$overallReject - x1$rejectAtLeastOne ## Comparison of the results of numeric object 'comp1' with expected results expect_equal(comp1, c(-0.05, 0.03, 0.03, 0.01, 0.07, 0.08, 0.05, -0.05, -0.02), tolerance = 1e-07) comp2 <- x2$conditionalPowerAchieved - x1$conditionalPowerAchieved 
## Comparison of the results of matrixarray object 'comp2' with expected results expect_equal(comp2[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(comp2[2, ], c(-0.022566213, -0.0056751237, 0.047207778, 0.035251356, 0.033740719, -0.051453144, 0.039406427, 0.0072692294, -0.022722897), tolerance = 1e-07) expect_equal(comp2[3, ], c(0.025359011, -0.021253382, 0.092581664, -0.080566447, 0.087298305, -0.050787114, 0.070673698, 0.019777739, -0.019114098), tolerance = 1e-07) comp3 <- x2$expectedNumberOfEvents - x1$expectedNumberOfEvents ## Comparison of the results of numeric object 'comp3' with expected results expect_equal(comp3, c(5.6713987, 8.8976119, -9.7670181, -2.0326559, -2.7081522, -0.88153519, -5.5780096, 3.3199537, 1.2334371), tolerance = 1e-07) }) test_that("'getSimulationEnrichmentSurvival': comparison of base and enrichment for Fisher combination", { .skipTestIfDisabled() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:enrichmentDesigns} # @refFS[Sec.]{fs:sec:simulationFunctions} # @refFS[Sec.]{fs:sec:simulatingEnrichmentDesigns} # @refFS[Sec.]{fs:sec:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:simulationEnrichmentSurvivalEvents} # @refFS[Formula]{fs:simulationEnrichmentSurvivalEventsWithControls} # @refFS[Formula]{fs:simulationEnrichmentSurvivalLogRanks} # @refFS[Formula]{fs:simulationEnrichmentSelections} # @refFS[Formula]{fs:simulatingEnrichmentEffectSpecification} # @refFS[Formula]{fs:enrichmentRejectionRule} effectList <- list( subGroups = "F", prevalences = 1, stDevs = 1.3, hazardRatios = matrix(seq(0.6, 1, 0.05), ncol = 1) ) design <- getDesignFisher(informationRates = c(0.3, 0.6, 1)) suppressWarnings(x1 <- getSimulationEnrichmentSurvival(design, plannedEvents = c(50, 100, 180), effectList = effectList, maxNumberOfIterations = 100, allocationRatioPlanned = 1, directionUpper = FALSE, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA, 
10, 10), maxNumberOfEventsPerStage = c(NA, 100, 100), seed = 123 )) x2 <- getSimulationSurvival(design, plannedEvents = c(50, 100, 180), hazardRatio = seq(0.6, 1, 0.05), directionUpper = FALSE, maxNumberOfSubjects = 1500, maxNumberOfIterations = 100, allocation1 = 1, allocation2 = 1, conditionalPower = 0.8, minNumberOfEventsPerStage = c(NA, 10, 10), maxNumberOfEventsPerStage = c(NA, 100, 100), seed = 123 ) comp4 <- x2$overallReject - x1$rejectAtLeastOne ## Comparison of the results of numeric object 'comp4' with expected results expect_equal(comp4, c(-0.08, 0.02, 0.12, 0.02, 0.04, 0.04, 0.04, -0.03, 0), tolerance = 1e-07) comp5 <- x2$conditionalPowerAchieved - x1$conditionalPowerAchieved ## Comparison of the results of matrixarray object 'comp5' with expected results expect_equal(comp5[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(comp5[2, ], c(-0.067329229, 0.0040653837, 0.025600632, 0.024680224, 0.025189093, -0.043591198, 0.033525993, -0.0055417344, -0.031790612), tolerance = 1e-07) expect_equal(comp5[3, ], c(0.012384997, 0.030980232, 0.047012202, -0.035304718, 0.068468504, 0.00374058, 0.042913189, -0.015210788, -0.017776302), tolerance = 1e-07) comp6 <- x2$expectedNumberOfEvents - x1$expectedNumberOfEvents ## Comparison of the results of numeric object 'comp6' with expected results expect_equal(comp6, c(5.1347448, 9.1286427, -16.823834, -1.3136156, 0.71128925, 1.9694657, -7.1208497, -0.94699441, -0.085337992), tolerance = 1e-07) }) rpact/tests/testthat/test-class_analysis_dataset.R0000644000176200001440000060023114372422771022215 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | 
RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-class_analysis_dataset.R ## | Creation date: 06 February 2023, 12:04:06 ## | File version: $Revision: 6810 $ ## | Last changed: $Date: 2023-02-13 12:58:47 +0100 (Mo, 13 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Class 'Dataset'") test_that("Usage of 'getDataset'", { # @refFS[Tab.]{fs:tab:dataInputVariants} # @refFS[Tab.]fs:tab:output:getDatasetMeans} datasetOfMeans1 <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) ## Comparison of the results of DatasetMeans object 'datasetOfMeans1' with expected results expect_equal(datasetOfMeans1$stages, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfMeans1$groups, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfMeans1$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_)) expect_equal(datasetOfMeans1$sampleSizes, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans1$means, c(1, 1.4, 1.1, 1.5, 1, 3, 1, 2.5), tolerance = 1e-07) expect_equal(datasetOfMeans1$stDevs, c(1, 1, 2, 2, 2, 2, 1.3, 1.3), tolerance = 1e-07) expect_equal(datasetOfMeans1$overallSampleSizes, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans1$overallMeans, c(1, 1.4, 1.0333333, 1.4371429, 1.02, 2.0403509, 1.0166667, 2.1257143), tolerance = 1e-07) expect_equal(datasetOfMeans1$overallStDevs, c(1, 1, 1.3814998, 1.4254175, 1.6391506, 1.8228568, 1.5786638, 1.7387056), tolerance = 1e-07) .skipTestIfDisabled() if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(datasetOfMeans1), NA))) expect_output(print(datasetOfMeans1)$show()) 
invisible(capture.output(expect_error(summary(datasetOfMeans1), NA))) expect_output(summary(datasetOfMeans1)$show()) datasetOfMeans1CodeBased <- eval(parse(text = getObjectRCode(datasetOfMeans1, stringWrapParagraphWidth = NULL))) expect_equal(datasetOfMeans1CodeBased$stages, datasetOfMeans1$stages, tolerance = 1e-05) expect_equal(datasetOfMeans1CodeBased$groups, datasetOfMeans1$groups, tolerance = 1e-05) expect_equal(datasetOfMeans1CodeBased$subsets, datasetOfMeans1$subsets, tolerance = 1e-05) expect_equal(datasetOfMeans1CodeBased$sampleSizes, datasetOfMeans1$sampleSizes, tolerance = 1e-05) expect_equal(datasetOfMeans1CodeBased$means, datasetOfMeans1$means, tolerance = 1e-05) expect_equal(datasetOfMeans1CodeBased$stDevs, datasetOfMeans1$stDevs, tolerance = 1e-05) expect_equal(datasetOfMeans1CodeBased$overallSampleSizes, datasetOfMeans1$overallSampleSizes, tolerance = 1e-05) expect_equal(datasetOfMeans1CodeBased$overallMeans, datasetOfMeans1$overallMeans, tolerance = 1e-05) expect_equal(datasetOfMeans1CodeBased$overallStDevs, datasetOfMeans1$overallStDevs, tolerance = 1e-05) expect_type(names(datasetOfMeans1), "character") df <- as.data.frame(datasetOfMeans1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(datasetOfMeans1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } ## Comparison of the results of data.frame object 'datasetOfMeans1$.data' with expected results expect_equal(datasetOfMeans1$.data$stage, factor(c(1, 1, 2, 2, 3, 3, 4, 4))) expect_equal(datasetOfMeans1$.data$group, factor(c(1, 2, 1, 2, 1, 2, 1, 2))) expect_equal(datasetOfMeans1$.data$subset, factor(c(NA, NA, NA, NA, NA, NA, NA, NA))) expect_equal(datasetOfMeans1$.data$sampleSize, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans1$.data$mean, c(1, 1.4, 1.1, 1.5, 1, 3, 1, 2.5), tolerance = 1e-07) expect_equal(datasetOfMeans1$.data$stDev, c(1, 1, 2, 2, 2, 2, 1.3, 1.3), tolerance = 1e-07) 
expect_equal(datasetOfMeans1$.data$overallSampleSize, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans1$.data$overallMean, c(1, 1.4, 1.0333333, 1.4371429, 1.02, 2.0403509, 1.0166667, 2.1257143), tolerance = 1e-07) expect_equal(datasetOfMeans1$.data$overallStDev, c(1, 1, 1.3814998, 1.4254175, 1.6391506, 1.8228568, 1.5786638, 1.7387056), tolerance = 1e-07) expect_equal(factor(datasetOfMeans1$stages), datasetOfMeans1$.data$stage, tolerance = 1e-07) expect_equal(factor(datasetOfMeans1$groups), datasetOfMeans1$.data$group, tolerance = 1e-07) expect_equal(datasetOfMeans1$sampleSizes, datasetOfMeans1$.data$sampleSize, tolerance = 1e-07) expect_equal(datasetOfMeans1$means, datasetOfMeans1$.data$mean, tolerance = 1e-07) expect_equal(datasetOfMeans1$stDevs, datasetOfMeans1$.data$stDev, tolerance = 1e-07) expect_equal(datasetOfMeans1$overallSampleSizes, datasetOfMeans1$.data$overallSampleSize, tolerance = 1e-07) expect_equal(datasetOfMeans1$overallMeans, datasetOfMeans1$.data$overallMean, tolerance = 1e-07) expect_equal(datasetOfMeans1$overallStDevs, datasetOfMeans1$.data$overallStDev, tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans1) ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes1, c(22, 33, 55, 66, 
NA_real_)) expect_equal(x$stageResults1$overallSampleSizes2, c(22, 35, 57, 70, NA_real_)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x$stageResults1), NA))) expect_output(print(x$stageResults1)$show()) invisible(capture.output(expect_error(summary(x$stageResults1), NA))) expect_output(summary(x$stageResults1)$show()) x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL))) expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallMeans1, x$stageResults1$overallMeans1, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallMeans2, x$stageResults1$overallMeans2, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallStDevs1, x$stageResults1$overallStDevs1, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallStDevs2, x$stageResults1$overallStDevs2, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallSampleSizes1, x$stageResults1$overallSampleSizes1, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallSampleSizes2, x$stageResults1$overallSampleSizes2, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$testStatistics, x$stageResults1$testStatistics, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$effectSizes, x$stageResults1$effectSizes, tolerance = 1e-05) expect_type(names(x$stageResults1), "character") 
df <- as.data.frame(x$stageResults1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x$stageResults1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes1, c(22, 33, 55, 66, NA_real_)) expect_equal(x$stageResults2$overallSampleSizes2, c(22, 35, 57, 70, NA_real_)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x$stageResults2), NA))) expect_output(print(x$stageResults2)$show()) invisible(capture.output(expect_error(summary(x$stageResults2), NA))) expect_output(summary(x$stageResults2)$show()) x$stageResults2CodeBased <- eval(parse(text = 
getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL))) expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallMeans1, x$stageResults2$overallMeans1, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallMeans2, x$stageResults2$overallMeans2, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallStDevs1, x$stageResults2$overallStDevs1, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallStDevs2, x$stageResults2$overallStDevs2, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallSampleSizes1, x$stageResults2$overallSampleSizes1, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallSampleSizes2, x$stageResults2$overallSampleSizes2, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$effectSizes, x$stageResults2$effectSizes, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05) expect_type(names(x$stageResults2), "character") df <- as.data.frame(x$stageResults2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x$stageResults2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, 
NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes1, c(22, 33, 55, 66, NA_real_)) expect_equal(x$stageResults3$overallSampleSizes2, c(22, 35, 57, 70, NA_real_)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x$stageResults3), NA))) expect_output(print(x$stageResults3)$show()) invisible(capture.output(expect_error(summary(x$stageResults3), NA))) expect_output(summary(x$stageResults3)$show()) x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL))) expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallMeans1, x$stageResults3$overallMeans1, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallMeans2, x$stageResults3$overallMeans2, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallStDevs1, 
x$stageResults3$overallStDevs1, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallStDevs2, x$stageResults3$overallStDevs2, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallSampleSizes1, x$stageResults3$overallSampleSizes1, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallSampleSizes2, x$stageResults3$overallSampleSizes2, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$effectSizes, x$stageResults3$effectSizes, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05) expect_type(names(x$stageResults3), "character") df <- as.data.frame(x$stageResults3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x$stageResults3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } datasetOfMeans2 <- getDataset(data.frame( stages = 1:4, n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) )) x <- getMultipleStageResultsForDataset(datasetOfMeans2) ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs1, c(1, 
1.3814998, 1.6391506, 1.5786638, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes1, c(22, 33, 55, 66, NA_real_)) expect_equal(x$stageResults1$overallSampleSizes2, c(22, 35, 57, 70, NA_real_)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x$stageResults1), NA))) expect_output(print(x$stageResults1)$show()) invisible(capture.output(expect_error(summary(x$stageResults1), NA))) expect_output(summary(x$stageResults1)$show()) x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL))) expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallMeans1, x$stageResults1$overallMeans1, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallMeans2, x$stageResults1$overallMeans2, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallStDevs1, x$stageResults1$overallStDevs1, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallStDevs2, x$stageResults1$overallStDevs2, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallSampleSizes1, x$stageResults1$overallSampleSizes1, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallSampleSizes2, x$stageResults1$overallSampleSizes2, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$testStatistics, x$stageResults1$testStatistics, tolerance = 
1e-05) expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$effectSizes, x$stageResults1$effectSizes, tolerance = 1e-05) expect_type(names(x$stageResults1), "character") df <- as.data.frame(x$stageResults1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x$stageResults1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes1, c(22, 33, 55, 66, NA_real_)) expect_equal(x$stageResults2$overallSampleSizes2, c(22, 35, 57, 70, NA_real_)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(x$stageResults2), NA))) expect_output(print(x$stageResults2)$show()) invisible(capture.output(expect_error(summary(x$stageResults2), NA))) expect_output(summary(x$stageResults2)$show()) x$stageResults2CodeBased <- eval(parse(text = getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL))) expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallMeans1, x$stageResults2$overallMeans1, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallMeans2, x$stageResults2$overallMeans2, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallStDevs1, x$stageResults2$overallStDevs1, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallStDevs2, x$stageResults2$overallStDevs2, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallSampleSizes1, x$stageResults2$overallSampleSizes1, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallSampleSizes2, x$stageResults2$overallSampleSizes2, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$effectSizes, x$stageResults2$effectSizes, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05) expect_type(names(x$stageResults2), "character") df <- as.data.frame(x$stageResults2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x$stageResults2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) 
> 0) } ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans1, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallMeans2, c(1.4, 1.4371429, 2.0403509, 2.1257143, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs1, c(1, 1.3814998, 1.6391506, 1.5786638, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs2, c(1, 1.4254175, 1.8228568, 1.7387056, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes1, c(22, 33, 55, 66, NA_real_)) expect_equal(x$stageResults3$overallSampleSizes2, c(22, 35, 57, 70, NA_real_)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(-0.4, -0.40380952, -1.0203509, -1.1090476, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x$stageResults3), NA))) expect_output(print(x$stageResults3)$show()) invisible(capture.output(expect_error(summary(x$stageResults3), NA))) expect_output(summary(x$stageResults3)$show()) x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL))) expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 
1e-05) expect_equal(x$stageResults3CodeBased$overallMeans1, x$stageResults3$overallMeans1, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallMeans2, x$stageResults3$overallMeans2, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallStDevs1, x$stageResults3$overallStDevs1, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallStDevs2, x$stageResults3$overallStDevs2, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallSampleSizes1, x$stageResults3$overallSampleSizes1, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallSampleSizes2, x$stageResults3$overallSampleSizes2, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$effectSizes, x$stageResults3$effectSizes, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05) expect_type(names(x$stageResults3), "character") df <- as.data.frame(x$stageResults3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x$stageResults3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } datasetOfMeans3 <- getDataset( overallSampleSizes1 = c(22, 33, 55, 66), overallSampleSizes2 = c(22, 35, 57, 70), overallMeans1 = c(1, 1.033333, 1.02, 1.016667), overallMeans2 = c(1.4, 1.437143, 2.040351, 2.125714), overallStDevs1 = c(1, 1.381500, 1.639151, 1.578664), overallStDevs2 = c(1, 1.425418, 1.822857, 1.738706) ) ## Comparison of the results of DatasetMeans object 'datasetOfMeans3' with expected results expect_equal(datasetOfMeans3$stages, c(1, 1, 2, 2, 3, 3, 4, 4)) expect_equal(datasetOfMeans3$groups, c(1, 2, 1, 2, 1, 2, 1, 2)) expect_equal(datasetOfMeans3$subsets, 
c(NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_)) expect_equal(datasetOfMeans3$sampleSizes, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans3$means, c(1, 1.4, 1.099999, 1.5000004, 1.0000005, 3.0000001, 1.000002, 2.4999979), tolerance = 1e-07) expect_equal(datasetOfMeans3$stDevs, c(1, 1, 2.0000005, 2.0000009, 2.0000005, 1.9999999, 1.2999989, 1.3000023), tolerance = 1e-07) expect_equal(datasetOfMeans3$overallSampleSizes, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans3$overallMeans, c(1, 1.4, 1.033333, 1.437143, 1.02, 2.040351, 1.016667, 2.125714), tolerance = 1e-07) expect_equal(datasetOfMeans3$overallStDevs, c(1, 1, 1.3815, 1.425418, 1.639151, 1.822857, 1.578664, 1.738706), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(datasetOfMeans3), NA))) expect_output(print(datasetOfMeans3)$show()) invisible(capture.output(expect_error(summary(datasetOfMeans3), NA))) expect_output(summary(datasetOfMeans3)$show()) datasetOfMeans3CodeBased <- eval(parse(text = getObjectRCode(datasetOfMeans3, stringWrapParagraphWidth = NULL))) expect_equal(datasetOfMeans3CodeBased$stages, datasetOfMeans3$stages, tolerance = 1e-05) expect_equal(datasetOfMeans3CodeBased$groups, datasetOfMeans3$groups, tolerance = 1e-05) expect_equal(datasetOfMeans3CodeBased$subsets, datasetOfMeans3$subsets, tolerance = 1e-05) expect_equal(datasetOfMeans3CodeBased$sampleSizes, datasetOfMeans3$sampleSizes, tolerance = 1e-05) expect_equal(datasetOfMeans3CodeBased$means, datasetOfMeans3$means, tolerance = 1e-05) expect_equal(datasetOfMeans3CodeBased$stDevs, datasetOfMeans3$stDevs, tolerance = 1e-05) expect_equal(datasetOfMeans3CodeBased$overallSampleSizes, datasetOfMeans3$overallSampleSizes, tolerance = 1e-05) expect_equal(datasetOfMeans3CodeBased$overallMeans, datasetOfMeans3$overallMeans, tolerance = 1e-05) 
expect_equal(datasetOfMeans3CodeBased$overallStDevs, datasetOfMeans3$overallStDevs, tolerance = 1e-05) expect_type(names(datasetOfMeans3), "character") df <- as.data.frame(datasetOfMeans3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(datasetOfMeans3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } ## Comparison of the results of data.frame object 'datasetOfMeans3$.data' with expected results expect_equal(datasetOfMeans3$.data$stage, factor(c(1, 1, 2, 2, 3, 3, 4, 4))) expect_equal(datasetOfMeans3$.data$group, factor(c(1, 2, 1, 2, 1, 2, 1, 2))) expect_equal(datasetOfMeans3$.data$subset, factor(c(NA, NA, NA, NA, NA, NA, NA, NA))) expect_equal(datasetOfMeans3$.data$sampleSize, c(22, 22, 11, 13, 22, 22, 11, 13)) expect_equal(datasetOfMeans3$.data$mean, c(1, 1.4, 1.099999, 1.5000004, 1.0000005, 3.0000001, 1.000002, 2.4999979), tolerance = 1e-07) expect_equal(datasetOfMeans3$.data$stDev, c(1, 1, 2.0000005, 2.0000009, 2.0000005, 1.9999999, 1.2999989, 1.3000023), tolerance = 1e-07) expect_equal(datasetOfMeans3$.data$overallSampleSize, c(22, 22, 33, 35, 55, 57, 66, 70)) expect_equal(datasetOfMeans3$.data$overallMean, c(1, 1.4, 1.033333, 1.437143, 1.02, 2.040351, 1.016667, 2.125714), tolerance = 1e-07) expect_equal(datasetOfMeans3$.data$overallStDev, c(1, 1, 1.3815, 1.425418, 1.639151, 1.822857, 1.578664, 1.738706), tolerance = 1e-07) x <- getMultipleStageResultsForDataset(datasetOfMeans3) ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$overallMeans1, c(1, 1.033333, 1.02, 1.016667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallMeans2, c(1.4, 1.437143, 2.040351, 2.125714, NA_real_), tolerance = 1e-07) 
expect_equal(x$stageResults1$overallStDevs1, c(1, 1.3815, 1.639151, 1.578664, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallStDevs2, c(1, 1.425418, 1.822857, 1.738706, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults1$overallSampleSizes1, c(22, 33, 55, 66, NA_real_)) expect_equal(x$stageResults1$overallSampleSizes2, c(22, 35, 57, 70, NA_real_)) expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults1$effectSizes, c(-0.4, -0.40381, -1.020351, -1.109047, NA_real_), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x$stageResults1), NA))) expect_output(print(x$stageResults1)$show()) invisible(capture.output(expect_error(summary(x$stageResults1), NA))) expect_output(summary(x$stageResults1)$show()) x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL))) expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallMeans1, x$stageResults1$overallMeans1, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallMeans2, x$stageResults1$overallMeans2, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallStDevs1, x$stageResults1$overallStDevs1, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallStDevs2, x$stageResults1$overallStDevs2, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallSampleSizes1, x$stageResults1$overallSampleSizes1, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$overallSampleSizes2, x$stageResults1$overallSampleSizes2, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$testStatistics, 
x$stageResults1$testStatistics, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05) expect_equal(x$stageResults1CodeBased$effectSizes, x$stageResults1$effectSizes, tolerance = 1e-05) expect_type(names(x$stageResults1), "character") df <- as.data.frame(x$stageResults1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x$stageResults1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$overallMeans1, c(1, 1.033333, 1.02, 1.016667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallMeans2, c(1.4, 1.437143, 2.040351, 2.125714, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs1, c(1, 1.3815, 1.639151, 1.578664, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallStDevs2, c(1, 1.425418, 1.822857, 1.738706, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$overallSampleSizes1, c(22, 33, 55, 66, NA_real_)) expect_equal(x$stageResults2$overallSampleSizes2, c(22, 35, 57, 70, NA_real_)) expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$effectSizes, c(-0.4, -0.40381, -1.020351, -1.109047, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(x$stageResults2), NA))) expect_output(print(x$stageResults2)$show()) invisible(capture.output(expect_error(summary(x$stageResults2), NA))) expect_output(summary(x$stageResults2)$show()) x$stageResults2CodeBased <- eval(parse(text = getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL))) expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallMeans1, x$stageResults2$overallMeans1, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallMeans2, x$stageResults2$overallMeans2, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallStDevs1, x$stageResults2$overallStDevs1, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallStDevs2, x$stageResults2$overallStDevs2, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallSampleSizes1, x$stageResults2$overallSampleSizes1, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$overallSampleSizes2, x$stageResults2$overallSampleSizes2, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$effectSizes, x$stageResults2$effectSizes, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05) expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05) expect_type(names(x$stageResults2), "character") df <- as.data.frame(x$stageResults2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x$stageResults2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) 
> 0) } ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$overallMeans1, c(1, 1.033333, 1.02, 1.016667, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallMeans2, c(1.4, 1.437143, 2.040351, 2.125714, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs1, c(1, 1.3815, 1.639151, 1.578664, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallStDevs2, c(1, 1.425418, 1.822857, 1.738706, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$overallSampleSizes1, c(22, 33, 55, 66, NA_real_)) expect_equal(x$stageResults3$overallSampleSizes2, c(22, 35, 57, 70, NA_real_)) expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$effectSizes, c(-0.4, -0.40381, -1.020351, -1.109047, NA_real_), tolerance = 1e-07) expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x$stageResults3), NA))) expect_output(print(x$stageResults3)$show()) invisible(capture.output(expect_error(summary(x$stageResults3), NA))) expect_output(summary(x$stageResults3)$show()) x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL))) expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05) expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05) 
# --- tail of the preceding test: round-trip (code-based re-creation) checks for 'x$stageResults3' ---
expect_equal(x$stageResults3CodeBased$overallMeans1, x$stageResults3$overallMeans1, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$overallMeans2, x$stageResults3$overallMeans2, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$overallStDevs1, x$stageResults3$overallStDevs1, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$overallStDevs2, x$stageResults3$overallStDevs2, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$overallSampleSizes1, x$stageResults3$overallSampleSizes1, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$overallSampleSizes2, x$stageResults3$overallSampleSizes2, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$effectSizes, x$stageResults3$effectSizes, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05)
expect_type(names(x$stageResults3), "character")
df <- as.data.frame(x$stageResults3)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x$stageResults3)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

test_that("Creation of a dataset of means using stage wise data (one group)", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]{fs:tab:output:getDatasetMeans}
    # Stage-wise input: four stages, single treatment group
    datasetOfMeans4 <- getDataset(
        n = c(22, 11, 22, 11),
        means = c(1, 1.1, 1, 1),
        stDevs = c(1, 2, 2, 1.3)
    )

    ## Comparison of the results of DatasetMeans object 'datasetOfMeans4' with expected results
    expect_equal(datasetOfMeans4$stages, c(1, 2, 3, 4))
    expect_equal(datasetOfMeans4$groups, c(1, 1, 1, 1))
    expect_equal(datasetOfMeans4$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_))
    expect_equal(datasetOfMeans4$sampleSizes, c(22, 11, 22, 11))
    expect_equal(datasetOfMeans4$means, c(1, 1.1, 1, 1), tolerance = 1e-07)
    expect_equal(datasetOfMeans4$stDevs, c(1, 2, 2, 1.3), tolerance = 1e-07)
    expect_equal(datasetOfMeans4$overallSampleSizes, c(22, 33, 55, 66))
    expect_equal(datasetOfMeans4$overallMeans, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07)
    expect_equal(datasetOfMeans4$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print/summary must not fail; output is captured and discarded
        invisible(capture.output(expect_error(print(datasetOfMeans4), NA)))
        expect_output(print(datasetOfMeans4)$show())
        invisible(capture.output(expect_error(summary(datasetOfMeans4), NA)))
        expect_output(summary(datasetOfMeans4)$show())
        # round trip: generated R code must re-create an equal object
        datasetOfMeans4CodeBased <- eval(parse(text = getObjectRCode(datasetOfMeans4, stringWrapParagraphWidth = NULL)))
        expect_equal(datasetOfMeans4CodeBased$stages, datasetOfMeans4$stages, tolerance = 1e-05)
        expect_equal(datasetOfMeans4CodeBased$groups, datasetOfMeans4$groups, tolerance = 1e-05)
        expect_equal(datasetOfMeans4CodeBased$subsets, datasetOfMeans4$subsets, tolerance = 1e-05)
        expect_equal(datasetOfMeans4CodeBased$sampleSizes, datasetOfMeans4$sampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfMeans4CodeBased$means, datasetOfMeans4$means, tolerance = 1e-05)
        expect_equal(datasetOfMeans4CodeBased$stDevs, datasetOfMeans4$stDevs, tolerance = 1e-05)
        expect_equal(datasetOfMeans4CodeBased$overallSampleSizes, datasetOfMeans4$overallSampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfMeans4CodeBased$overallMeans, datasetOfMeans4$overallMeans, tolerance = 1e-05)
        expect_equal(datasetOfMeans4CodeBased$overallStDevs, datasetOfMeans4$overallStDevs, tolerance = 1e-05)
        expect_type(names(datasetOfMeans4), "character")
        df <- as.data.frame(datasetOfMeans4)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetOfMeans4)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetOfMeans4$.data' with expected results
    expect_equal(datasetOfMeans4$.data$stage, factor(c(1, 2, 3, 4)))
    expect_equal(datasetOfMeans4$.data$group, factor(c(1, 1, 1, 1)))
    expect_equal(datasetOfMeans4$.data$subset, factor(c(NA, NA, NA, NA)))
    expect_equal(datasetOfMeans4$.data$sampleSize, c(22, 11, 22, 11))
    expect_equal(datasetOfMeans4$.data$mean, c(1, 1.1, 1, 1), tolerance = 1e-07)
    expect_equal(datasetOfMeans4$.data$stDev, c(1, 2, 2, 1.3), tolerance = 1e-07)
    expect_equal(datasetOfMeans4$.data$overallSampleSize, c(22, 33, 55, 66))
    expect_equal(datasetOfMeans4$.data$overallMean, c(1, 1.0333333, 1.02, 1.0166667), tolerance = 1e-07)
    expect_equal(datasetOfMeans4$.data$overallStDev, c(1, 1.3814998, 1.6391506, 1.5786638), tolerance = 1e-07)

    x <- getMultipleStageResultsForDataset(datasetOfMeans4)

    ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results
    expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallMeans, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$overallSampleSizes, c(22, 33, 55, 66, NA_real_))
    expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$effectSizes, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults1), NA)))
        expect_output(print(x$stageResults1)$show())
        invisible(capture.output(expect_error(summary(x$stageResults1), NA)))
        expect_output(summary(x$stageResults1)$show())
        x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallMeans, x$stageResults1$overallMeans, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallStDevs, x$stageResults1$overallStDevs, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallSampleSizes, x$stageResults1$overallSampleSizes, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$testStatistics, x$stageResults1$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$effectSizes, x$stageResults1$effectSizes, tolerance = 1e-05)
        expect_type(names(x$stageResults1), "character")
        df <- as.data.frame(x$stageResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results
    expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallMeans, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$overallSampleSizes, c(22, 33, 55, 66, NA_real_))
    expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$effectSizes, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults2), NA)))
        expect_output(print(x$stageResults2)$show())
        invisible(capture.output(expect_error(summary(x$stageResults2), NA)))
        expect_output(summary(x$stageResults2)$show())
        x$stageResults2CodeBased <- eval(parse(text = getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallMeans, x$stageResults2$overallMeans, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallStDevs, x$stageResults2$overallStDevs, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallSampleSizes, x$stageResults2$overallSampleSizes, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$effectSizes, x$stageResults2$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05)
        expect_type(names(x$stageResults2), "character")
        df <- as.data.frame(x$stageResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results
    expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallMeans, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$overallStDevs, c(1, 1.3814998, 1.6391506, 1.5786638, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$overallSampleSizes, c(22, 33, 55, 66, NA_real_))
    expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$effectSizes, c(1, 1.0333333, 1.02, 1.0166667, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults3), NA)))
        expect_output(print(x$stageResults3)$show())
        invisible(capture.output(expect_error(summary(x$stageResults3), NA)))
        expect_output(summary(x$stageResults3)$show())
        x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallMeans, x$stageResults3$overallMeans, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallStDevs, x$stageResults3$overallStDevs, tolerance = 1e-05)
# --- tail of the preceding test: remaining round-trip checks for 'x$stageResults3' ---
expect_equal(x$stageResults3CodeBased$overallSampleSizes, x$stageResults3$overallSampleSizes, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$effectSizes, x$stageResults3$effectSizes, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher, tolerance = 1e-05)
expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05)
expect_type(names(x$stageResults3), "character")
df <- as.data.frame(x$stageResults3)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x$stageResults3)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

test_that("Creation of a dataset of means using overall data (one group)", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]{fs:tab:output:getDatasetMeans}
    # Overall (cumulative) input variant: stage-wise values are derived by the dataset
    datasetOfMeans5 <- getDataset(
        overallSampleSizes = c(22, 33, 55, 66),
        overallMeans = c(1.000, 1.033, 1.020, 1.017),
        overallStDevs = c(1.00, 1.38, 1.64, 1.58)
    )

    ## Comparison of the results of DatasetMeans object 'datasetOfMeans5' with expected results
    expect_equal(datasetOfMeans5$stages, c(1, 2, 3, 4))
    expect_equal(datasetOfMeans5$groups, c(1, 1, 1, 1))
    expect_equal(datasetOfMeans5$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_))
    expect_equal(datasetOfMeans5$sampleSizes, c(22, 11, 22, 11))
    expect_equal(datasetOfMeans5$means, c(1, 1.099, 1.0005, 1.002), tolerance = 1e-07)
    expect_equal(datasetOfMeans5$stDevs, c(1, 1.9967205, 2.003374, 1.3047847), tolerance = 1e-07)
    expect_equal(datasetOfMeans5$overallSampleSizes, c(22, 33, 55, 66))
    expect_equal(datasetOfMeans5$overallMeans, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07)
    expect_equal(datasetOfMeans5$overallStDevs, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print/summary must not fail; output is captured and discarded
        invisible(capture.output(expect_error(print(datasetOfMeans5), NA)))
        expect_output(print(datasetOfMeans5)$show())
        invisible(capture.output(expect_error(summary(datasetOfMeans5), NA)))
        expect_output(summary(datasetOfMeans5)$show())
        # round trip: generated R code must re-create an equal object
        datasetOfMeans5CodeBased <- eval(parse(text = getObjectRCode(datasetOfMeans5, stringWrapParagraphWidth = NULL)))
        expect_equal(datasetOfMeans5CodeBased$stages, datasetOfMeans5$stages, tolerance = 1e-05)
        expect_equal(datasetOfMeans5CodeBased$groups, datasetOfMeans5$groups, tolerance = 1e-05)
        expect_equal(datasetOfMeans5CodeBased$subsets, datasetOfMeans5$subsets, tolerance = 1e-05)
        expect_equal(datasetOfMeans5CodeBased$sampleSizes, datasetOfMeans5$sampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfMeans5CodeBased$means, datasetOfMeans5$means, tolerance = 1e-05)
        expect_equal(datasetOfMeans5CodeBased$stDevs, datasetOfMeans5$stDevs, tolerance = 1e-05)
        expect_equal(datasetOfMeans5CodeBased$overallSampleSizes, datasetOfMeans5$overallSampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfMeans5CodeBased$overallMeans, datasetOfMeans5$overallMeans, tolerance = 1e-05)
        expect_equal(datasetOfMeans5CodeBased$overallStDevs, datasetOfMeans5$overallStDevs, tolerance = 1e-05)
        expect_type(names(datasetOfMeans5), "character")
        df <- as.data.frame(datasetOfMeans5)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetOfMeans5)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetOfMeans5$.data' with expected results
    expect_equal(datasetOfMeans5$.data$stage, factor(c(1, 2, 3, 4)))
    expect_equal(datasetOfMeans5$.data$group, factor(c(1, 1, 1, 1)))
    expect_equal(datasetOfMeans5$.data$subset, factor(c(NA, NA, NA, NA)))
    expect_equal(datasetOfMeans5$.data$sampleSize, c(22, 11, 22, 11))
    expect_equal(datasetOfMeans5$.data$mean, c(1, 1.099, 1.0005, 1.002), tolerance = 1e-07)
    expect_equal(datasetOfMeans5$.data$stDev, c(1, 1.9967205, 2.003374, 1.3047847), tolerance = 1e-07)
    expect_equal(datasetOfMeans5$.data$overallSampleSize, c(22, 33, 55, 66))
    expect_equal(datasetOfMeans5$.data$overallMean, c(1, 1.033, 1.02, 1.017), tolerance = 1e-07)
    expect_equal(datasetOfMeans5$.data$overallStDev, c(1, 1.38, 1.64, 1.58), tolerance = 1e-07)

    x <- getMultipleStageResultsForDataset(datasetOfMeans5)

    ## Comparison of the results of StageResultsMeans object 'x$stageResults1' with expected results
    expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallMeans, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$overallStDevs, c(1, 1.38, 1.64, 1.58, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$overallSampleSizes, c(22, 33, 55, 66, NA_real_))
    expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$effectSizes, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults1), NA)))
        expect_output(print(x$stageResults1)$show())
        invisible(capture.output(expect_error(summary(x$stageResults1), NA)))
        expect_output(summary(x$stageResults1)$show())
        x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallMeans, x$stageResults1$overallMeans, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallStDevs, x$stageResults1$overallStDevs, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallSampleSizes, x$stageResults1$overallSampleSizes, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$testStatistics, x$stageResults1$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$effectSizes, x$stageResults1$effectSizes, tolerance = 1e-05)
        expect_type(names(x$stageResults1), "character")
        df <- as.data.frame(x$stageResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsMeans object 'x$stageResults2' with expected results
    expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallMeans, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$overallStDevs, c(1, 1.38, 1.64, 1.58, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$overallSampleSizes, c(22, 33, 55, 66, NA_real_))
    expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$effectSizes, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults2), NA)))
        expect_output(print(x$stageResults2)$show())
        invisible(capture.output(expect_error(summary(x$stageResults2), NA)))
        expect_output(summary(x$stageResults2)$show())
        x$stageResults2CodeBased <- eval(parse(text = getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallMeans, x$stageResults2$overallMeans, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallStDevs, x$stageResults2$overallStDevs, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallSampleSizes, x$stageResults2$overallSampleSizes, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$effectSizes, x$stageResults2$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05)
        expect_type(names(x$stageResults2), "character")
        df <- as.data.frame(x$stageResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsMeans object 'x$stageResults3' with expected results
    expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallMeans, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$overallStDevs, c(1, 1.38, 1.64, 1.58, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$overallSampleSizes, c(22, 33, 55, 66, NA_real_))
    expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$effectSizes, c(1, 1.033, 1.02, 1.017, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults3), NA)))
        expect_output(print(x$stageResults3)$show())
        invisible(capture.output(expect_error(summary(x$stageResults3), NA)))
        expect_output(summary(x$stageResults3)$show())
        x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallMeans, x$stageResults3$overallMeans, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallStDevs, x$stageResults3$overallStDevs, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallSampleSizes, x$stageResults3$overallSampleSizes, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$effectSizes, x$stageResults3$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher, tolerance = 1e-05)
# --- tail of the preceding test: last round-trip checks for 'x$stageResults3' ---
expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05)
expect_type(names(x$stageResults3), "character")
df <- as.data.frame(x$stageResults3)
expect_s3_class(df, "data.frame")
expect_true(nrow(df) > 0 && ncol(df) > 0)
mtx <- as.matrix(x$stageResults3)
expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
})

test_that("Trim command works as expected for means", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]{fs:tab:output:getDatasetMeans}
    # Reference object: created once and never modified
    datasetOfMeansExpected <- getDataset(
        n1 = c(13, 25),
        n2 = c(15, NA),
        n3 = c(14, 27),
        n4 = c(12, 29),
        means1 = c(24.2, 22.2),
        means2 = c(18.8, NA),
        means3 = c(26.7, 27.7),
        means4 = c(9.2, 12.2),
        stDevs1 = c(24.4, 22.1),
        stDevs2 = c(21.2, NA),
        stDevs3 = c(25.6, 23.2),
        stDevs4 = c(21.5, 22.7)
    )
    # Identical second object: padded with NAs to 4 stages, then trimmed back to 2;
    # after the round trip it must equal the untouched reference
    datasetOfMeans <- getDataset(
        n1 = c(13, 25),
        n2 = c(15, NA),
        n3 = c(14, 27),
        n4 = c(12, 29),
        means1 = c(24.2, 22.2),
        means2 = c(18.8, NA),
        means3 = c(26.7, 27.7),
        means4 = c(9.2, 12.2),
        stDevs1 = c(24.4, 22.1),
        stDevs2 = c(21.2, NA),
        stDevs3 = c(25.6, 23.2),
        stDevs4 = c(21.5, 22.7)
    )
    datasetOfMeans$.fillWithNAs(4)
    datasetOfMeans$.trim(2)
    expect_equal(datasetOfMeans$stages, datasetOfMeansExpected$stages)
    expect_equal(datasetOfMeans$groups, datasetOfMeansExpected$groups)
    expect_equal(datasetOfMeans$overallMeans, datasetOfMeansExpected$overallMeans)
    expect_equal(datasetOfMeans$means, datasetOfMeansExpected$means)
    expect_equal(datasetOfMeans$overallStDevs, datasetOfMeansExpected$overallStDevs)
    expect_equal(datasetOfMeans$stDevs, datasetOfMeansExpected$stDevs)
    expect_equal(datasetOfMeans$.data$stage, datasetOfMeansExpected$.data$stage)
    expect_equal(datasetOfMeans$.data$group, datasetOfMeansExpected$.data$group)
    expect_equal(datasetOfMeans$.data$overallMeans, datasetOfMeansExpected$.data$overallMeans)
    expect_equal(datasetOfMeans$.data$means, datasetOfMeansExpected$.data$means)
    expect_equal(datasetOfMeans$.data$overallStDevs, datasetOfMeansExpected$.data$overallStDevs)
    expect_equal(datasetOfMeans$.data$stDevs, datasetOfMeansExpected$.data$stDevs)
})

test_that("Creation of a dataset of rates using stage wise data (one group)", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]{fs:tab:output:getDatasetRates}
    # Stage-wise input: four stages, single group, binary endpoint
    datasetOfRates1 <- getDataset(
        n = c(8, 10, 9, 11),
        events = c(4, 5, 5, 6)
    )

    ## Comparison of the results of DatasetRates object 'datasetOfRates1' with expected results
    expect_equal(datasetOfRates1$stages, c(1, 2, 3, 4))
    expect_equal(datasetOfRates1$groups, c(1, 1, 1, 1))
    expect_equal(datasetOfRates1$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_))
    expect_equal(datasetOfRates1$sampleSizes, c(8, 10, 9, 11))
    expect_equal(datasetOfRates1$events, c(4, 5, 5, 6))
    expect_equal(datasetOfRates1$overallSampleSizes, c(8, 18, 27, 38))
    expect_equal(datasetOfRates1$overallEvents, c(4, 9, 14, 20))

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print/summary must not fail; output is captured and discarded
        invisible(capture.output(expect_error(print(datasetOfRates1), NA)))
        expect_output(print(datasetOfRates1)$show())
        invisible(capture.output(expect_error(summary(datasetOfRates1), NA)))
        expect_output(summary(datasetOfRates1)$show())
        # round trip: generated R code must re-create an equal object
        datasetOfRates1CodeBased <- eval(parse(text = getObjectRCode(datasetOfRates1, stringWrapParagraphWidth = NULL)))
        expect_equal(datasetOfRates1CodeBased$stages, datasetOfRates1$stages, tolerance = 1e-05)
        expect_equal(datasetOfRates1CodeBased$groups, datasetOfRates1$groups, tolerance = 1e-05)
        expect_equal(datasetOfRates1CodeBased$subsets, datasetOfRates1$subsets, tolerance = 1e-05)
        expect_equal(datasetOfRates1CodeBased$sampleSizes, datasetOfRates1$sampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates1CodeBased$events, datasetOfRates1$events, tolerance = 1e-05)
        expect_equal(datasetOfRates1CodeBased$overallSampleSizes, datasetOfRates1$overallSampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates1CodeBased$overallEvents, datasetOfRates1$overallEvents, tolerance = 1e-05)
        expect_type(names(datasetOfRates1), "character")
        df <- as.data.frame(datasetOfRates1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetOfRates1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetOfRates1$.data' with expected results
    expect_equal(datasetOfRates1$.data$stage, factor(c(1, 2, 3, 4)))
    expect_equal(datasetOfRates1$.data$group, factor(c(1, 1, 1, 1)))
    expect_equal(datasetOfRates1$.data$subset, factor(c(NA, NA, NA, NA)))
    expect_equal(datasetOfRates1$.data$sampleSize, c(8, 10, 9, 11))
    expect_equal(datasetOfRates1$.data$event, c(4, 5, 5, 6))
    expect_equal(datasetOfRates1$.data$overallSampleSize, c(8, 18, 27, 38))
    expect_equal(datasetOfRates1$.data$overallEvent, c(4, 9, 14, 20))

    # thetaH0 = 0.99: the expectations below pin p-values of 1 and strongly
    # negative test statistics for this extreme null hypothesis
    x <- getMultipleStageResultsForDataset(datasetOfRates1, thetaH0 = 0.99)

    ## Comparison of the results of StageResultsRates object 'x$stageResults1' with expected results
    expect_equal(x$stageResults1$overallTestStatistics, c(-13.929113, -20.89367, -24.622317, -28.727412, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$overallPValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults1$overallEvents, c(4, 9, 14, 20, NA_real_))
    expect_equal(x$stageResults1$overallSampleSizes, c(8, 18, 27, 38, NA_real_))
    expect_equal(x$stageResults1$overallPi1, c(0.5, 0.5, 0.51851852, 0.52631579, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$testStatistics, c(-13.929113, -15.573222, -13.098993, -14.818182, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$pValues, c(1, 1, 1, 1, NA_real_))

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults1), NA)))
        expect_output(print(x$stageResults1)$show())
        invisible(capture.output(expect_error(summary(x$stageResults1), NA)))
        expect_output(summary(x$stageResults1)$show())
        x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallEvents, x$stageResults1$overallEvents, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallSampleSizes, x$stageResults1$overallSampleSizes, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPi1, x$stageResults1$overallPi1, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$testStatistics, x$stageResults1$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05)
        expect_type(names(x$stageResults1), "character")
        df <- as.data.frame(x$stageResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsRates object 'x$stageResults2' with expected results
    expect_equal(x$stageResults2$overallTestStatistics, c(-13.929113, -20.89367, -24.622317, -28.727412, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$overallPValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults2$overallEvents, c(4, 9, 14, 20, NA_real_))
    expect_equal(x$stageResults2$overallSampleSizes, c(8, 18, 27, 38, NA_real_))
    expect_equal(x$stageResults2$overallPi1, c(0.5, 0.5, 0.51851852, 0.52631579, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$testStatistics, c(-13.929113, -15.573222, -13.098993, -14.818182, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$pValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults2$combInverseNormal, c(-21.273454, -30.085207, -36.846702, -42.546907, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults2), NA)))
        expect_output(print(x$stageResults2)$show())
        invisible(capture.output(expect_error(summary(x$stageResults2), NA)))
        expect_output(summary(x$stageResults2)$show())
        x$stageResults2CodeBased <- eval(parse(text = getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallEvents, x$stageResults2$overallEvents, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallSampleSizes, x$stageResults2$overallSampleSizes, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPi1, x$stageResults2$overallPi1, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05)
        expect_type(names(x$stageResults2), "character")
        df <- as.data.frame(x$stageResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsRates object 'x$stageResults3' with expected results
    expect_equal(x$stageResults3$overallTestStatistics, c(-13.929113, -20.89367, -24.622317, -28.727412, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$overallPValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults3$overallEvents, c(4, 9, 14, 20, NA_real_))
    expect_equal(x$stageResults3$overallSampleSizes, c(8, 18, 27, 38, NA_real_))
    expect_equal(x$stageResults3$overallPi1, c(0.5, 0.5, 0.51851852, 0.52631579, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$testStatistics, c(-13.929113, -15.573222, -13.098993, -14.818182, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$pValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults3$combFisher, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults3), NA)))
        expect_output(print(x$stageResults3)$show())
        invisible(capture.output(expect_error(summary(x$stageResults3), NA)))
        expect_output(summary(x$stageResults3)$show())
        x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallEvents, x$stageResults3$overallEvents, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallSampleSizes, x$stageResults3$overallSampleSizes, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPi1, x$stageResults3$overallPi1, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05)
        expect_type(names(x$stageResults3), "character")
        df <- as.data.frame(x$stageResults3)
expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("Creation of a dataset of rates using stage wise data (two groups)", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]{fs:tab:output:getDatasetRates}

    # Stage-wise input: per-stage sample sizes and event counts for two groups;
    # the expected overall* values below are the cumulative sums across stages.
    datasetOfRates2 <- getDataset(
        n2 = c(8, 10, 9, 11),
        n1 = c(11, 13, 12, 13),
        events2 = c(3, 5, 5, 6),
        events1 = c(10, 10, 12, 12)
    )

    ## Comparison of the results of DatasetRates object 'datasetOfRates2' with expected results
    expect_equal(datasetOfRates2$stages, c(1, 1, 2, 2, 3, 3, 4, 4))
    expect_equal(datasetOfRates2$groups, c(1, 2, 1, 2, 1, 2, 1, 2))
    expect_equal(datasetOfRates2$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_))
    expect_equal(datasetOfRates2$sampleSizes, c(11, 8, 13, 10, 12, 9, 13, 11))
    expect_equal(datasetOfRates2$events, c(10, 3, 10, 5, 12, 5, 12, 6))
    expect_equal(datasetOfRates2$overallSampleSizes, c(11, 8, 24, 18, 36, 27, 49, 38))
    expect_equal(datasetOfRates2$overallEvents, c(10, 3, 20, 8, 32, 13, 44, 19))
    # Extended checks (print/summary output and code-based round trip) run only
    # when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(datasetOfRates2), NA)))
        expect_output(print(datasetOfRates2)$show())
        invisible(capture.output(expect_error(summary(datasetOfRates2), NA)))
        expect_output(summary(datasetOfRates2)$show())
        # Round trip: regenerate the object from its own R code representation
        # and compare all fields against the original.
        datasetOfRates2CodeBased <- eval(parse(text = getObjectRCode(datasetOfRates2, stringWrapParagraphWidth = NULL)))
        expect_equal(datasetOfRates2CodeBased$stages, datasetOfRates2$stages, tolerance = 1e-05)
        expect_equal(datasetOfRates2CodeBased$groups, datasetOfRates2$groups, tolerance = 1e-05)
        expect_equal(datasetOfRates2CodeBased$subsets, datasetOfRates2$subsets, tolerance = 1e-05)
        expect_equal(datasetOfRates2CodeBased$sampleSizes, datasetOfRates2$sampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates2CodeBased$events, datasetOfRates2$events, tolerance = 1e-05)
        expect_equal(datasetOfRates2CodeBased$overallSampleSizes, datasetOfRates2$overallSampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates2CodeBased$overallEvents, datasetOfRates2$overallEvents, tolerance = 1e-05)
        expect_type(names(datasetOfRates2), "character")
        df <- as.data.frame(datasetOfRates2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetOfRates2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetOfRates2$.data' with expected results
    expect_equal(datasetOfRates2$.data$stage, factor(c(1, 1, 2, 2, 3, 3, 4, 4)))
    expect_equal(datasetOfRates2$.data$group, factor(c(1, 2, 1, 2, 1, 2, 1, 2)))
    expect_equal(datasetOfRates2$.data$subset, factor(c(NA, NA, NA, NA, NA, NA, NA, NA)))
    expect_equal(datasetOfRates2$.data$sampleSize, c(11, 8, 13, 10, 12, 9, 13, 11))
    expect_equal(datasetOfRates2$.data$event, c(10, 3, 10, 5, 12, 5, 12, 6))
    expect_equal(datasetOfRates2$.data$overallSampleSize, c(11, 8, 24, 18, 36, 27, 49, 38))
    expect_equal(datasetOfRates2$.data$overallEvent, c(10, 3, 20, 8, 32, 13, 44, 19))

    # Derive stage results for several designs from the same dataset
    # (thetaH0 = 0.99 is deliberately extreme; all p-values below are 1).
    x <- getMultipleStageResultsForDataset(datasetOfRates2, thetaH0 = 0.99)

    ## Comparison of the results of StageResultsRates object 'x$stageResults1' with expected results
    expect_equal(x$stageResults1$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$overallPValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults1$overallEvents1, c(10, 20, 32, 44, NA_real_))
    expect_equal(x$stageResults1$overallEvents2, c(3, 8, 13, 19, NA_real_))
    expect_equal(x$stageResults1$overallSampleSizes1, c(11, 24, 36, 49, NA_real_))
    expect_equal(x$stageResults1$overallSampleSizes2, c(8, 18, 27, 38, NA_real_))
    expect_equal(x$stageResults1$overallPi1, c(0.90909091, 0.83333333, 0.88888889, 0.89795918, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$overallPi2, c(0.375, 0.44444444, 0.48148148, 0.5, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$pValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults1$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults1), NA)))
        expect_output(print(x$stageResults1)$show())
        invisible(capture.output(expect_error(summary(x$stageResults1), NA)))
        expect_output(summary(x$stageResults1)$show())
        x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallEvents1, x$stageResults1$overallEvents1, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallEvents2, x$stageResults1$overallEvents2, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallSampleSizes1, x$stageResults1$overallSampleSizes1, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallSampleSizes2, x$stageResults1$overallSampleSizes2, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPi1, x$stageResults1$overallPi1, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPi2, x$stageResults1$overallPi2, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$testStatistics, x$stageResults1$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$effectSizes, x$stageResults1$effectSizes, tolerance = 1e-05)
        expect_type(names(x$stageResults1), "character")
        df <- as.data.frame(x$stageResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsRates object 'x$stageResults2' with expected results
    expect_equal(x$stageResults2$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$overallPValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults2$overallEvents1, c(10, 20, 32, 44, NA_real_))
    expect_equal(x$stageResults2$overallEvents2, c(3, 8, 13, 19, NA_real_))
    expect_equal(x$stageResults2$overallSampleSizes1, c(11, 24, 36, 49, NA_real_))
    expect_equal(x$stageResults2$overallSampleSizes2, c(8, 18, 27, 38, NA_real_))
    expect_equal(x$stageResults2$overallPi1, c(0.90909091, 0.83333333, 0.88888889, 0.89795918, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$overallPi2, c(0.375, 0.44444444, 0.48148148, 0.5, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$pValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults2$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07)
    # stageResults2 additionally carries inverse normal combination values
    expect_equal(x$stageResults2$combInverseNormal, c(-21.273454, -30.085207, -36.846702, -42.546907, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults2), NA)))
        expect_output(print(x$stageResults2)$show())
        invisible(capture.output(expect_error(summary(x$stageResults2), NA)))
        expect_output(summary(x$stageResults2)$show())
        x$stageResults2CodeBased <- eval(parse(text = getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallEvents1, x$stageResults2$overallEvents1, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallEvents2, x$stageResults2$overallEvents2, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallSampleSizes1, x$stageResults2$overallSampleSizes1, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallSampleSizes2, x$stageResults2$overallSampleSizes2, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPi1, x$stageResults2$overallPi1, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPi2, x$stageResults2$overallPi2, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$effectSizes, x$stageResults2$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05)
        expect_type(names(x$stageResults2), "character")
        df <- as.data.frame(x$stageResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsRates object 'x$stageResults3' with expected results
    expect_equal(x$stageResults3$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$overallPValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults3$overallEvents1, c(10, 20, 32, 44, NA_real_))
    expect_equal(x$stageResults3$overallEvents2, c(3, 8, 13, 19, NA_real_))
    expect_equal(x$stageResults3$overallSampleSizes1, c(11, 24, 36, 49, NA_real_))
    expect_equal(x$stageResults3$overallSampleSizes2, c(8, 18, 27, 38, NA_real_))
    expect_equal(x$stageResults3$overallPi1, c(0.90909091, 0.83333333, 0.88888889, 0.89795918, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$overallPi2, c(0.375, 0.44444444, 0.48148148, 0.5, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$pValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults3$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07)
    # stageResults3 additionally carries Fisher combination values
    expect_equal(x$stageResults3$combFisher, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults3), NA)))
        expect_output(print(x$stageResults3)$show())
        invisible(capture.output(expect_error(summary(x$stageResults3), NA)))
        expect_output(summary(x$stageResults3)$show())
        x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallEvents1, x$stageResults3$overallEvents1, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallEvents2, x$stageResults3$overallEvents2, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallSampleSizes1, x$stageResults3$overallSampleSizes1, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallSampleSizes2, x$stageResults3$overallSampleSizes2, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPi1, x$stageResults3$overallPi1, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPi2, x$stageResults3$overallPi2, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$effectSizes, x$stageResults3$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05)
        expect_type(names(x$stageResults3), "character")
        df <- as.data.frame(x$stageResults3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("Creation of a dataset of rates using stage wise data (four groups)", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]{fs:tab:output:getDatasetRates}

    # Stage-wise input for four treatment groups over four stages
    datasetOfRates3 <- getDataset(
        n1 = c(11, 13, 12, 13),
        n2 = c(8, 10, 9, 11),
        n3 = c(7, 10, 8, 9),
        n4 = c(9, 11, 5, 2),
        events1 = c(10, 10, 12, 12),
        events2 = c(3, 5, 5, 6),
        events3 = c(2, 4, 3, 5),
        events4 = c(3, 4, 3, 0)
    )

    ## Comparison of the results of DatasetRates object 'datasetOfRates3' with expected results
    expect_equal(datasetOfRates3$stages, c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4))
    expect_equal(datasetOfRates3$groups, c(1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4))
    expect_equal(datasetOfRates3$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_))
    expect_equal(datasetOfRates3$sampleSizes, c(11, 8, 7, 9, 13, 10, 10, 11, 12, 9, 8, 5, 13, 11, 9, 2))
    expect_equal(datasetOfRates3$events, c(10, 3, 2, 3, 10, 5, 4, 4, 12, 5, 3, 3, 12, 6, 5, 0))
    expect_equal(datasetOfRates3$overallSampleSizes, c(11, 8, 7, 9, 24, 18, 17, 20, 36, 27, 25, 25, 49, 38, 34, 27))
    expect_equal(datasetOfRates3$overallEvents, c(10, 3, 2, 3, 20, 8, 6, 7, 32, 13, 9, 10, 44, 19, 14, 10))
    # Extended checks (print/summary output and code-based round trip) run only
    # when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(datasetOfRates3), NA)))
        expect_output(print(datasetOfRates3)$show())
        invisible(capture.output(expect_error(summary(datasetOfRates3), NA)))
        expect_output(summary(datasetOfRates3)$show())
        # Round trip: regenerate the object from its own R code representation
        datasetOfRates3CodeBased <- eval(parse(text = getObjectRCode(datasetOfRates3, stringWrapParagraphWidth = NULL)))
        expect_equal(datasetOfRates3CodeBased$stages, datasetOfRates3$stages, tolerance = 1e-05)
        expect_equal(datasetOfRates3CodeBased$groups, datasetOfRates3$groups, tolerance = 1e-05)
        expect_equal(datasetOfRates3CodeBased$subsets, datasetOfRates3$subsets, tolerance = 1e-05)
        expect_equal(datasetOfRates3CodeBased$sampleSizes, datasetOfRates3$sampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates3CodeBased$events, datasetOfRates3$events, tolerance = 1e-05)
        expect_equal(datasetOfRates3CodeBased$overallSampleSizes, datasetOfRates3$overallSampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates3CodeBased$overallEvents, datasetOfRates3$overallEvents, tolerance = 1e-05)
        expect_type(names(datasetOfRates3), "character")
        df <- as.data.frame(datasetOfRates3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetOfRates3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetOfRates3$.data' with expected results
    expect_equal(datasetOfRates3$.data$stage, factor(c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4)))
    expect_equal(datasetOfRates3$.data$group, factor(c(1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4)))
    expect_equal(datasetOfRates3$.data$subset, factor(c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)))
    expect_equal(datasetOfRates3$.data$sampleSize, c(11, 8, 7, 9, 13, 10, 10, 11, 12, 9, 8, 5, 13, 11, 9, 2))
    expect_equal(datasetOfRates3$.data$event, c(10, 3, 2, 3, 10, 5, 4, 4, 12, 5, 3, 3, 12, 6, 5, 0))
    expect_equal(datasetOfRates3$.data$overallSampleSize, c(11, 8, 7, 9, 24, 18, 17, 20, 36, 27, 25, 25, 49, 38, 34, 27))
    expect_equal(datasetOfRates3$.data$overallEvent, c(10, 3, 2, 3, 20, 8, 6, 7, 32, 13, 9, 10, 44, 19, 14, 10))
})

test_that("Creation of a dataset of rates using overall data (two groups)", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]{fs:tab:output:getDatasetRates}

    # Overall (cumulative) input: the expected per-stage sampleSizes/events
    # below are the stage-wise differences of the cumulative values.
    datasetOfRates4 <- getDataset(
        overallSampleSizes1 = c(11, 24, 36, 49),
        overallSampleSizes2 = c(8, 18, 27, 38),
        overallEvents1 = c(10, 20, 32, 44),
        overallEvents2 = c(3, 8, 13, 19)
    )

    ## Comparison of the results of DatasetRates object 'datasetOfRates4' with expected results
    expect_equal(datasetOfRates4$stages, c(1, 1, 2, 2, 3, 3, 4, 4))
    expect_equal(datasetOfRates4$groups, c(1, 2, 1, 2, 1, 2, 1, 2))
    expect_equal(datasetOfRates4$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_))
    expect_equal(datasetOfRates4$sampleSizes, c(11, 8, 13, 10, 12, 9, 13, 11))
    expect_equal(datasetOfRates4$events, c(10, 3, 10, 5, 12, 5, 12, 6))
    expect_equal(datasetOfRates4$overallSampleSizes, c(11, 8, 24, 18, 36, 27, 49, 38))
    expect_equal(datasetOfRates4$overallEvents, c(10, 3, 20, 8, 32, 13, 44, 19))
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(datasetOfRates4), NA)))
        expect_output(print(datasetOfRates4)$show())
        invisible(capture.output(expect_error(summary(datasetOfRates4), NA)))
        expect_output(summary(datasetOfRates4)$show())
        datasetOfRates4CodeBased <- eval(parse(text = getObjectRCode(datasetOfRates4,
        stringWrapParagraphWidth = NULL)))
        expect_equal(datasetOfRates4CodeBased$stages, datasetOfRates4$stages, tolerance = 1e-05)
        expect_equal(datasetOfRates4CodeBased$groups, datasetOfRates4$groups, tolerance = 1e-05)
        expect_equal(datasetOfRates4CodeBased$subsets, datasetOfRates4$subsets, tolerance = 1e-05)
        expect_equal(datasetOfRates4CodeBased$sampleSizes, datasetOfRates4$sampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates4CodeBased$events, datasetOfRates4$events, tolerance = 1e-05)
        expect_equal(datasetOfRates4CodeBased$overallSampleSizes, datasetOfRates4$overallSampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates4CodeBased$overallEvents, datasetOfRates4$overallEvents, tolerance = 1e-05)
        expect_type(names(datasetOfRates4), "character")
        df <- as.data.frame(datasetOfRates4)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetOfRates4)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetOfRates4$.data' with expected results
    expect_equal(datasetOfRates4$.data$stage, factor(c(1, 1, 2, 2, 3, 3, 4, 4)))
    expect_equal(datasetOfRates4$.data$group, factor(c(1, 2, 1, 2, 1, 2, 1, 2)))
    expect_equal(datasetOfRates4$.data$subset, factor(c(NA, NA, NA, NA, NA, NA, NA, NA)))
    expect_equal(datasetOfRates4$.data$sampleSize, c(11, 8, 13, 10, 12, 9, 13, 11))
    expect_equal(datasetOfRates4$.data$event, c(10, 3, 10, 5, 12, 5, 12, 6))
    expect_equal(datasetOfRates4$.data$overallSampleSize, c(11, 8, 24, 18, 36, 27, 49, 38))
    expect_equal(datasetOfRates4$.data$overallEvent, c(10, 3, 20, 8, 32, 13, 44, 19))

    # Derive stage results from the overall-data dataset; the expected values
    # match those of the equivalent stage-wise dataset above.
    x <- getMultipleStageResultsForDataset(datasetOfRates4, thetaH0 = 0.99)

    ## Comparison of the results of StageResultsRates object 'x$stageResults1' with expected results
    expect_equal(x$stageResults1$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$overallPValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults1$overallEvents1, c(10, 20, 32, 44, NA_real_))
    expect_equal(x$stageResults1$overallEvents2, c(3, 8, 13, 19, NA_real_))
    expect_equal(x$stageResults1$overallSampleSizes1, c(11, 24, 36, 49, NA_real_))
    expect_equal(x$stageResults1$overallSampleSizes2, c(8, 18, 27, 38, NA_real_))
    expect_equal(x$stageResults1$overallPi1, c(0.90909091, 0.83333333, 0.88888889, 0.89795918, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$overallPi2, c(0.375, 0.44444444, 0.48148148, 0.5, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$pValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults1$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults1), NA)))
        expect_output(print(x$stageResults1)$show())
        invisible(capture.output(expect_error(summary(x$stageResults1), NA)))
        expect_output(summary(x$stageResults1)$show())
        x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallEvents1, x$stageResults1$overallEvents1, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallEvents2, x$stageResults1$overallEvents2, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallSampleSizes1, x$stageResults1$overallSampleSizes1, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallSampleSizes2, x$stageResults1$overallSampleSizes2, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPi1, x$stageResults1$overallPi1, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPi2, x$stageResults1$overallPi2, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$testStatistics, x$stageResults1$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$effectSizes, x$stageResults1$effectSizes, tolerance = 1e-05)
        expect_type(names(x$stageResults1), "character")
        df <- as.data.frame(x$stageResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsRates object 'x$stageResults2' with expected results
    expect_equal(x$stageResults2$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$overallPValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults2$overallEvents1, c(10, 20, 32, 44, NA_real_))
    expect_equal(x$stageResults2$overallEvents2, c(3, 8, 13, 19, NA_real_))
    expect_equal(x$stageResults2$overallSampleSizes1, c(11, 24, 36, 49, NA_real_))
    expect_equal(x$stageResults2$overallSampleSizes2, c(8, 18, 27, 38, NA_real_))
    expect_equal(x$stageResults2$overallPi1, c(0.90909091, 0.83333333, 0.88888889, 0.89795918, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$overallPi2, c(0.375, 0.44444444, 0.48148148, 0.5, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$pValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults2$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07)
    # stageResults2 additionally carries inverse normal combination values
    expect_equal(x$stageResults2$combInverseNormal, c(-21.273454, -30.085207, -36.846702, -42.546907, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults2), NA)))
        expect_output(print(x$stageResults2)$show())
        invisible(capture.output(expect_error(summary(x$stageResults2), NA)))
        expect_output(summary(x$stageResults2)$show())
        x$stageResults2CodeBased <- eval(parse(text = getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallEvents1, x$stageResults2$overallEvents1, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallEvents2, x$stageResults2$overallEvents2, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallSampleSizes1, x$stageResults2$overallSampleSizes1, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallSampleSizes2, x$stageResults2$overallSampleSizes2, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPi1, x$stageResults2$overallPi1, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPi2, x$stageResults2$overallPi2, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$effectSizes, x$stageResults2$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05)
        expect_type(names(x$stageResults2), "character")
        df <- as.data.frame(x$stageResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsRates object 'x$stageResults3' with expected results
    expect_equal(x$stageResults3$overallTestStatistics, c(-13.397899, -26.707477, -31.300879, -37.503444, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$overallPValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults3$overallEvents1, c(10, 20, 32, 44, NA_real_))
    expect_equal(x$stageResults3$overallEvents2, c(3, 8, 13, 19, NA_real_))
    expect_equal(x$stageResults3$overallSampleSizes1, c(11, 24, 36, 49, NA_real_))
    expect_equal(x$stageResults3$overallSampleSizes2, c(8, 18, 27, 38, NA_real_))
    expect_equal(x$stageResults3$overallPi1, c(0.90909091, 0.83333333, 0.88888889, 0.89795918, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$overallPi2, c(0.375, 0.44444444, 0.48148148, 0.5, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$testStatistics, c(-13.397899, -23.909016, -16.449119, -20.614826, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$pValues, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults3$effectSizes, c(0.53409091, 0.38888889, 0.40740741, 0.39795918, NA_real_), tolerance = 1e-07)
    # stageResults3 additionally carries Fisher combination values
    expect_equal(x$stageResults3$combFisher, c(1, 1, 1, 1, NA_real_))
    expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults3), NA)))
        expect_output(print(x$stageResults3)$show())
        invisible(capture.output(expect_error(summary(x$stageResults3), NA)))
        expect_output(summary(x$stageResults3)$show())
        x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallEvents1, x$stageResults3$overallEvents1, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallEvents2, x$stageResults3$overallEvents2, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallSampleSizes1, x$stageResults3$overallSampleSizes1, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallSampleSizes2, x$stageResults3$overallSampleSizes2, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPi1, x$stageResults3$overallPi1, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPi2, x$stageResults3$overallPi2, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$effectSizes, x$stageResults3$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05)
        expect_type(names(x$stageResults3), "character")
        df <- as.data.frame(x$stageResults3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("Creation of a dataset of rates using overall data (three groups)", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]{fs:tab:output:getDatasetRates}

    # Overall (cumulative) input for three treatment groups over four stages
    datasetOfRates5 <- getDataset(
        overallSampleSizes1 = c(11, 24, 36, 49),
        overallSampleSizes2 = c(8, 18, 27, 38),
        overallSampleSizes3 = c(8, 18, 27, 38),
        overallEvents1 = c(10, 20, 32, 44),
        overallEvents2 = c(3, 8, 13, 19),
        overallEvents3 = c(3, 7, 12, 20)
    )

    ## Comparison of the results of DatasetRates object 'datasetOfRates5' with expected results
    expect_equal(datasetOfRates5$stages, c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4))
    expect_equal(datasetOfRates5$groups, c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3))
    expect_equal(datasetOfRates5$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_))
    expect_equal(datasetOfRates5$sampleSizes, c(11, 8, 8, 13, 10, 10, 12, 9, 9, 13, 11, 11))
    expect_equal(datasetOfRates5$events, c(10, 3, 3, 10, 5, 4, 12, 5, 5, 12, 6, 8))
    expect_equal(datasetOfRates5$overallSampleSizes, c(11, 8, 8, 24, 18, 18, 36, 27, 27, 49, 38, 38))
    expect_equal(datasetOfRates5$overallEvents, c(10, 3, 3, 20, 8, 7, 32, 13, 12, 44, 19, 20))
    # Extended checks (print/summary output and code-based round trip) run only
    # when the complete unit test set is enabled.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(datasetOfRates5), NA)))
        expect_output(print(datasetOfRates5)$show())
        invisible(capture.output(expect_error(summary(datasetOfRates5), NA)))
        expect_output(summary(datasetOfRates5)$show())
        # Round trip: regenerate the object from its own R code representation
        datasetOfRates5CodeBased <- eval(parse(text = getObjectRCode(datasetOfRates5, stringWrapParagraphWidth = NULL)))
        expect_equal(datasetOfRates5CodeBased$stages, datasetOfRates5$stages, tolerance = 1e-05)
        expect_equal(datasetOfRates5CodeBased$groups, datasetOfRates5$groups, tolerance = 1e-05)
        expect_equal(datasetOfRates5CodeBased$subsets, datasetOfRates5$subsets, tolerance = 1e-05)
        expect_equal(datasetOfRates5CodeBased$sampleSizes, datasetOfRates5$sampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates5CodeBased$events, datasetOfRates5$events, tolerance = 1e-05)
        expect_equal(datasetOfRates5CodeBased$overallSampleSizes, datasetOfRates5$overallSampleSizes, tolerance = 1e-05)
        expect_equal(datasetOfRates5CodeBased$overallEvents, datasetOfRates5$overallEvents, tolerance = 1e-05)
        expect_type(names(datasetOfRates5), "character")
        df <- as.data.frame(datasetOfRates5)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetOfRates5)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetOfRates5$.data' with expected results
    expect_equal(datasetOfRates5$.data$stage, factor(c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4)))
    expect_equal(datasetOfRates5$.data$group, factor(c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3)))
    expect_equal(datasetOfRates5$.data$subset, factor(c(NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)))
    expect_equal(datasetOfRates5$.data$sampleSize, c(11, 8, 8, 13, 10, 10, 12, 9, 9, 13, 11, 11))
    expect_equal(datasetOfRates5$.data$event, c(10, 3, 3, 10, 5, 4, 12, 5, 5, 12, 6, 8))
    expect_equal(datasetOfRates5$.data$overallSampleSize, c(11, 8, 8, 24, 18, 18, 36, 27, 27, 49, 38, 38))
    expect_equal(datasetOfRates5$.data$overallEvent, c(10, 3, 3, 20, 8, 7, 32, 13, 12, 44, 19, 20))
})

test_that("Trim command works as expected for rates", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]{fs:tab:output:getDatasetRates}

    # Reference dataset: identical input, never padded or trimmed
    datasetOfRatesExpected <- getDataset(
        overallSampleSizes1 = c(11, 24, 36, 49),
        overallSampleSizes2 = c(8, 18, 27, 38),
        overallSampleSizes3 = c(8, 18, 27, 38),
        overallEvents1 = c(10, 20, 32, 44),
        overallEvents2 = c(3, 8, 13, 19),
        overallEvents3 = c(3, 7, 12, 20)
    )
    datasetOfRates <- getDataset(
        overallSampleSizes1 = c(11, 24, 36, 49),
        overallSampleSizes2 = c(8, 18, 27, 38),
        overallSampleSizes3 = c(8, 18, 27, 38),
        overallEvents1 = c(10, 20, 32, 44),
        overallEvents2 = c(3, 8, 13, 19),
        overallEvents3 = c(3, 7, 12, 20)
    )
    # Pad to 6 stages with NAs, then trim back to 4 stages:
    # the result must equal the untouched reference dataset.
    datasetOfRates$.fillWithNAs(6)
    datasetOfRates$.trim(4)
    expect_equal(datasetOfRates$stages, datasetOfRatesExpected$stages)
    expect_equal(datasetOfRates$groups, datasetOfRatesExpected$groups)
    expect_equal(datasetOfRates$overallEvents, datasetOfRatesExpected$overallEvents)
    expect_equal(datasetOfRates$events, datasetOfRatesExpected$events)
    expect_equal(datasetOfRates$.data$stage, datasetOfRatesExpected$.data$stage)
    expect_equal(datasetOfRates$.data$group,
datasetOfRatesExpected$.data$group) expect_equal(datasetOfRates$.data$overallEvent, datasetOfRatesExpected$.data$overallEvent) expect_equal(datasetOfRates$.data$event, datasetOfRatesExpected$.data$event) }) test_that("Creation of a dataset of survival data using stage wise data", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:dataInputVariants} # @refFS[Tab.]fs:tab:output:getDatasetSurvival} datasetSurvival1 <- getDataset( events = c(8, 7, 4, 12), allocationRatios = c(1, 1, 1, 3.58333333333333), logRanks = c(1.520, 1.273, 0.503, 0.887) ) ## Comparison of the results of DatasetSurvival object 'datasetSurvival1' with expected results expect_equal(datasetSurvival1$stages, c(1, 2, 3, 4)) expect_equal(datasetSurvival1$groups, c(1, 1, 1, 1)) expect_equal(datasetSurvival1$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_)) expect_equal(datasetSurvival1$overallEvents, c(8, 15, 19, 31)) expect_equal(datasetSurvival1$overallAllocationRatios, c(1, 1, 1, 2), tolerance = 1e-07) expect_equal(datasetSurvival1$overallLogRanks, c(1.52, 1.9796756, 1.9897802, 2.1096275), tolerance = 1e-07) expect_equal(datasetSurvival1$events, c(8, 7, 4, 12)) expect_equal(datasetSurvival1$allocationRatios, c(1, 1, 1, 3.5833333), tolerance = 1e-07) expect_equal(datasetSurvival1$logRanks, c(1.52, 1.273, 0.503, 0.887), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(datasetSurvival1), NA))) expect_output(print(datasetSurvival1)$show()) invisible(capture.output(expect_error(summary(datasetSurvival1), NA))) expect_output(summary(datasetSurvival1)$show()) datasetSurvival1CodeBased <- eval(parse(text = getObjectRCode(datasetSurvival1, stringWrapParagraphWidth = NULL))) expect_equal(datasetSurvival1CodeBased$stages, datasetSurvival1$stages, tolerance = 1e-05) expect_equal(datasetSurvival1CodeBased$groups, datasetSurvival1$groups, tolerance = 1e-05) expect_equal(datasetSurvival1CodeBased$subsets, datasetSurvival1$subsets, 
tolerance = 1e-05)
        expect_equal(datasetSurvival1CodeBased$overallEvents, datasetSurvival1$overallEvents, tolerance = 1e-05)
        expect_equal(datasetSurvival1CodeBased$overallAllocationRatios, datasetSurvival1$overallAllocationRatios, tolerance = 1e-05)
        expect_equal(datasetSurvival1CodeBased$overallLogRanks, datasetSurvival1$overallLogRanks, tolerance = 1e-05)
        expect_equal(datasetSurvival1CodeBased$events, datasetSurvival1$events, tolerance = 1e-05)
        expect_equal(datasetSurvival1CodeBased$allocationRatios, datasetSurvival1$allocationRatios, tolerance = 1e-05)
        expect_equal(datasetSurvival1CodeBased$logRanks, datasetSurvival1$logRanks, tolerance = 1e-05)
        # Generic accessors must also work on the dataset object.
        expect_type(names(datasetSurvival1), "character")
        df <- as.data.frame(datasetSurvival1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetSurvival1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetSurvival1$.data' with expected results
    expect_equal(datasetSurvival1$.data$stage, factor(c(1, 2, 3, 4)))
    expect_equal(datasetSurvival1$.data$group, factor(c(1, 1, 1, 1)))
    expect_equal(datasetSurvival1$.data$subset, factor(c(NA, NA, NA, NA)))
    expect_equal(datasetSurvival1$.data$overallEvent, c(8, 15, 19, 31))
    expect_equal(datasetSurvival1$.data$overallAllocationRatio, c(1, 1, 1, 2), tolerance = 1e-07)
    expect_equal(datasetSurvival1$.data$overallLogRank, c(1.52, 1.9796756, 1.9897802, 2.1096275), tolerance = 1e-07)
    expect_equal(datasetSurvival1$.data$event, c(8, 7, 4, 12))
    expect_equal(datasetSurvival1$.data$allocationRatio, c(1, 1, 1, 3.5833333), tolerance = 1e-07)
    expect_equal(datasetSurvival1$.data$logRank, c(1.52, 1.273, 0.503, 0.887), tolerance = 1e-07)

    # Build stage results for this dataset under the three design types
    # (stageResults1/2/3 — presumably group sequential, inverse normal and
    # Fisher; see getMultipleStageResultsForDataset — TODO confirm).
    x <- getMultipleStageResultsForDataset(datasetSurvival1)

    ## Comparison of the results of StageResultsSurvival object 'x$stageResults1' with expected results
    # Test statistics / p-values are expected NA here; only events, allocation
    # ratios and effect sizes carry values (padded to 5 stages with NA).
    expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_,
        NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallEvents, c(8, 15, 19, 31, NA_real_))
    expect_equal(x$stageResults1$overallAllocationRatios, c(1, 1, 1, 2, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$events, c(8, 7, 4, 12, NA_real_))
    expect_equal(x$stageResults1$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$effectSizes, c(2.9294137, 2.7795807, 2.4917213, 2.2339445, NA_real_), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults1), NA)))
        expect_output(print(x$stageResults1)$show())
        invisible(capture.output(expect_error(summary(x$stageResults1), NA)))
        expect_output(summary(x$stageResults1)$show())
        # Round-trip via getObjectRCode, then field-by-field comparison.
        x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallEvents, x$stageResults1$overallEvents, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallAllocationRatios, x$stageResults1$overallAllocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$events, x$stageResults1$events, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$allocationRatios, x$stageResults1$allocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$testStatistics, x$stageResults1$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$effectSizes, x$stageResults1$effectSizes, tolerance = 1e-05)
        expect_type(names(x$stageResults1), "character")
        df <- as.data.frame(x$stageResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsSurvival object 'x$stageResults2' with expected results
    # Same dataset-derived fields as stageResults1, plus inverse normal
    # combination fields (combInverseNormal, weightsInverseNormal).
    expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallEvents, c(8, 15, 19, 31, NA_real_))
    expect_equal(x$stageResults2$overallAllocationRatios, c(1, 1, 1, 2, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$events, c(8, 7, 4, 12, NA_real_))
    expect_equal(x$stageResults2$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$effectSizes, c(2.9294137, 2.7795807, 2.4917213, 2.2339445, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults2), NA)))
        expect_output(print(x$stageResults2)$show())
        invisible(capture.output(expect_error(summary(x$stageResults2), NA)))
        expect_output(summary(x$stageResults2)$show())
        x$stageResults2CodeBased <- eval(parse(text = getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallEvents, x$stageResults2$overallEvents, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallAllocationRatios, x$stageResults2$overallAllocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$events, x$stageResults2$events, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$allocationRatios, x$stageResults2$allocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$effectSizes, x$stageResults2$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05)
        expect_type(names(x$stageResults2), "character")
        df <- as.data.frame(x$stageResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsSurvival object 'x$stageResults3' with expected results
    # Same dataset-derived fields again, plus Fisher combination fields
    # (combFisher, weightsFisher).
    expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallEvents, c(8, 15, 19, 31, NA_real_))
    expect_equal(x$stageResults3$overallAllocationRatios, c(1, 1, 1, 2, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$events, c(8, 7, 4, 12, NA_real_))
    expect_equal(x$stageResults3$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$effectSizes, c(2.9294137, 2.7795807, 2.4917213, 2.2339445, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults3), NA)))
        expect_output(print(x$stageResults3)$show())
        invisible(capture.output(expect_error(summary(x$stageResults3), NA)))
        expect_output(summary(x$stageResults3)$show())
        x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallEvents, x$stageResults3$overallEvents, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallAllocationRatios, x$stageResults3$overallAllocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$events, x$stageResults3$events, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$allocationRatios,
            x$stageResults3$allocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$effectSizes, x$stageResults3$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05)
        expect_type(names(x$stageResults3), "character")
        df <- as.data.frame(x$stageResults3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Wide-format accessors and the long-format .data slot must agree.
    expect_equal(factor(datasetSurvival1$stages), datasetSurvival1$.data$stage, tolerance = 1e-07)
    expect_equal(factor(datasetSurvival1$groups), datasetSurvival1$.data$group, tolerance = 1e-07)
    expect_equal(datasetSurvival1$events, datasetSurvival1$.data$event, tolerance = 1e-07)
    expect_equal(datasetSurvival1$allocationRatios, datasetSurvival1$.data$allocationRatio, tolerance = 1e-07)
    expect_equal(datasetSurvival1$logRanks, datasetSurvival1$.data$logRank, tolerance = 1e-07)
    expect_equal(datasetSurvival1$overallEvents, datasetSurvival1$.data$overallEvent, tolerance = 1e-07)
    expect_equal(datasetSurvival1$overallAllocationRatios, datasetSurvival1$.data$overallAllocationRatio, tolerance = 1e-07)
    expect_equal(datasetSurvival1$overallLogRanks, datasetSurvival1$.data$overallLogRank, tolerance = 1e-07)
})

# Counterpart of the previous test: the survival dataset is built from
# cumulative ("overall") inputs and the stage-wise values are derived.
test_that("Creation of a dataset of survival data using overall data", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]fs:tab:output:getDatasetSurvival}
    datasetSurvival2 <- getDataset(
        overallEvents = c(8, 15, 19, 31),
overallAllocationRatios = c(1, 1, 1, 2),
        overallLogRanks = c(1.52, 1.98, 1.99, 2.11)
    )

    ## Comparison of the results of DatasetSurvival object 'datasetSurvival2' with expected results
    # Overall inputs are echoed back; stage-wise events/allocation ratios/
    # log-ranks are derived from them by getDataset().
    expect_equal(datasetSurvival2$stages, c(1, 2, 3, 4))
    expect_equal(datasetSurvival2$groups, c(1, 1, 1, 1))
    expect_equal(datasetSurvival2$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_))
    expect_equal(datasetSurvival2$overallEvents, c(8, 15, 19, 31))
    expect_equal(datasetSurvival2$overallAllocationRatios, c(1, 1, 1, 2))
    expect_equal(datasetSurvival2$overallLogRanks, c(1.52, 1.98, 1.99, 2.11), tolerance = 1e-07)
    expect_equal(datasetSurvival2$events, c(8, 7, 4, 12))
    expect_equal(datasetSurvival2$allocationRatios, c(1, 1, 1, 3.5833333), tolerance = 1e-07)
    expect_equal(datasetSurvival2$logRanks, c(1.52, 1.2734749, 0.50285094, 0.8873221), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print/summary must run without error; round-trip via getObjectRCode.
        invisible(capture.output(expect_error(print(datasetSurvival2), NA)))
        expect_output(print(datasetSurvival2)$show())
        invisible(capture.output(expect_error(summary(datasetSurvival2), NA)))
        expect_output(summary(datasetSurvival2)$show())
        datasetSurvival2CodeBased <- eval(parse(text = getObjectRCode(datasetSurvival2, stringWrapParagraphWidth = NULL)))
        expect_equal(datasetSurvival2CodeBased$stages, datasetSurvival2$stages, tolerance = 1e-05)
        expect_equal(datasetSurvival2CodeBased$groups, datasetSurvival2$groups, tolerance = 1e-05)
        expect_equal(datasetSurvival2CodeBased$subsets, datasetSurvival2$subsets, tolerance = 1e-05)
        expect_equal(datasetSurvival2CodeBased$overallEvents, datasetSurvival2$overallEvents, tolerance = 1e-05)
        expect_equal(datasetSurvival2CodeBased$overallAllocationRatios, datasetSurvival2$overallAllocationRatios, tolerance = 1e-05)
        expect_equal(datasetSurvival2CodeBased$overallLogRanks, datasetSurvival2$overallLogRanks, tolerance = 1e-05)
        expect_equal(datasetSurvival2CodeBased$events, datasetSurvival2$events, tolerance = 1e-05)
        expect_equal(datasetSurvival2CodeBased$allocationRatios, datasetSurvival2$allocationRatios, tolerance = 1e-05)
        expect_equal(datasetSurvival2CodeBased$logRanks, datasetSurvival2$logRanks, tolerance = 1e-05)
        expect_type(names(datasetSurvival2), "character")
        df <- as.data.frame(datasetSurvival2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetSurvival2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetSurvival2$.data' with expected results
    expect_equal(datasetSurvival2$.data$stage, factor(c(1, 2, 3, 4)))
    expect_equal(datasetSurvival2$.data$group, factor(c(1, 1, 1, 1)))
    expect_equal(datasetSurvival2$.data$subset, factor(c(NA, NA, NA, NA)))
    expect_equal(datasetSurvival2$.data$overallEvent, c(8, 15, 19, 31))
    expect_equal(datasetSurvival2$.data$overallAllocationRatio, c(1, 1, 1, 2))
    expect_equal(datasetSurvival2$.data$overallLogRank, c(1.52, 1.98, 1.99, 2.11), tolerance = 1e-07)
    expect_equal(datasetSurvival2$.data$event, c(8, 7, 4, 12))
    expect_equal(datasetSurvival2$.data$allocationRatio, c(1, 1, 1, 3.5833333), tolerance = 1e-07)
    expect_equal(datasetSurvival2$.data$logRank, c(1.52, 1.2734749, 0.50285094, 0.8873221), tolerance = 1e-07)

    # Stage results for the overall-data dataset; effect sizes differ slightly
    # from the stage-wise variant because the overall log-ranks were rounded
    # on input (1.98 vs 1.9796756 etc.).
    x <- getMultipleStageResultsForDataset(datasetSurvival2)

    ## Comparison of the results of StageResultsSurvival object 'x$stageResults1' with expected results
    expect_equal(x$stageResults1$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallEvents, c(8, 15, 19, 31, NA_real_))
    expect_equal(x$stageResults1$overallAllocationRatios, c(1, 1, 1, 2, NA_real_))
    expect_equal(x$stageResults1$events, c(8, 7, 4, 12, NA_real_))
    expect_equal(x$stageResults1$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults1$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults1$effectSizes, c(2.9294137, 2.7800464, 2.4919726, 2.2342616, NA_real_), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults1), NA)))
        expect_output(print(x$stageResults1)$show())
        invisible(capture.output(expect_error(summary(x$stageResults1), NA)))
        expect_output(summary(x$stageResults1)$show())
        x$stageResults1CodeBased <- eval(parse(text = getObjectRCode(x$stageResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults1CodeBased$overallTestStatistics, x$stageResults1$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallEvents, x$stageResults1$overallEvents, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallAllocationRatios, x$stageResults1$overallAllocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$events, x$stageResults1$events, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$allocationRatios, x$stageResults1$allocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$testStatistics, x$stageResults1$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$pValues, x$stageResults1$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$overallPValues, x$stageResults1$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults1CodeBased$effectSizes, x$stageResults1$effectSizes, tolerance = 1e-05)
        expect_type(names(x$stageResults1), "character")
        df <- as.data.frame(x$stageResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsSurvival object 'x$stageResults2' with expected results
    # Includes inverse normal combination fields.
    expect_equal(x$stageResults2$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallEvents, c(8, 15, 19, 31, NA_real_))
    expect_equal(x$stageResults2$overallAllocationRatios, c(1, 1, 1, 2, NA_real_))
    expect_equal(x$stageResults2$events, c(8, 7, 4, 12, NA_real_))
    expect_equal(x$stageResults2$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$effectSizes, c(2.9294137, 2.7800464, 2.4919726, 2.2342616, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults2$combInverseNormal, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults2$weightsInverseNormal, c(0.4472136, 0.4472136, 0.4472136, 0.4472136, 0.4472136), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults2), NA)))
        expect_output(print(x$stageResults2)$show())
        invisible(capture.output(expect_error(summary(x$stageResults2), NA)))
        expect_output(summary(x$stageResults2)$show())
        x$stageResults2CodeBased <- eval(parse(text = getObjectRCode(x$stageResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults2CodeBased$overallTestStatistics, x$stageResults2$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallEvents,
            x$stageResults2$overallEvents, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallAllocationRatios, x$stageResults2$overallAllocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$events, x$stageResults2$events, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$allocationRatios, x$stageResults2$allocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$testStatistics, x$stageResults2$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$pValues, x$stageResults2$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$overallPValues, x$stageResults2$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$effectSizes, x$stageResults2$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$combInverseNormal, x$stageResults2$combInverseNormal, tolerance = 1e-05)
        expect_equal(x$stageResults2CodeBased$weightsInverseNormal, x$stageResults2$weightsInverseNormal, tolerance = 1e-05)
        expect_type(names(x$stageResults2), "character")
        df <- as.data.frame(x$stageResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of StageResultsSurvival object 'x$stageResults3' with expected results
    # Includes Fisher combination fields.
    expect_equal(x$stageResults3$overallTestStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallEvents, c(8, 15, 19, 31, NA_real_))
    expect_equal(x$stageResults3$overallAllocationRatios, c(1, 1, 1, 2, NA_real_))
    expect_equal(x$stageResults3$events, c(8, 7, 4, 12, NA_real_))
    expect_equal(x$stageResults3$allocationRatios, c(1, 1, 1, 3.5833333, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$testStatistics, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$pValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$overallPValues, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$effectSizes, c(2.9294137, 2.7800464, 2.4919726, 2.2342616, NA_real_), tolerance = 1e-07)
    expect_equal(x$stageResults3$combFisher, c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_))
    expect_equal(x$stageResults3$weightsFisher, c(1, 1, 1, 1, 1), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x$stageResults3), NA)))
        expect_output(print(x$stageResults3)$show())
        invisible(capture.output(expect_error(summary(x$stageResults3), NA)))
        expect_output(summary(x$stageResults3)$show())
        x$stageResults3CodeBased <- eval(parse(text = getObjectRCode(x$stageResults3, stringWrapParagraphWidth = NULL)))
        expect_equal(x$stageResults3CodeBased$overallTestStatistics, x$stageResults3$overallTestStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallEvents, x$stageResults3$overallEvents, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallAllocationRatios, x$stageResults3$overallAllocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$events, x$stageResults3$events, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$allocationRatios, x$stageResults3$allocationRatios, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$testStatistics, x$stageResults3$testStatistics, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$pValues, x$stageResults3$pValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$overallPValues, x$stageResults3$overallPValues, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$effectSizes, x$stageResults3$effectSizes, tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$combFisher, x$stageResults3$combFisher,
            tolerance = 1e-05)
        expect_equal(x$stageResults3CodeBased$weightsFisher, x$stageResults3$weightsFisher, tolerance = 1e-05)
        expect_type(names(x$stageResults3), "character")
        df <- as.data.frame(x$stageResults3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x$stageResults3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # Three-group dataset with a dropped arm: group 2 has NA at stage 2,
    # and negative log-ranks (directionality of the effect).
    datasetSurvival3 <- getDataset(
        events1 = c(25, 32),
        events2 = c(18, NA),
        events3 = c(22, 36),
        logRanks1 = -c(2.2, 1.8),
        logRanks2 = -c(1.99, NA),
        logRanks3 = -c(2.32, 2.11)
    )

    ## Comparison of the results of DatasetSurvival object 'datasetSurvival3' with expected results
    expect_equal(datasetSurvival3$stages, c(1, 1, 1, 2, 2, 2))
    expect_equal(datasetSurvival3$groups, c(1, 2, 3, 1, 2, 3))
    expect_equal(datasetSurvival3$subsets, c(NA_character_, NA_character_, NA_character_, NA_character_, NA_character_, NA_character_))
    expect_equal(datasetSurvival3$overallEvents, c(25, 18, 22, 57, NA_real_, 58))
    expect_equal(datasetSurvival3$overallAllocationRatios, c(1, 1, 1, 1, NA_real_, 1))
    expect_equal(datasetSurvival3$overallLogRanks, c(-2.2, -1.99, -2.32, -2.8056692, NA_real_, -3.0911851), tolerance = 1e-07)
    expect_equal(datasetSurvival3$events, c(25, 18, 22, 32, NA_real_, 36))
    expect_equal(datasetSurvival3$allocationRatios, c(1, 1, 1, 1, NA_real_, 1))
    expect_equal(datasetSurvival3$logRanks, c(-2.2, -1.99, -2.32, -1.8, NA_real_, -2.11), tolerance = 1e-07)

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(datasetSurvival3), NA)))
        expect_output(print(datasetSurvival3)$show())
        invisible(capture.output(expect_error(summary(datasetSurvival3), NA)))
        expect_output(summary(datasetSurvival3)$show())
        datasetSurvival3CodeBased <- eval(parse(text = getObjectRCode(datasetSurvival3, stringWrapParagraphWidth = NULL)))
        expect_equal(datasetSurvival3CodeBased$stages, datasetSurvival3$stages, tolerance = 1e-05)
        expect_equal(datasetSurvival3CodeBased$groups,
datasetSurvival3$groups, tolerance = 1e-05)
        expect_equal(datasetSurvival3CodeBased$subsets, datasetSurvival3$subsets, tolerance = 1e-05)
        expect_equal(datasetSurvival3CodeBased$overallEvents, datasetSurvival3$overallEvents, tolerance = 1e-05)
        expect_equal(datasetSurvival3CodeBased$overallAllocationRatios, datasetSurvival3$overallAllocationRatios, tolerance = 1e-05)
        expect_equal(datasetSurvival3CodeBased$overallLogRanks, datasetSurvival3$overallLogRanks, tolerance = 1e-05)
        expect_equal(datasetSurvival3CodeBased$events, datasetSurvival3$events, tolerance = 1e-05)
        expect_equal(datasetSurvival3CodeBased$allocationRatios, datasetSurvival3$allocationRatios, tolerance = 1e-05)
        expect_equal(datasetSurvival3CodeBased$logRanks, datasetSurvival3$logRanks, tolerance = 1e-05)
        expect_type(names(datasetSurvival3), "character")
        df <- as.data.frame(datasetSurvival3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(datasetSurvival3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    ## Comparison of the results of data.frame object 'datasetSurvival3$.data' with expected results
    expect_equal(datasetSurvival3$.data$stage, factor(c(1, 1, 1, 2, 2, 2)))
    expect_equal(datasetSurvival3$.data$group, factor(c(1, 2, 3, 1, 2, 3)))
    expect_equal(datasetSurvival3$.data$subset, factor(c(NA, NA, NA, NA, NA, NA)))
    expect_equal(datasetSurvival3$.data$overallEvent, c(25, 18, 22, 57, NA_real_, 58))
    expect_equal(datasetSurvival3$.data$overallAllocationRatio, c(1, 1, 1, 1, NA_real_, 1))
    expect_equal(datasetSurvival3$.data$overallLogRank, c(-2.2, -1.99, -2.32, -2.8056692, NA_real_, -3.0911851), tolerance = 1e-07)
    expect_equal(datasetSurvival3$.data$event, c(25, 18, 22, 32, NA_real_, 36))
    expect_equal(datasetSurvival3$.data$allocationRatio, c(1, 1, 1, 1, NA_real_, 1))
    expect_equal(datasetSurvival3$.data$logRank, c(-2.2, -1.99, -2.32, -1.8, NA_real_, -2.11), tolerance = 1e-07)
})

# Same pad/trim round-trip as the rates variant above, but for a three-group
# survival dataset (note: "suvival" typo is the test's published name).
test_that("Trim command works as expected for suvival data", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]fs:tab:output:getDatasetSurvival}
    # Reference object: built directly from the two observed stages.
    dataExampleSurvivalExpected <- getDataset(
        events1 = c(25, 32),
        events2 = c(18, NA),
        events3 = c(22, 36),
        logRanks1 = c(2.2, 1.8),
        logRanks2 = c(1.99, NA),
        logRanks3 = c(2.32, 2.11)
    )
    # Object under test: identical input, then padded to 4 stages and trimmed
    # back to 2; must equal the reference field by field.
    dataExampleSurvival <- getDataset(
        events1 = c(25, 32),
        events2 = c(18, NA),
        events3 = c(22, 36),
        logRanks1 = c(2.2, 1.8),
        logRanks2 = c(1.99, NA),
        logRanks3 = c(2.32, 2.11)
    )
    dataExampleSurvival$.fillWithNAs(4)
    dataExampleSurvival$.trim(2)
    expect_equal(dataExampleSurvival$stages, dataExampleSurvivalExpected$stages)
    expect_equal(dataExampleSurvival$groups, dataExampleSurvivalExpected$groups)
    expect_equal(dataExampleSurvival$overallEvents, dataExampleSurvivalExpected$overallEvents)
    expect_equal(dataExampleSurvival$overallAllocationRatios, dataExampleSurvivalExpected$overallAllocationRatios)
    expect_equal(dataExampleSurvival$overallLogRanks, dataExampleSurvivalExpected$overallLogRanks, tolerance = 1e-07)
    expect_equal(dataExampleSurvival$events, dataExampleSurvivalExpected$events)
    expect_equal(dataExampleSurvival$allocationRatios, dataExampleSurvivalExpected$allocationRatios)
    expect_equal(dataExampleSurvival$logRanks, dataExampleSurvivalExpected$logRanks, tolerance = 1e-07)
    expect_equal(dataExampleSurvival$.data$stage, dataExampleSurvivalExpected$.data$stage)
    expect_equal(dataExampleSurvival$.data$group, dataExampleSurvivalExpected$.data$group)
    expect_equal(dataExampleSurvival$.data$overallEvent, dataExampleSurvivalExpected$.data$overallEvent)
    expect_equal(dataExampleSurvival$.data$overallAllocationRatio, dataExampleSurvivalExpected$.data$overallAllocationRatio)
    expect_equal(dataExampleSurvival$.data$overallLogRank, dataExampleSurvivalExpected$.data$overallLogRank, tolerance = 1e-07)
    expect_equal(dataExampleSurvival$.data$event, dataExampleSurvivalExpected$.data$event)
    expect_equal(dataExampleSurvival$.data$allocationRatio, dataExampleSurvivalExpected$.data$allocationRatio)
    expect_equal(dataExampleSurvival$.data$logRank, dataExampleSurvivalExpected$.data$logRank, tolerance = 1e-07)
})

# Checks the stage/group counting helpers on means datasets, including one
# with a truncated third stage in group 1 (NA tail).
test_that("Dataset functions 'getNumberOfStages' and 'getNumberOfGroups' work as expected for means", {
    .skipTestIfDisabled()

    # @refFS[Tab.]{fs:tab:dataInputVariants}
    # @refFS[Tab.]fs:tab:output:getDatasetMeans}
    suppressWarnings(
        data1 <- getDataset(
            overallN1 = c(22, 33, NA),
            overallN2 = c(20, 34, 56),
            overallN3 = c(22, 31, 52),
            overallMeans1 = c(1.64, 1.54, NA),
            overallMeans2 = c(1.7, 1.5, 1.77),
            overallMeans3 = c(2.5, 2.06, 2.99),
            overallStDevs1 = c(1.5, 1.9, NA),
            overallStDevs2 = c(1.3, 1.3, 1.1),
            overallStDevs3 = c(1, 1.3, 1.8)
        )
    )
    # With a partially NA third stage the counts are unchanged for means data.
    expect_equal(data1$getNumberOfStages(), 3)
    expect_equal(data1$getNumberOfStages(FALSE), 3)
    expect_equal(data1$getNumberOfGroups(), 3)
    expect_equal(data1$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 3)

    .skipTestIfDisabled()

    # Fully observed dataset; data3 below repeats data2 verbatim
    # (NOTE(review): looks like generated duplication — both kept as-is).
    data2 <- getDataset(
        overallN1 = c(22, 33, 55),
        overallN2 = c(20, 34, 56),
        overallN3 = c(22, 31, 52),
        overallMeans1 = c(1.64, 1.54, 2.10),
        overallMeans2 = c(1.7, 1.5, 1.77),
        overallMeans3 = c(2.5, 2.06, 2.99),
        overallStDevs1 = c(1.5, 1.9, 1.7),
        overallStDevs2 = c(1.3, 1.3, 1.1),
        overallStDevs3 = c(1, 1.3, 1.8)
    )
    expect_equal(data2$getNumberOfStages(), 3)
    expect_equal(data2$getNumberOfStages(FALSE), 3)
    expect_equal(data2$getNumberOfGroups(), 3)
    expect_equal(data2$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 3)

    data3 <- getDataset(
        overallN1 = c(22, 33, 55),
        overallN2 = c(20, 34, 56),
        overallN3 = c(22, 31, 52),
        overallMeans1 = c(1.64, 1.54, 2.10),
        overallMeans2 = c(1.7, 1.5, 1.77),
        overallMeans3 = c(2.5, 2.06, 2.99),
        overallStDevs1 = c(1.5, 1.9, 1.7),
        overallStDevs2 = c(1.3, 1.3, 1.1),
        overallStDevs3 = c(1, 1.3, 1.8)
    )
    expect_equal(data3$getNumberOfStages(), 3)
    expect_equal(data3$getNumberOfStages(FALSE), 3)
    expect_equal(data3$getNumberOfGroups(), 3)
    expect_equal(data3$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 3)
})

test_that("Dataset functions 'getNumberOfStages' and 
'getNumberOfGroups' work as expected for rates", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:dataInputVariants} # @refFS[Tab.]fs:tab:output:getDatasetRates} data1 <- getDataset( overallSampleSizes1 = c(11, 24, 36, NA), overallSampleSizes2 = c(8, 18, 27, NA), overallSampleSizes3 = c(8, 18, 27, NA), overallEvents1 = c(10, 20, 32, NA), overallEvents2 = c(3, 8, 13, NA), overallEvents3 = c(3, 7, 12, NA) ) expect_equal(data1$getNumberOfStages(), 3) expect_equal(data1$getNumberOfStages(FALSE), 4) expect_equal(data1$getNumberOfGroups(), 3) expect_equal(data1$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 3) .skipTestIfDisabled() data2 <- getDataset( overallSampleSizes1 = c(11, 24, 36, 49), overallSampleSizes2 = c(8, 18, 27, 38), overallSampleSizes3 = c(8, 18, 27, 38), overallEvents1 = c(10, 20, 32, 44), overallEvents2 = c(3, 8, 13, 19), overallEvents3 = c(3, 7, 12, 20) ) expect_equal(data2$getNumberOfStages(), 4) expect_equal(data2$getNumberOfStages(FALSE), 4) expect_equal(data2$getNumberOfGroups(), 3) expect_equal(data2$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 3) data3 <- getDataset( overallSampleSizes1 = c(11, 24, 36, 49), overallSampleSizes2 = c(8, 18, NA, NA), overallSampleSizes3 = c(8, 18, NA, NA), overallSampleSizes4 = c(8, 18, 27, 38), overallEvents1 = c(10, 20, 32, 44), overallEvents2 = c(3, 8, NA, NA), overallEvents3 = c(3, 8, NA, NA), overallEvents4 = c(3, 7, 12, 20) ) expect_equal(data3$getNumberOfStages(), 4) expect_equal(data3$getNumberOfStages(FALSE), 4) expect_equal(data3$getNumberOfGroups(), 4) expect_equal(data3$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 4) data4 <- getDataset( overallSampleSizes1 = c(11, 24, 36), overallSampleSizes2 = c(8, 18, 27), overallEvents1 = c(10, 20, 32), overallEvents2 = c(3, 7, 12) ) expect_equal(data4$getNumberOfStages(), 3) expect_equal(data4$getNumberOfStages(FALSE), 3) expect_equal(data4$getNumberOfGroups(), 2) expect_equal(data4$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 2) data5 
<- getDataset( overallSampleSizes1 = c(11, 24, NA), overallSampleSizes2 = c(8, 18, NA), overallEvents1 = c(10, 20, NA), overallEvents2 = c(3, 7, NA) ) expect_equal(data5$getNumberOfStages(), 2) expect_equal(data5$getNumberOfStages(FALSE), 3) expect_equal(data5$getNumberOfGroups(), 2) expect_equal(data5$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 2) data6 <- getDataset( overallSampleSizes = c(11, 24, NA), overallEvents = c(3, 7, NA) ) expect_equal(data6$getNumberOfStages(), 2) expect_equal(data6$getNumberOfStages(FALSE), 3) expect_equal(data6$getNumberOfGroups(), 1) expect_equal(data6$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 1) }) test_that("Dataset functions 'getNumberOfStages' and 'getNumberOfGroups' work as expected for survival data", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:dataInputVariants} # @refFS[Tab.]fs:tab:output:getDatasetSurvival} data3 <- getDataset( overallEvents1 = c(13, 33), overallLogRanks1 = c(1.23, 1.55), overallEvents2 = c(16, 33), overallLogRanks2 = c(1.55, 2.2) ) expect_equal(data3$getNumberOfStages(), 2) expect_equal(data3$getNumberOfStages(FALSE), 2) expect_equal(data3$getNumberOfGroups(), 3) expect_equal(data3$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 2) data4 <- getDataset( events1 = c(13, NA), logRanks1 = c(1.23, NA), events2 = c(16, NA), logRanks2 = c(1.55, NA) ) expect_equal(data4$getNumberOfStages(), 1) expect_equal(data4$getNumberOfStages(FALSE), 2) expect_equal(data4$getNumberOfGroups(), 3) expect_equal(data4$getNumberOfGroups(survivalCorrectionEnabled = FALSE), 2) }) test_that("Function '.naOmitBackward' works as expected", { .skipTestIfDisabled() expect_equal(.naOmitBackward(c(1, NA_real_, 3, NA_real_)), c(1, NA_real_, 3)) expect_equal(.naOmitBackward(c(1, NA_real_, 3, NA_real_, 5)), c(1, NA_real_, 3, NA_real_, 5)) expect_equal(.naOmitBackward(c(1, NA_real_, NA_real_)), c(1)) expect_equal(.naOmitBackward(c(1, NA_real_, NA_real_, 4)), c(1, NA_real_, NA_real_, 4)) 
expect_equal(.naOmitBackward(c(1)), c(1)) expect_equal(.naOmitBackward(c(NA_real_)), c(NA_real_)) expect_equal(.naOmitBackward(c(1, 2, NA_real_)), c(1, 2)) }) test_plan_section("Testing that 'getDataset' Throws Exceptions as Expected") test_that("Wrong parameter usage of 'getDataset'", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:dataInputVariants} expect_error(getDataset(), "Missing argument: data.frame, data vectors, or datasets expected", fixed = TRUE ) expect_error(getDataset(1), "Illegal argument: all parameters must be named", fixed = TRUE ) expect_error(getDataset(n = 1), "Illegal argument: failed to identify dataset type", fixed = TRUE ) expect_error(getDataset(1, x = 2), "Illegal argument: all parameters must be named", fixed = TRUE ) expect_error(getDataset( overallSampleSizes1 = c(11, 24, 36, 49), overallSampleSizes2 = c(8, 18, 27, 38), overallSampleSizes3 = c(8, 18, 27, 38), overallEvents1 = c(10, 20, 32, 44), overallEvents2 = c(3, 8, 13, 19), overallEvents3 = c(3, 8, 13, 19), overallEvents1 = c(3, 8, 13, 19) ), "Illegal argument: the parameter names must be unique", fixed = TRUE) }) test_plan_section("Testing datasets for enrichment designs") test_that("Creation of a dataset of means with subsets", { .skipTestIfDisabled() x <- getDataset( stage = c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3), subset = c("S1", "S2", "S12", "R", "S1", "S2", "S12", "R", "S1", "S2", "S12", "R"), sampleSize1 = c(12, 14, 21, 33, 33, 22, 12, 14, 21, 33, 33, 22), sampleSize2 = c(18, 11, 21, 9, 17, 18, 12, 14, 21, 33, 33, 22), mean1 = c(107.7, 68.3, 84.9, 77.1, 77.7, 127.4, 107.7, 68.3, 84.9, 77.1, 77.7, 127.4), mean2 = c(165.6, 120.1, 195.9, 162.4, 111.1, 100.9, 107.7, 68.3, 84.9, 77.1, 77.7, 127.4), stDev1 = c(128.5, 124.0, 139.5, 163.5, 133.3, 134.7, 107.7, 68.3, 84.9, 77.1, 77.7, 127.4), stDev2 = c(120.1, 116.8, 185.0, 120.6, 145.6, 133.7, 107.7, 68.3, 84.9, 77.1, 77.7, 127.4) ) ## Comparison of the results of DatasetMeans object 'x' with expected results expect_equal(x$stages, 
c(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3)) expect_equal(x$groups, c(1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2)) expect_equal(x$subsets, c("S1", "S2", "S12", "R", "S1", "S2", "S12", "R", "S1", "S2", "S12", "R", "S1", "S2", "S12", "R", "S1", "S2", "S12", "R", "S1", "S2", "S12", "R")) expect_equal(x$sampleSizes, c(12, 14, 21, 33, 18, 11, 21, 9, 33, 22, 12, 14, 17, 18, 12, 14, 21, 33, 33, 22, 21, 33, 33, 22)) expect_equal(x$means, c(107.7, 68.3, 84.9, 77.1, 165.6, 120.1, 195.9, 162.4, 77.7, 127.4, 107.7, 68.3, 111.1, 100.9, 107.7, 68.3, 84.9, 77.1, 77.7, 127.4, 84.9, 77.1, 77.7, 127.4), tolerance = 1e-07) expect_equal(x$stDevs, c(128.5, 124, 139.5, 163.5, 120.1, 116.8, 185, 120.6, 133.3, 134.7, 107.7, 68.3, 145.6, 133.7, 107.7, 68.3, 84.9, 77.1, 77.7, 127.4, 84.9, 77.1, 77.7, 127.4), tolerance = 1e-07) expect_equal(x$overallSampleSizes, c(12, 14, 21, 33, 18, 11, 21, 9, 45, 36, 33, 47, 35, 29, 33, 23, 66, 69, 66, 69, 56, 62, 66, 45)) expect_equal(x$overallMeans, c(107.7, 68.3, 84.9, 77.1, 165.6, 120.1, 195.9, 162.4, 85.7, 104.41667, 93.190909, 74.478723, 139.12857, 108.18276, 163.82727, 105.12174, 85.445455, 91.352174, 85.445455, 91.352174, 118.79286, 91.63871, 120.76364, 116.01333), tolerance = 1e-07) expect_equal(x$overallStDevs, c(128.5, 124, 139.5, 163.5, 120.1, 116.8, 185, 120.6, 131.26649, 132.10351, 127.56945, 141.17802, 133.9849, 125.75856, 165.02815, 101.24395, 117.82181, 109.40115, 105.0948, 138.24808, 120.08511, 103.06452, 135.14016, 114.01099), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))) expect_equal(xCodeBased$stages, x$stages, tolerance = 1e-05) expect_equal(xCodeBased$groups, x$groups, tolerance = 1e-05) 
expect_equal(xCodeBased$subsets, x$subsets, tolerance = 1e-05) expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$means, x$means, tolerance = 1e-05) expect_equal(xCodeBased$stDevs, x$stDevs, tolerance = 1e-05) expect_equal(xCodeBased$overallSampleSizes, x$overallSampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$overallMeans, x$overallMeans, tolerance = 1e-05) expect_equal(xCodeBased$overallStDevs, x$overallStDevs, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x2 <- getDataset( stages = c(1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3), subsets = c("S2", "S12", "S1", "R", "S2", "S12", "S1", "R", "S2", "S12", "S1", "R"), overallSampleSizes1 = c(14, 21, 12, 33, 36, 33, 45, 47, 69, 66, 66, 69), overallSampleSizes2 = c(11, 21, 18, 9, 29, 33, 35, 23, 62, 66, 56, 45), overallMeans1 = c(68.3, 84.9, 107.7, 77.1, 104.417, 93.191, 85.7, 74.479, 91.352, 85.445, 85.445, 91.352), overallMeans2 = c(120.1, 195.9, 165.6, 162.4, 108.183, 163.827, 139.129, 105.122, 91.639, 120.764, 118.793, 116.013), overallStDevs1 = c(124, 139.5, 128.5, 163.5, 132.104, 127.569, 131.266, 141.178, 109.401, 105.095, 117.822, 138.248), overallStDevs2 = c(116.8, 185, 120.1, 120.6, 125.759, 165.028, 133.985, 101.244, 103.065, 135.14, 120.085, 114.011) ) ## Comparison of the results of DatasetMeans object 'x2' with expected results expect_equal(x2$stages, c(1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3)) expect_equal(x2$groups, c(1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2)) expect_equal(x2$subsets, c("S1", "S2", "S12", "R", "S1", "S2", "S12", "R", "S1", "S2", "S12", "R", "S1", "S2", "S12", "R", "S1", "S2", "S12", "R", "S1", "S2", "S12", "R")) expect_equal(x2$sampleSizes, c(12, 14, 21, 33, 18, 11, 
21, 9, 33, 22, 12, 14, 17, 18, 12, 14, 21, 33, 33, 22, 21, 33, 33, 22)) expect_equal(x2$means, c(107.7, 68.3, 84.9, 77.1, 165.6, 120.1, 195.9, 162.4, 77.7, 127.40055, 107.70025, 68.300929, 111.10088, 100.90039, 107.69925, 68.300429, 84.898571, 77.099273, 77.699, 127.39886, 84.899667, 77.100333, 77.701, 127.39905), tolerance = 1e-07) expect_equal(x2$stDevs, c(128.5, 124, 139.5, 163.5, 120.1, 116.8, 185, 120.6, 133.29934, 134.7007, 107.69841, 68.299913, 145.60038, 133.7007, 107.6989, 68.300382, 84.902527, 77.098435, 77.701172, 127.40021, 84.898999, 77.100624, 77.70049, 127.40009), tolerance = 1e-07) expect_equal(x2$overallSampleSizes, c(12, 14, 21, 33, 18, 11, 21, 9, 45, 36, 33, 47, 35, 29, 33, 23, 66, 69, 66, 69, 56, 62, 66, 45)) expect_equal(x2$overallMeans, c(107.7, 68.3, 84.9, 77.1, 165.6, 120.1, 195.9, 162.4, 85.7, 104.417, 93.191, 74.479, 139.129, 108.183, 163.827, 105.122, 85.445, 91.352, 85.445, 91.352, 118.793, 91.639, 120.764, 116.013), tolerance = 1e-07) expect_equal(x2$overallStDevs, c(128.5, 124, 139.5, 163.5, 120.1, 116.8, 185, 120.6, 131.266, 132.104, 127.569, 141.178, 133.985, 125.759, 165.028, 101.244, 117.822, 109.401, 105.095, 138.248, 120.085, 103.065, 135.14, 114.011), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$stages, x2$stages, tolerance = 1e-05) expect_equal(x2CodeBased$groups, x2$groups, tolerance = 1e-05) expect_equal(x2CodeBased$subsets, x2$subsets, tolerance = 1e-05) expect_equal(x2CodeBased$sampleSizes, x2$sampleSizes, tolerance = 1e-05) expect_equal(x2CodeBased$means, x2$means, tolerance = 1e-05) expect_equal(x2CodeBased$stDevs, x2$stDevs, tolerance = 1e-05) expect_equal(x2CodeBased$overallSampleSizes, x2$overallSampleSizes, 
tolerance = 1e-05) expect_equal(x2CodeBased$overallMeans, x2$overallMeans, tolerance = 1e-05) expect_equal(x2CodeBased$overallStDevs, x2$overallStDevs, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } expect_equal(x$sampleSizes, x2$sampleSizes) expect_equal(x$means, x2$means, tolerance = 1e-05) expect_equal(x$stDevs, x2$stDevs, tolerance = 1e-05) expect_equal(x$overallSampleSizes, x2$overallSampleSizes) expect_equal(x$overallMeans, x2$overallMeans, tolerance = 1e-05) expect_equal(x$overallStDevs, x2$overallStDevs, tolerance = 1e-05) }) test_that("Creation of a dataset of rates with subsets", { .skipTestIfDisabled() suppressWarnings( x <- getDataset( stage = c(1, 1, 2, 2), subset = c("S1", "R", "S1", "R"), sampleSizes1 = c(11, 24, 36, 49), sampleSizes2 = c(8, 18, 27, 38), sampleSizes3 = c(8, 18, 27, 38), events1 = c(10, 20, 32, 44), events2 = c(3, 8, 13, 19), events3 = c(3, 7, 12, 20) ) ) ## Comparison of the results of DatasetRates object 'x' with expected results expect_equal(x$stages, c(1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2)) expect_equal(x$groups, c(1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3)) expect_equal(x$subsets, c("S1", "R", "S1", "R", "S1", "R", "S1", "R", "S1", "R", "S1", "R")) expect_equal(x$sampleSizes, c(11, 24, 8, 18, 8, 18, 36, 49, 27, 38, 27, 38)) expect_equal(x$events, c(10, 20, 3, 8, 3, 7, 32, 44, 13, 19, 12, 20)) expect_equal(x$overallSampleSizes, c(11, 24, 8, 18, 8, 18, 47, 73, 35, 56, 35, 56)) expect_equal(x$overallEvents, c(10, 20, 3, 8, 3, 7, 42, 64, 16, 27, 15, 27)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, 
stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$stages, x$stages, tolerance = 1e-05) expect_equal(xCodeBased$groups, x$groups, tolerance = 1e-05) expect_equal(xCodeBased$subsets, x$subsets, tolerance = 1e-05) expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$events, x$events, tolerance = 1e-05) expect_equal(xCodeBased$overallSampleSizes, x$overallSampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$overallEvents, x$overallEvents, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("Creation of a dataset of survival data with subsets", { .skipTestIfDisabled() suppressWarnings( x <- getDataset( stage = c(1, 1, 2, 2), subset = c("S1", "R", "S1", "R"), events1 = c(10, 20, 32, 44), events2 = c(3, 8, 13, 19), events3 = c(3, 7, 12, 20), logRanks1 = c(2.2, 1.8, 1.9, 2.1), logRanks2 = c(1.99, 2.01, 2.05, 2.09), logRanks3 = c(2.32, 2.11, 2.14, 2.17) ) ) ## Comparison of the results of DatasetSurvival object 'x' with expected results expect_equal(x$stages, c(1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2)) expect_equal(x$groups, c(1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3)) expect_equal(x$subsets, c("S1", "R", "S1", "R", "S1", "R", "S1", "R", "S1", "R", "S1", "R")) expect_equal(x$overallEvents, c(10, 20, 3, 8, 3, 7, 42, 64, 16, 27, 15, 27)) expect_equal(x$overallAllocationRatios, c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)) expect_equal(x$overallLogRanks, c(2.2, 1.8, 1.99, 2.01, 2.32, 2.11, 2.731946, 2.7474586, 2.7095403, 2.8473447, 2.9516097, 2.941998), tolerance = 1e-07) expect_equal(x$events, c(10, 20, 3, 8, 3, 7, 32, 44, 13, 19, 12, 20)) expect_equal(x$allocationRatios, c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)) expect_equal(x$logRanks, c(2.2, 1.8, 1.99, 2.01, 2.32, 2.11, 1.9, 2.1, 2.05, 2.09, 2.14, 2.17), tolerance = 1e-07) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) suppressWarnings(xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL)))) expect_equal(xCodeBased$stages, x$stages, tolerance = 1e-05) expect_equal(xCodeBased$groups, x$groups, tolerance = 1e-05) expect_equal(xCodeBased$subsets, x$subsets, tolerance = 1e-05) expect_equal(xCodeBased$overallEvents, x$overallEvents, tolerance = 1e-05) expect_equal(xCodeBased$overallAllocationRatios, x$overallAllocationRatios, tolerance = 1e-05) expect_equal(xCodeBased$overallLogRanks, x$overallLogRanks, tolerance = 1e-05) expect_equal(xCodeBased$events, x$events, tolerance = 1e-05) expect_equal(xCodeBased$allocationRatios, x$allocationRatios, tolerance = 1e-05) expect_equal(xCodeBased$logRanks, x$logRanks, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("Illegal creation of a dataset of means with subsets: invalid sample size", { .skipTestIfDisabled() expect_error( getDataset( sampleSize1 = c(NA, NA), sampleSize2 = c(NA, NA), mean1 = c(NA, NA), mean2 = c(NA, NA), stDev1 = c(NA, NA), stDev2 = c(NA, NA) ), "Illegal argument: 'sampleSize1' is NA at first stage; a valid numeric value must be specified at stage 1", fixed = TRUE ) }) test_that("Illegal creation of a dataset of means with subsets: too small standard deviation (one subset)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(12, 21), sampleSize2 = c(18, 21), mean1 = c(107.7, 84.9), mean2 = c(165.6, 195.9), stDev1 = c(128.5, 139.5), stDev2 = c(120.1, 185.0) ) F <- getDataset( sampleSize1 = c(26, NA), sampleSize2 = c(29, NA), mean1 = c(86.48462, NA), mean2 = c(148.34138, 
NA), stDev1 = c(125.1485, NA), stDev2 = c(118.888, NA) ) expect_error(getDataset(S1 = S1, F = F), "Conflicting arguments: 'stDev' F (125.148) must be > 'stDev' S1 (128.5) in group 1 at stage 1", fixed = TRUE ) }) test_that("Illegal creation of a dataset of means with subsets: too small sample size in F (one group)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(12, 21), sampleSize2 = c(30, 21), mean1 = c(107.7, 84.9), mean2 = c(165.6, 195.9), stDev1 = c(128.5, 139.5), stDev2 = c(120.1, 185.0) ) F <- getDataset( sampleSize1 = c(26, NA), sampleSize2 = c(29, NA), mean1 = c(86.48462, NA), mean2 = c(148.34138, NA), stDev1 = c(129.1485, NA), stDev2 = c(122.888, NA) ) expect_error(getDataset(S1 = S1, F = F), "Conflicting arguments: 'sampleSize' F (29) must be >= 'sampleSize' S1 (30) in group 2 at stage 1", fixed = TRUE ) }) test_that("Illegal creation of a dataset of means with subsets: wrong deselection (one group)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(12, NA), sampleSize2 = c(18, NA), mean1 = c(107.7, NA), mean2 = c(165.6, NA), stDev1 = c(128.5, NA), stDev2 = c(120.1, NA) ) R <- getDataset( sampleSize1 = c(14, 21), sampleSize2 = c(11, 21), mean1 = c(68.3, 84.9), mean2 = c(120.1, 195.9), stDev1 = c(124.0, 139.5), stDev2 = c(116.8, 185.0) ) expect_error(getDataset(S1 = S1, R = R), paste0( "Conflicting arguments: if S1 is deselected (NA) then R also must be deselected (NA) ", "but, e.g., ", sQuote("sampleSize"), " R is 21 in group 1 at stage 2" ), fixed = TRUE ) }) test_that("Illegal creation of a dataset of means with subsets: inconsistent number of stages", { .skipTestIfDisabled() expect_error( getDataset( sampleSize1 = c(12, NA, 21), sampleSize2 = c(18, NA, 21), mean1 = c(107.7, NA, 84.9), mean2 = c(165.6, NA, 195.9), stDev1 = c(128.5, NA, 139.5), stDev2 = c(120.1, NA, 185.0) ), paste0( "Illegal argument: 'sampleSize1' contains a NA at stage 2 followed by a ", "value for a higher stage; NA's must be the last values" ), fixed = TRUE ) S1 
<- getDataset( sampleSize1 = c(12, 21), sampleSize2 = c(18, 21), mean1 = c(107.7, 84.9), mean2 = c(165.6, 195.9), stDev1 = c(128.5, 139.5), stDev2 = c(120.1, 185.0) ) R <- getDataset( sampleSize1 = c(14, NA, NA), sampleSize2 = c(11, NA, NA), mean1 = c(68.3, NA, NA), mean2 = c(120.1, NA, NA), stDev1 = c(124.0, NA, NA), stDev2 = c(116.8, NA, NA) ) expect_error(getDataset(S1 = S1, R = R), paste0( "Conflicting arguments: all subsets must have the identical ", "number of stages defined (kMax: S1 = 2, R = 3)" ), fixed = TRUE ) }) test_that("Illegal creation of a dataset of means with subsets: too small standard deviation in F (two subsets)", { .skipTestIfDisabled() S1N <- getDataset( sampleSize1 = c(39, 34, NA), sampleSize2 = c(33, 45, NA), stDev1 = c(156.5026, 120.084, NA), stDev2 = c(134.0254, 126.502, NA), mean1 = c(131.146, 114.4, NA), mean2 = c(93.191, 85.7, NA) ) S2N <- getDataset( sampleSize1 = c(32, NA, NA), sampleSize2 = c(35, NA, NA), stDev1 = c(163.645, NA, NA), stDev2 = c(131.888, NA, NA), mean1 = c(123.594, NA, NA), mean2 = c(78.26, NA, NA) ) F <- getDataset( sampleSize1 = c(69, NA, NA), sampleSize2 = c(80, NA, NA), stDev1 = c(140.4682, NA, NA), stDev2 = c(143.9796, NA, NA), mean1 = c(129.2957, NA, NA), mean2 = c(82.1875, NA, NA) ) expect_error(getDataset(S1 = S1N, S2 = S2N, F = F), paste0( "Conflicting arguments: 'stDev' F (140.468) must ", "be > 'stDev' S1 (156.503) in group 1 at stage 1" ), fixed = TRUE ) }) test_that("Illegal creation of a dataset of means with subsets: too small sample size in F (two subsets)", { .skipTestIfDisabled() S1N <- getDataset( sampleSize1 = c(39, 34, NA), sampleSize2 = c(33, 45, NA), stDev1 = c(156.5026, 120.084, NA), stDev2 = c(134.0254, 126.502, NA), mean1 = c(131.146, 114.4, NA), mean2 = c(93.191, 85.7, NA) ) S2N <- getDataset( sampleSize1 = c(32, NA, NA), sampleSize2 = c(35, NA, NA), stDev1 = c(163.645, NA, NA), stDev2 = c(131.888, NA, NA), mean1 = c(123.594, NA, NA), mean2 = c(78.26, NA, NA) ) F <- getDataset( sampleSize1 
= c(30, NA, NA), sampleSize2 = c(80, NA, NA), stDev1 = c(164.4682, NA, NA), stDev2 = c(143.9796, NA, NA), mean1 = c(129.2957, NA, NA), mean2 = c(82.1875, NA, NA) ) expect_error(getDataset(S1 = S1N, S2 = S2N, F = F), paste0( "Conflicting arguments: 'sampleSize' F (30) must ", "be >= 'sampleSize' S1 (39) in group 1 at stage 1" ), fixed = TRUE ) }) test_that("Illegal creation of a dataset of means with subsets: wrong deselection (three subsets)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize2 = c(12, 33, 21), sampleSize1 = c(18, 17, 23), mean2 = c(107.7, 77.7, 84.9), mean1 = c(125.6, 111.1, 99.9), stDev2 = c(128.5, 133.3, 84.9), stDev1 = c(120.1, 145.6, 74.3) ) S2 <- getDataset( sampleSize2 = c(14, NA, NA), sampleSize1 = c(11, NA, NA), mean2 = c(68.3, NA, NA), mean1 = c(100.1, NA, NA), stDev2 = c(124.0, NA, NA), stDev1 = c(116.8, NA, NA) ) S12 <- getDataset( sampleSize2 = c(21, 12, 33), sampleSize1 = c(21, 17, 31), mean2 = c(84.9, 107.7, 77.7), mean1 = c(135.9, 117.7, 97.7), stDev2 = c(139.5, 107.7, 77.7), stDev1 = c(185.0, 92.3, 87.3) ) R <- getDataset( sampleSize2 = c(33, 33, NA), sampleSize1 = c(19, 19, NA), mean2 = c(77.1, 77.1, NA), mean1 = c(142.4, 142.4, NA), stDev2 = c(163.5, 163.5, NA), stDev1 = c(120.6, 120.6, NA) ) expect_error(getDataset(S1 = S1, S2 = S2, S12 = S12, R = R), paste0( "Conflicting arguments: if S2 is deselected (NA) then R also must be deselected ", "(NA) but, e.g., ", sQuote("sampleSize"), " R is 19 in group 1 at stage 2" ), fixed = TRUE ) }) test_that("Valid creation of a dataset of means with subsets: no error occurs", { .skipTestIfDisabled() S1 <- getDataset( sampleSize2 = c(12, 33, 21), sampleSize1 = c(18, 17, 23), mean2 = c(107.7, 77.7, 84.9), mean1 = c(125.6, 111.1, 99.9), stDev2 = c(128.5, 133.3, 84.9), stDev1 = c(120.1, 145.6, 74.3) ) S2 <- getDataset( sampleSize2 = c(14, 22, NA), sampleSize1 = c(11, 18, NA), mean2 = c(68.3, 127.4, NA), mean1 = c(100.1, 110.9, NA), stDev2 = c(124.0, 134.7, NA), stDev1 = c(116.8, 133.7, NA) ) 
S12 <- getDataset( sampleSize2 = c(21, NA, NA), sampleSize1 = c(21, NA, NA), mean2 = c(84.9, NA, NA), mean1 = c(135.9, NA, NA), stDev2 = c(139.5, NA, NA), stDev1 = c(185.0, NA, NA) ) R <- getDataset( sampleSize2 = c(33, 33, NA), sampleSize1 = c(19, 19, NA), mean2 = c(77.1, 77.1, NA), mean1 = c(142.4, 142.4, NA), stDev2 = c(163.5, 163.5, NA), stDev1 = c(120.6, 120.6, NA) ) expect_error(getDataset(S1 = S1, S2 = S2, S12 = S12, R = R), NA) }) test_that("Illegal creation of a dataset of rates with subsets: too small number of events in F (one subset)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(22, 31, 37), sampleSize2 = c(28, 33, 39), events1 = c(17, 16, 17), events2 = c(18, 21, 19) ) F <- getDataset( sampleSize1 = c(46, 54, NA), sampleSize2 = c(49, 62, NA), events1 = c(16, 31, NA), events2 = c(29, 35, NA) ) expect_error(getDataset(S1 = S1, F = F), paste0("Conflicting arguments: 'event' F (16) must be >= 'event' S1 (17) in group 1 at stage 1"), fixed = TRUE ) }) test_that("Illegal creation of a dataset of rates with subsets: too small sample size in F (one subset)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(22, 31, 37), sampleSize2 = c(28, 33, 39), events1 = c(7, 16, 17), events2 = c(18, 21, 19) ) F <- getDataset( sampleSize1 = c(46, 29, NA), sampleSize2 = c(49, 62, NA), events1 = c(16, 31, NA), events2 = c(29, 35, NA) ) expect_error(getDataset(S1 = S1, F = F), paste0("Conflicting arguments: 'sampleSize' F (29) must be >= 'sampleSize' S1 (31) in group 1 at stage 2"), fixed = TRUE ) }) test_that("Illegal creation of a dataset of rates with subsets: wrong deselection (one subset)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(22, 31, NA), sampleSize2 = c(28, 33, NA), events1 = c(7, 16, NA), events2 = c(18, 21, NA) ) R <- getDataset( sampleSize1 = c(24, 23, 37), sampleSize2 = c(21, 29, 39), events1 = c(9, 15, 10), events2 = c(11, 14, 19) ) expect_error(getDataset(S1 = S1, R = R), paste0( "Conflicting arguments: if S1 is 
deselected (NA) then R also must be ", "deselected (NA) but, e.g., ", sQuote("sampleSize"), " R is 37 in group 1 at stage 3" ), fixed = TRUE ) }) test_that("Illegal creation of a dataset of rates with subsets: too small sample size in F (three subsets)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(84, 94, 25), sampleSize2 = c(82, 75, 23), events1 = c(21, 28, 13), events2 = c(32, 23, 20) ) S2 <- getDataset( sampleSize1 = c(81, 95, NA), sampleSize2 = c(84, 64, NA), events1 = c(26, 29, NA), events2 = c(31, 26, NA) ) S3 <- getDataset( sampleSize1 = c(271, NA, NA), sampleSize2 = c(74, NA, NA), events1 = c(16, NA, NA), events2 = c(21, NA, NA) ) F <- getDataset( sampleSize1 = c(248, NA, NA), sampleSize2 = c(254, NA, NA), events1 = c(75, NA, NA), events2 = c(98, NA, NA) ) expect_error(getDataset(S1 = S1, S2 = S2, S3 = S3, F = F), paste0( "Conflicting arguments: 'sampleSize' F (248) must ", "be >= 'sampleSize' S3 (271) in group 1 at stage 1" ), fixed = TRUE ) }) test_that("Illegal creation of a dataset of rates with subsets: wrong deselection (three subsets)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(47, 33, 37), sampleSize2 = c(48, 47, 39), events1 = c(18, 13, 17), events2 = c(12, 11, 9) ) S2 <- getDataset( sampleSize1 = c(49, NA, NA), sampleSize2 = c(45, NA, NA), events1 = c(12, NA, NA), events2 = c(13, NA, NA) ) S12 <- getDataset( sampleSize1 = c(35, 42, NA), sampleSize2 = c(36, 47, NA), events1 = c(19, 10, NA), events2 = c(13, 17, NA) ) R <- getDataset( sampleSize1 = c(43, 43, 43), sampleSize2 = c(39, 39, 39), events1 = c(17, 17, 17), events2 = c(14, 14, 14) ) expect_error(getDataset(S1 = S1, S2 = S2, S12 = S12, R = R), paste0( "Conflicting arguments: if S2 is deselected (NA) then R also must be ", "deselected (NA) but, e.g., ", sQuote("sampleSize"), " R is 43 in group 1 at stage 2" ), fixed = TRUE ) }) test_that("Creation of a dataset of rates with subsets: empty subsets", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(84, 94, 
25), sampleSize2 = c(82, 75, 23), events1 = c(21, 28, 13), events2 = c(32, 23, 20) ) S2 <- getDataset( sampleSize1 = c(81, 95, NA), sampleSize2 = c(84, 64, NA), events1 = c(26, 29, NA), events2 = c(31, 26, NA) ) S3 <- getDataset( sampleSize1 = c(71, NA, NA), sampleSize2 = c(74, NA, NA), events1 = c(16, NA, NA), events2 = c(21, NA, NA) ) R <- getDataset( sampleSize1 = c(12, NA, NA), sampleSize2 = c(14, NA, NA), events1 = c(12, NA, NA), events2 = c(14, NA, NA) ) expect_warning(getDataset(S1 = S1, S2 = S2, S3 = S3, R = R), "The 4 undefined subsets S12, S13, S23, S123 were defined as empty subsets", fixed = TRUE ) }) test_that("Illegal creation of a dataset of rates with subsets: wrong deselection (R)", { .skipTestIfDisabled() S1 <- getDataset( sampleSize1 = c(84, 94, 25), sampleSize2 = c(82, 75, 23), events1 = c(21, 28, 13), events2 = c(32, 23, 20) ) S2 <- getDataset( sampleSize1 = c(81, 95, NA), sampleSize2 = c(84, 64, NA), events1 = c(26, 29, NA), events2 = c(31, 26, NA) ) S3 <- getDataset( sampleSize1 = c(71, NA, NA), sampleSize2 = c(74, NA, NA), events1 = c(16, NA, NA), events2 = c(21, NA, NA) ) R <- getDataset( sampleSize1 = c(12, 95, NA), sampleSize2 = c(14, 64, NA), events1 = c(12, 29, NA), events2 = c(14, 26, NA) ) expect_warning(expect_error(getDataset(S1 = S1, S2 = S2, S3 = S3, R = R), paste0( "Conflicting arguments: if S3 is deselected (NA) then R also must be ", "deselected (NA) but, e.g., ", sQuote("sampleSize"), " R is 95 in group 1 at stage 2" ), fixed = TRUE )) }) test_that("Illegal creation of a dataset of survival data with subsets: too small number of events (one group)", { .skipTestIfDisabled() S1 <- getDataset( events = c(37, 56, 22), logRanks = c(1.66, 1.38, 1.22), allocationRatios = c(1, 1, 1) ) F <- getDataset( events = c(66, 55, NA), logRanks = c(1.98, 1.57, NA), allocationRatios = c(1, 1, NA) ) expect_error(getDataset(S1 = S1, F = F), paste0( "Conflicting arguments: 'event' F (55) must be >= ", "'event' S1 (56) in group 1 at stage 2" ), fixed 
= TRUE ) }) test_that("Illegal creation of a dataset of survival data with subsets: wrong deselection (one group)", { .skipTestIfDisabled() S1 <- getDataset( overallExpectedEvents = c(13.3, NA, NA), overallEvents = c(16, NA, NA), overallVarianceEvents = c(2.9, NA, NA), overallAllocationRatios = c(1, NA, NA) ) R <- getDataset( overallExpectedEvents = c(23.4, 35.4, 43.7), overallEvents = c(27, 38, 47), overallVarianceEvents = c(3.8, 4.7, 3.4), overallAllocationRatios = c(1, 1, 1) ) expect_error(getDataset(S1 = S1, R = R), paste0( "Conflicting arguments: if S1 is deselected (NA) then R also must ", "be deselected (NA) but, e.g., ", sQuote("overallEvent"), " R is 38 in group 1 at stage 2" ), fixed = TRUE ) }) test_that("Creation of a dataset of survival data with subsets: no error occurs", { .skipTestIfDisabled() S1 <- getDataset( events = c(37, 13, 26), logRanks = -c(1.66, 1.239, 0.785) ) S2 <- getDataset( events = c(31, 18, NA), logRanks = -c(1.98, 1.064, NA) ) F <- getDataset( events = c(37, NA, NA), logRanks = -c(2.18, NA, NA) ) expect_error(getDataset(S1 = S1, S2 = S2, F = F), NA) }) test_that("Illegal creation of a dataset of survival data with subsets: too small number of events (two groups)", { .skipTestIfDisabled() S1 <- getDataset( events = c(37, 13, 26), logRanks = -c(1.66, 1.239, 0.785) ) S2 <- getDataset( events = c(31, 18, NA), logRanks = -c(1.98, 1.064, NA) ) F <- getDataset( events = c(30, NA, NA), logRanks = -c(2.18, NA, NA) ) expect_error(getDataset(S1 = S1, S2 = S2, F = F), paste0( "Conflicting arguments: 'event' F (30) must be ", ">= 'event' S1 (37) in group 1 at stage 1" ), fixed = TRUE ) }) test_that("Illegal creation of a dataset of survival data with subsets: inconsistent deselection", { .skipTestIfDisabled() expect_error(getDataset( overallExpectedEvents = c(13.4, 35.4, 43.7), overallEvents = c(16, 37, 47), overallVarianceEvents = c(2.8, 4.7, 3.4), overallAllocationRatios = c(1, 1, NA) ), paste0( "Conflicting arguments: values of treatment 1 
not correctly specified; if NA's exist, then they are ", "mandatory for each parameter at the same stage" ), fixed = TRUE) S1 <- getDataset( overallExpectedEvents = c(13.4, 35.4, 43.7), overallEvents = c(16, 37, 47), overallVarianceEvents = c(2.8, 4.7, 3.4), overallAllocationRatios = c(1, 1, 1) ) expect_error(getDataset( overallExpectedEvents = c(11.5, 31.1, NA), overallEvents = c(15, 33, NA), overallVarianceEvents = c(2.2, 4.4, NA), overallAllocationRatios = c(1, 1, 1) ), paste0( "Conflicting arguments: values of treatment 1 not correctly specified; if NA's exist, then they are ", "mandatory for each parameter at the same stage" ), fixed = TRUE) S2 <- getDataset( overallExpectedEvents = c(11.5, 31.1, NA), overallEvents = c(15, 33, NA), overallVarianceEvents = c(2.2, 4.4, NA), overallAllocationRatios = c(1, 1, NA) ) S12 <- getDataset( overallExpectedEvents = c(10.1, 29.6, 39.1), overallEvents = c(11, 31, 42), overallVarianceEvents = c(2.8, 4.7, 3.4), overallAllocationRatios = c(1, 1, 1) ) R <- getDataset( overallExpectedEvents = c(23.3, NA, NA), overallEvents = c(25, NA, NA), overallVarianceEvents = c(3.9, NA, NA), overallAllocationRatios = c(1, NA, NA) ) expect_error(getDataset(S1 = S1, S2 = S2, S12 = S12, R = R), NA) }) test_that("Usage of the forward pipe operator", { .skipTestIfDisabled() .skipTestIfPipeOperatorNotAvailable() analysisResults <- getDesignGroupSequential(informationRates = c(20, 50, 80) / 80) |> getDataset( n = c(20, 30, 30), means = c(45, 51, 45), stDevs = c(130, 140, 120) ) |> getAnalysisResults() ## Comparison of the results of AnalysisResultsGroupSequential object 'analysisResults' with expected results expect_equal(analysisResults$thetaH1, 47.25, tolerance = 1e-07) expect_equal(analysisResults$assumedStDev, 128.66279, tolerance = 1e-07) expect_equal(analysisResults$testActions, c("continue", "continue", "reject")) expect_equal(analysisResults$conditionalRejectionProbabilities, c(0.081070604, 0.46575384, NA_real_), tolerance = 1e-07) 
expect_equal(analysisResults$conditionalPower, c(NA_real_, NA_real_, NA_real_)) expect_equal(analysisResults$repeatedConfidenceIntervalLowerBounds, c(-102.89842, -1.4031361, 18.073438), tolerance = 1e-07) expect_equal(analysisResults$repeatedConfidenceIntervalUpperBounds, c(192.89842, 98.603136, 76.426562), tolerance = 1e-07) expect_equal(analysisResults$repeatedPValues, c(0.29621451, 0.028427113, 0.00077442832), tolerance = 1e-07) expect_equal(analysisResults$finalStage, 3) expect_equal(analysisResults$finalPValues, c(NA_real_, NA_real_, 0.0060885604), tolerance = 1e-07) expect_equal(analysisResults$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 9.555415), tolerance = 1e-07) expect_equal(analysisResults$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 71.404491), tolerance = 1e-07) expect_equal(analysisResults$medianUnbiasedEstimates, c(NA_real_, NA_real_, 41.616412), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(analysisResults), NA))) expect_output(print(analysisResults)$show()) invisible(capture.output(expect_error(summary(analysisResults), NA))) expect_output(summary(analysisResults)$show()) analysisResultsCodeBased <- eval(parse(text = getObjectRCode(analysisResults, stringWrapParagraphWidth = NULL))) expect_equal(analysisResultsCodeBased$thetaH1, analysisResults$thetaH1, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$assumedStDev, analysisResults$assumedStDev, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$testActions, analysisResults$testActions, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$conditionalRejectionProbabilities, analysisResults$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$conditionalPower, analysisResults$conditionalPower, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$repeatedConfidenceIntervalLowerBounds, analysisResults$repeatedConfidenceIntervalLowerBounds, tolerance = 
1e-05) expect_equal(analysisResultsCodeBased$repeatedConfidenceIntervalUpperBounds, analysisResults$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$repeatedPValues, analysisResults$repeatedPValues, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$finalStage, analysisResults$finalStage, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$finalPValues, analysisResults$finalPValues, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$finalConfidenceIntervalLowerBounds, analysisResults$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$finalConfidenceIntervalUpperBounds, analysisResults$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(analysisResultsCodeBased$medianUnbiasedEstimates, analysisResults$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(analysisResults), "character") df <- as.data.frame(analysisResults) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(analysisResults) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } finalConfidenceInterval <- getDesignGroupSequential() |> getDataset( n = c(20, 30, 30), means = c(45, 51, 45), stDevs = c(130, 140, 120) ) |> getFinalConfidenceInterval() ## Comparison of the results of list object 'finalConfidenceInterval' with expected results expect_equal(finalConfidenceInterval$stage, 3) expect_equal(finalConfidenceInterval$thetaH0, 0) expect_equal(finalConfidenceInterval$directionUpper, TRUE) expect_equal(finalConfidenceInterval$normalApproximation, FALSE) expect_equal(finalConfidenceInterval$equalVariances, TRUE) expect_equal(finalConfidenceInterval$tolerance, 1e-06, tolerance = 1e-07) expect_equal(finalConfidenceInterval$finalStage, 2) expect_equal(finalConfidenceInterval$medianUnbiasedGeneral, 0.34738475, tolerance = 1e-07) expect_equal(finalConfidenceInterval$finalConfidenceIntervalGeneral, c(0.069908879, 
0.62467355), tolerance = 1e-07) expect_equal(finalConfidenceInterval$medianUnbiased, 46.815656, tolerance = 1e-07) expect_equal(finalConfidenceInterval$finalConfidenceInterval, c(9.4213407, 84.184763), tolerance = 1e-07) suppressWarnings( stageResults <- getDataset( n = c(20, 30, 30), means = c(45, 51, 45), stDevs = c(130, 140, 120) ) |> getStageResults() ) ## Comparison of the results of StageResultsMeans object 'stageResults' with expected results expect_equal(stageResults$overallTestStatistics, 1.5480471, tolerance = 1e-07) expect_equal(stageResults$overallPValues, 0.0690533, tolerance = 1e-07) expect_equal(stageResults$overallMeans, 45) expect_equal(stageResults$overallStDevs, 130) expect_equal(stageResults$overallSampleSizes, 20) expect_equal(stageResults$testStatistics, 1.5480471, tolerance = 1e-07) expect_equal(stageResults$pValues, 0.0690533, tolerance = 1e-07) expect_equal(stageResults$effectSizes, 45) expect_equal(stageResults$combInverseNormal, 1.4828789, tolerance = 1e-07) expect_equal(stageResults$weightsInverseNormal, 1) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(stageResults), NA))) expect_output(print(stageResults)$show()) invisible(capture.output(expect_error(summary(stageResults), NA))) expect_output(summary(stageResults)$show()) suppressWarnings(stageResultsCodeBased <- eval(parse(text = getObjectRCode(stageResults, stringWrapParagraphWidth = NULL)))) expect_equal(stageResultsCodeBased$overallTestStatistics, stageResults$overallTestStatistics, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallPValues, stageResults$overallPValues, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallMeans, stageResults$overallMeans, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallStDevs, stageResults$overallStDevs, tolerance = 1e-05) expect_equal(stageResultsCodeBased$overallSampleSizes, stageResults$overallSampleSizes, tolerance = 1e-05) 
expect_equal(stageResultsCodeBased$testStatistics, stageResults$testStatistics, tolerance = 1e-05) expect_equal(stageResultsCodeBased$pValues, stageResults$pValues, tolerance = 1e-05) expect_equal(stageResultsCodeBased$effectSizes, stageResults$effectSizes, tolerance = 1e-05) expect_equal(stageResultsCodeBased$combInverseNormal, stageResults$combInverseNormal, tolerance = 1e-05) expect_equal(stageResultsCodeBased$weightsInverseNormal, stageResults$weightsInverseNormal, tolerance = 1e-05) expect_type(names(stageResults), "character") df <- as.data.frame(stageResults) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(stageResults) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) rpact/tests/testthat/test-f_parameter_set_utilities.R0000644000176200001440000001056314370207346022733 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_parameter_set_utilities.R ## | Creation date: 06 February 2023, 12:13:45 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing Parameter Set Utility Functions") test_that("'.getParameterValueFormatted' produce correct results if parameter is an array", { x1 <- getSimulationMultiArmMeans(getDesignInverseNormal(kMax = 2), plannedSubjects = c(30, 60), muMaxVector = 0, seed = 123, maxNumberOfIterations = 50L) y1 <- 
.getParameterValueFormatted(x1, "sampleSizes") expect_equal("sampleSizes", y1$paramName) expect_equal(c(x1$.design$kMax, length(x1$muMaxVector), x1$activeArms + 1), dim(y1$paramValue)) expect_equal(length(as.vector(y1$paramValue)), length(y1$paramValueFormatted)) expect_equal("character", class(y1$paramValueFormatted)[1]) expect_equal("array", y1$type) x2 <- getSimulationMultiArmMeans(getDesignInverseNormal(kMax = 2), plannedSubjects = c(50, 100), muMaxVector = c(0, 1), seed = 123, maxNumberOfIterations = 50L) lines2a <- capture.output(print(x2)) lines2 <- lines2a[grepl("Sample sizes ", lines2a)] expect_match(lines2[1], "^ *Sample sizes \\(1\\) \\[1\\] *: 50, 50 *$") expect_match(lines2[2], "^ *Sample sizes \\(1\\) \\[2\\] *: 17, 0 *$") expect_match(lines2[3], "^ *Sample sizes \\(2\\) \\[1\\] *: 50, 50 *$") expect_match(lines2[4], "^ *Sample sizes \\(2\\) \\[2\\] *: 17, 3.3 *$") expect_match(lines2[5], "^ *Sample sizes \\(3\\) \\[1\\] *: 50, 50 *$") expect_match(lines2[6], "^ *Sample sizes \\(3\\) \\[2\\] *: 16, 46.7 *$") expect_match(lines2[7], "^ *Sample sizes \\(4\\) \\[1\\] *: 50, 50 *$") expect_match(lines2[8], "^ *Sample sizes \\(4\\) \\[2\\] *: 50, 50 *$") x3 <- getSimulationMultiArmMeans(getDesignInverseNormal(kMax = 1), plannedSubjects = 50, muMaxVector = c(0, 1), seed = 123, maxNumberOfIterations = 50L) y3 <- .getParameterValueFormatted(x3, "sampleSizes") expect_equal("sampleSizes", y3$paramName) expect_equal(c(x3$.design$kMax, length(x3$muMaxVector), x3$activeArms + 1), dim(y3$paramValue)) expect_equal(length(as.vector(y3$paramValue)), length(y3$paramValueFormatted) * 2) expect_equal("character", class(y3$paramValueFormatted)[1]) expect_equal("array", y3$type) lines3a <- capture.output(print(x3)) lines3 <- lines3a[grepl("Sample sizes ", lines3a)] expect_match(lines3[1], "^ *Sample sizes \\(1\\) *: 50, 50 *$") expect_match(lines3[2], "^ *Sample sizes \\(2\\) *: 50, 50 *$") expect_match(lines3[3], "^ *Sample sizes \\(3\\) *: 50, 50 *$") 
expect_match(lines3[4], "^ *Sample sizes \\(4\\) *: 50, 50 *$") x4 <- getSimulationMultiArmMeans(getDesignInverseNormal(kMax = 2), plannedSubjects = c(50, 100), muMaxVector = 0, seed = 123, maxNumberOfIterations = 50L) y4 <- .getParameterValueFormatted(x4, "sampleSizes") expect_equal("sampleSizes", y4$paramName) expect_equal(c(x4$.design$kMax, length(x4$muMaxVector), x4$activeArms + 1), dim(y4$paramValue)) expect_equal(length(as.vector(y4$paramValue)), length(y4$paramValueFormatted)) expect_equal("character", class(y4$paramValueFormatted)[1]) expect_equal("array", y4$type) lines4a <- capture.output(print(x4)) lines4 <- lines4a[grepl("Sample sizes ", lines4a)] expect_match(lines4[1], "^ *Sample sizes \\(1\\) \\[1\\] *: 50 *$") expect_match(lines4[2], "^ *Sample sizes \\(1\\) \\[2\\] *: 17 *$") expect_match(lines4[3], "^ *Sample sizes \\(2\\) \\[1\\] *: 50 *$") expect_match(lines4[4], "^ *Sample sizes \\(2\\) \\[2\\] *: 17 *$") expect_match(lines4[5], "^ *Sample sizes \\(3\\) \\[1\\] *: 50 *$") expect_match(lines4[6], "^ *Sample sizes \\(3\\) \\[2\\] *: 16 *$") expect_match(lines4[7], "^ *Sample sizes \\(4\\) \\[1\\] *: 50 *$") expect_match(lines4[8], "^ *Sample sizes \\(4\\) \\[2\\] *: 50 *$") }) rpact/tests/testthat/test-f_analysis_base_survival.R0000644000176200001440000017450014370207346022557 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_analysis_base_survival.R ## | Creation date: 06 February 2023, 12:06:52 ## | File version: $Revision: 6801 $ ## 
| Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Analysis Survival Functionality for the Group Sequential Design") test_that("'getAnalysisResults' for a two-stage group sequential design and survival data", { .skipTestIfDisabled() design0 <- getDesignGroupSequential( kMax = 2, alpha = 0.025, informationRates = c(0.4, 1), bindingFutility = TRUE, typeOfDesign = "WT", deltaWT = 0.25, futilityBounds = 0 ) dataExample0 <- getDataset( overallEvents = c(8, 20), overallLogRanks = c(1.92, 2.1) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticSurvival} # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} # @refFS[Formula]{fs:finalCISurvival} # @refFS[Formula]{fs:medianUnbiasedEstimate} x0 <- getAnalysisResults(design0, dataExample0, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsGroupSequential object 'x0' with expected results expect_equal(x0$thetaH1, 2.5578027, tolerance = 1e-06) expect_equal(x0$testActions, c("continue", "reject")) expect_equal(x0$conditionalRejectionProbabilities, c(0.15200046, NA_real_), tolerance = 1e-06) expect_equal(x0$conditionalPower, c(NA_real_, NA_real_)) expect_equal(x0$repeatedConfidenceIntervalLowerBounds, c(0.65051922, 1.04083), tolerance = 1e-06) expect_equal(x0$repeatedConfidenceIntervalUpperBounds, c(23.22605, 6.2857086), tolerance = 1e-06) expect_equal(x0$repeatedPValues, c(0.074184316, 0.019962317), tolerance = 1e-06) expect_equal(x0$finalStage, 2) expect_equal(x0$finalPValues, c(NA_real_, 0.021122043), tolerance = 1e-06) expect_equal(x0$finalConfidenceIntervalLowerBounds, c(NA_real_, 1.0341796), tolerance = 1e-06) expect_equal(x0$finalConfidenceIntervalUpperBounds, c(NA_real_, 6.2409205), tolerance = 1e-06) expect_equal(x0$medianUnbiasedEstimates, c(NA_real_, 2.5476534), 
tolerance = 1e-06) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x0), NA))) expect_output(print(x0)$show()) invisible(capture.output(expect_error(summary(x0), NA))) expect_output(summary(x0)$show()) x0CodeBased <- eval(parse(text = getObjectRCode(x0, stringWrapParagraphWidth = NULL))) expect_equal(x0CodeBased$thetaH1, x0$thetaH1, tolerance = 1e-05) expect_equal(x0CodeBased$testActions, x0$testActions, tolerance = 1e-05) expect_equal(x0CodeBased$conditionalRejectionProbabilities, x0$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x0CodeBased$conditionalPower, x0$conditionalPower, tolerance = 1e-05) expect_equal(x0CodeBased$repeatedConfidenceIntervalLowerBounds, x0$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x0CodeBased$repeatedConfidenceIntervalUpperBounds, x0$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x0CodeBased$repeatedPValues, x0$repeatedPValues, tolerance = 1e-05) expect_equal(x0CodeBased$finalStage, x0$finalStage, tolerance = 1e-05) expect_equal(x0CodeBased$finalPValues, x0$finalPValues, tolerance = 1e-05) expect_equal(x0CodeBased$finalConfidenceIntervalLowerBounds, x0$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x0CodeBased$finalConfidenceIntervalUpperBounds, x0$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x0CodeBased$medianUnbiasedEstimates, x0$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x0), "character") df <- as.data.frame(x0) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x0) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getAnalysisResults' for a three-stage group sequential design and survival data", { .skipTestIfDisabled() design1 <- getDesignGroupSequential( kMax = 3, alpha = 0.025, informationRates = c(0.2, 0.4, 1), bindingFutility = FALSE, typeOfDesign = "WT", deltaWT = 0.25, 
futilityBounds = c(0, 0) ) dataExample1 <- getDataset( overallEvents = c(8, 15, 38), overallAllocationRatios = c(1, 1, 1), overallLogRanks = c(1.52, 1.38, 2.9) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticSurvival} # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x1 <- getAnalysisResults(design1, dataExample1, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results expect_equal(x1$thetaH1, 2.5622461, tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject")) expect_equal(x1$conditionalRejectionProbabilities, c(0.076909306, 0.067473058, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.34217973, 0.54553509, 1.325822), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(25.078822, 7.6235796, 4.9517237), tolerance = 1e-07) expect_equal(x1$repeatedPValues, c(0.22249182, 0.19345822, 0.0019646115), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.0074535505), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 1.222663), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 4.752454), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 2.4764002), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$thetaH1, x1$thetaH1, tolerance = 1e-05) 
expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # @refFS[Formula]{fs:testStatisticSurvival} # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x2 <- getAnalysisResults(design1, dataExample1, stage = 2, nPlanned = 40, allocationRatioPlanned = 2, thetaH1 = 2, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results expect_equal(x2$testActions, c("continue", "continue", NA_character_)) expect_equal(x2$conditionalRejectionProbabilities, c(0.076909306, 0.067473058, NA_real_), tolerance = 1e-07) 
expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.70906065), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.34217973, 0.54553509, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(25.078822, 7.6235796, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.22249182, 0.19345822, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05) expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, 
x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(1, 2.5, 0.05)) ## Comparison of the results of list object 'plotData1' with expected results expect_equal(plotData1$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData1$condPowerValues, c(0.06476941, 0.085271856, 0.10901882, 0.13583313, 0.16543943, 0.19748538, 0.23156461, 0.26723929, 0.30406079, 0.34158746, 0.37939899, 0.41710731, 0.45436408, 0.49086519, 0.52635279, 0.5606151, 0.59348472, 0.62483573, 0.65458006, 0.68266335, 0.70906065, 0.73377215, 0.75681902, 0.77823954, 0.79808559, 0.81641944, 0.83331101, 0.84883539, 0.86307085, 0.87609709, 0.88799385), tolerance = 1e-07) expect_equal(plotData1$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 0.99477877, 0.98927675, 0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData1$main, "Conditional Power with Likelihood") expect_equal(plotData1$xlab, "Hazard ratio") expect_equal(plotData1$ylab, "Conditional power / Likelihood") expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 40, allocation ratio = 2") # @refFS[Tab.]{fs:tab:output:getAnalysisResultsGroupSequential} # 
@refFS[Formula]{fs:testStatisticSurvival} # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x3 <- getAnalysisResults(design1, dataExample1, thetaH0 = 0.95, stage = 2, nPlanned = 40, allocationRatioPlanned = 2, thetaH1 = 2, directionUpper = TRUE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x3' with expected results expect_equal(x3$testActions, c("continue", "continue", NA_character_)) expect_equal(x3$conditionalRejectionProbabilities, c(0.083820262, 0.07871372, NA_real_), tolerance = 1e-07) expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.78366367), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.34217973, 0.54553509, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(25.078822, 7.6235796, NA_real_), tolerance = 1e-07) expect_equal(x3$repeatedPValues, c(0.20477831, 0.16773576, NA_real_), tolerance = 1e-07) expect_equal(x3$finalStage, NA_integer_) expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_)) expect_equal(x3$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$testActions, x3$testActions, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05) 
expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalStage, x3$finalStage, tolerance = 1e-05) expect_equal(x3CodeBased$finalPValues, x3$finalPValues, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalLowerBounds, x3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x3CodeBased$finalConfidenceIntervalUpperBounds, x3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x3CodeBased$medianUnbiasedEstimates, x3$medianUnbiasedEstimates, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } plotData2 <- testGetAnalysisResultsPlotData(x3, thetaRange = seq(1, 2.5, 0.05)) ## Comparison of the results of list object 'plotData2' with expected results expect_equal(plotData2$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07) expect_equal(plotData2$condPowerValues, c(0.099931978, 0.12787889, 0.15919322, 0.19345089, 0.23014743, 0.26873157, 0.30863607, 0.34930399, 0.39020957, 0.43087349, 0.47087287, 0.50984669, 0.54749733, 0.58358921, 0.61794519, 0.65044149, 0.6810018, 0.70959089, 0.73620831, 0.7608822, 0.78366367, 0.80462154, 0.82383789, 0.841404, 0.85741704, 0.87197725, 0.88518567, 0.8971423, 0.90794467, 0.91768682, 0.92645845), tolerance = 1e-07) expect_equal(plotData2$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 
0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 0.99477877, 0.98927675, 0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07) expect_equal(plotData2$main, "Conditional Power with Likelihood") expect_equal(plotData2$xlab, "Hazard ratio") expect_equal(plotData2$ylab, "Conditional power / Likelihood") expect_equal(plotData2$sub, "Stage = 2, maximum number of remaining events = 40, allocation ratio = 2") }) test_that("'getAnalysisResults' for a three-stage ggroup sequential design and survival data ('directionUpper' reversed)", { .skipTestIfDisabled() design2 <- getDesignGroupSequential( kMax = 3, alpha = 0.025, informationRates = c(0.2, 0.4, 1), bindingFutility = FALSE, typeOfDesign = "WT", deltaWT = 0.25, futilityBounds = c(0, 0) ) dataExample2 <- getDataset( overallEvents = c(8, 15, 40), overallAllocationRatios = c(1, 1, 1), overallLogRanks = -c(1.52, 1.38, 2.9) ) # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal} # @refFS[Formula]{fs:testStatisticSurvival} # @refFS[Formula]{fs:pValuesSurvivalAlternativeSmaller} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x1 <- getAnalysisResults(design2, dataExample2, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsGroupSequential object 'x1' with expected results expect_equal(x1$thetaH1, 0.3996922, tolerance = 1e-07) expect_equal(x1$testActions, c("continue", "continue", "reject")) expect_equal(x1$conditionalRejectionProbabilities, c(0.076909306, 0.067473058, NA_real_), tolerance = 1e-07) expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_)) expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.039874281, 0.13117197, 0.21029804), tolerance = 1e-07) expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(2.9224407, 1.8330627, 0.75965452), tolerance = 1e-07) 
expect_equal(x1$repeatedPValues, c(0.22249182, 0.19345822, 0.0019646115), tolerance = 1e-07) expect_equal(x1$finalStage, 3) expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.0074535505), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.21888803), tolerance = 1e-07) expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.82206073), tolerance = 1e-07) expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.41319107), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$thetaH1, x1$thetaH1, tolerance = 1e-05) expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05) expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05) 
expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal} # @refFS[Formula]{fs:testStatisticSurvival} # @refFS[Formula]{fs:pValuesSurvivalAlternativeSmaller} # @refFS[Formula]{fs:definitionRCIInverseNormal} # @refFS[Formula]{fs:calculationRepeatedpValue} x2 <- getAnalysisResults(design2, dataExample2, thetaH0 = 1.1, stage = 2, nPlanned = 40, allocationRatioPlanned = 0.5, thetaH1 = 0.5, directionUpper = FALSE ) ## Comparison of the results of AnalysisResultsGroupSequential object 'x2' with expected results expect_equal(x2$testActions, c("continue", "continue", NA_character_)) expect_equal(x2$conditionalRejectionProbabilities, c(0.090220506, 0.08944509, NA_real_), tolerance = 1e-07) expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.83779047), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.039874281, 0.13117197, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(2.9224407, 1.8330627, NA_real_), tolerance = 1e-07) expect_equal(x2$repeatedPValues, c(0.19034734, 0.14768766, NA_real_), tolerance = 1e-07) expect_equal(x2$finalStage, NA_integer_) expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_)) expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) 
    expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05)
    expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05)
    expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05)
    expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
    expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
    expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05)
    expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05)
    expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05)
    expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
    expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
    expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05)
    expect_type(names(x2), "character")
    df <- as.data.frame(x2)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x2)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}
# Conditional power / likelihood plot data over a grid of hazard ratios.
plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(0.4, 1, 0.05))
## Comparison of the results of list object 'plotData1' with expected results
expect_equal(plotData1$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1), tolerance = 1e-07)
expect_equal(plotData1$condPowerValues, c(0.95060038, 0.90312097, 0.83779047, 0.7584288, 0.67069735, 0.58050999, 0.49291957, 0.41159422, 0.33875526, 0.27538378, 0.22153368, 0.17664644, 0.1398156), tolerance = 1e-07)
expect_equal(plotData1$likelihoodValues, c(0.92517582, 0.98626675, 0.99928862, 0.9755955, 0.92648393, 0.86161675, 0.78854281, 0.71277663, 0.63811141, 0.56698955, 0.50084781, 0.44040564, 0.38589113), tolerance = 1e-07)
expect_equal(plotData1$main, "Conditional Power with Likelihood")
expect_equal(plotData1$xlab, "Hazard ratio")
expect_equal(plotData1$ylab, "Conditional power / Likelihood")
expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 40, allocation ratio = 0.5")
})

test_plan_section("Testing the Analysis Survival Functionality for the Inverse Normal Design")

test_that("'getAnalysisResults' for a three-stage inverse normal design and survival data", {
    .skipTestIfDisabled()

    # Three-stage Wang & Tsiatis design (deltaWT = 0.25) with non-binding futility bounds.
    design3 <- getDesignInverseNormal(
        kMax = 3, alpha = 0.025, informationRates = c(0.4, 0.6, 1),
        bindingFutility = FALSE, typeOfDesign = "WT", deltaWT = 0.25, futilityBounds = c(0.2, 0.2)
    )

    # Cumulative (overall) survival data per stage: events and log-rank statistics.
    dataExample3 <- getDataset(
        overallEvents = c(8, 15, 29),
        overallAllocationRatios = c(1, 1, 1),
        overallLogRanks = c(1.52, 1.38, 2.9)
    )

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:testStatisticSurvival}
    # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    x1 <- getAnalysisResults(design3, dataExample3, directionUpper = TRUE)

    ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results
    expect_equal(x1$thetaH1, 2.9359555, tolerance = 1e-07)
    expect_equal(x1$testActions, c("continue", "continue", "reject"))
    expect_equal(x1$conditionalRejectionProbabilities, c(0.088442162, 0.068047477, NA_real_), tolerance = 1e-07)
    expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.46058716, 0.62720212, 1.3462647), tolerance = 1e-07)
    expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(18.631576, 7.3754243, 6.4004419), tolerance = 1e-07)
    expect_equal(x1$repeatedPValues, c(0.16451426, 0.14162994, 0.0024185596), tolerance = 1e-07)
    expect_equal(x1$finalStage, 3)
    expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.012073682), tolerance = 1e-07)
    expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 1.1608546), tolerance = 1e-07)
    expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 5.9479756), tolerance = 1e-07)
    expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 2.7535435), tolerance = 1e-07)
    # Print/summary smoke tests plus code-based round-trip reconstruction of 'x1'.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x1), NA)))
        expect_output(print(x1)$show())
        invisible(capture.output(expect_error(summary(x1), NA)))
        expect_output(summary(x1)$show())
        x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL)))
        expect_equal(x1CodeBased$thetaH1, x1$thetaH1, tolerance = 1e-05)
        expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05)
        expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(x1), "character")
        df <- as.data.frame(x1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) >
0)
    }

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:testStatisticSurvival}
    # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # Stage-1 interim with two further planned stages (20 and 40 events, allocation 2:1).
    x2 <- getAnalysisResults(design3,
        stage = 1, nPlanned = c(20, 40),
        allocationRatioPlanned = 2, thetaH1 = 2, dataExample3, directionUpper = TRUE
    )

    ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results
    expect_equal(x2$testActions, c("continue", NA_character_, NA_character_))
    expect_equal(x2$conditionalRejectionProbabilities, c(0.088442162, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(x2$conditionalPower, c(NA_real_, 0.31420758, 0.86797577), tolerance = 1e-07)
    expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.46058716, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(18.631576, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(x2$repeatedPValues, c(0.16451426, NA_real_, NA_real_), tolerance = 1e-07)
    expect_equal(x2$finalStage, NA_integer_)
    expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_))
    # Print/summary smoke tests plus code-based round-trip reconstruction of 'x2'.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x2), NA)))
        expect_output(print(x2)$show())
        invisible(capture.output(expect_error(summary(x2), NA)))
        expect_output(summary(x2)$show())
        x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL)))
        expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05)
        expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05)
        expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05)
        expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05)
        expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05)
        expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(x2), "character")
        df <- as.data.frame(x2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
    # Conditional power / likelihood plot data from the stage-1 interim.
    plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(1, 2.5, 0.05))
    ## Comparison of the results of list object 'plotData1' with expected results
    expect_equal(plotData1$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07)
    expect_equal(plotData1$condPowerValues, c(0.088421701, 0.1185973, 0.15385139, 0.19371622, 0.23749985, 0.28435066, 0.33332759, 0.38346727, 0.43384172, 0.48360335, 0.53201578, 0.57847144, 0.62249749, 0.66375267, 0.70201741, 0.73717966, 0.7692185, 0.79818706, 0.82419601, 0.84739829, 0.86797577, 0.88612785, 0.90206209, 0.91598687, 0.92810573, 0.93861331, 0.9476925, 0.95551278, 0.96222928, 0.96798255, 0.97289882), tolerance = 1e-07)
    expect_equal(plotData1$likelihoodValues, c(0.31499453, 0.34899387, 0.38312084, 0.41715372, 0.4508945, 0.48416833, 0.51682261, 0.54872573, 0.57976566, 0.60984848, 0.63889683, 0.66684839, 0.69365439, 0.71927824, 0.74369416, 0.76688598, 0.78884594, 0.80957369, 0.82907527, 0.84736227, 0.86445102, 0.88036187, 0.89511858, 0.90874767, 0.92127801, 0.93274029, 0.94316663, 0.95259025, 0.96104517, 0.96856586, 0.97518711), tolerance = 1e-07)
    expect_equal(plotData1$main, "Conditional Power with Likelihood")
    expect_equal(plotData1$xlab, "Hazard ratio")
    expect_equal(plotData1$ylab, "Conditional power / Likelihood")
    expect_equal(plotData1$sub, "Stage = 1, maximum number of remaining events = 60, allocation ratio = 2")

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:testStatisticSurvival}
    # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # Stage-2 interim with a shifted null hypothesis (thetaH0 = 0.95).
    x3 <- getAnalysisResults(design3, dataExample3,
        thetaH0 = 0.95, stage = 2, nPlanned = 40,
        allocationRatioPlanned = 2, thetaH1 = 2, directionUpper = TRUE
    )

    ## Comparison of the results of AnalysisResultsInverseNormal object 'x3' with expected results
    expect_equal(x3$testActions, c("continue", "continue", NA_character_))
    expect_equal(x3$conditionalRejectionProbabilities, c(0.1007598, 0.085347867, NA_real_), tolerance = 1e-07)
    expect_equal(x3$conditionalPower, c(NA_real_, NA_real_, 0.80220427), tolerance = 1e-07)
    expect_equal(x3$repeatedConfidenceIntervalLowerBounds, c(0.46058716, 0.62720212, NA_real_), tolerance = 1e-07)
    expect_equal(x3$repeatedConfidenceIntervalUpperBounds, c(18.631576, 7.3754243, NA_real_), tolerance = 1e-07)
    expect_equal(x3$repeatedPValues, c(0.14859365, 0.12054424, NA_real_), tolerance = 1e-07)
    expect_equal(x3$finalStage, NA_integer_)
    expect_equal(x3$finalPValues, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x3$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x3$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x3$medianUnbiasedEstimates, c(NA_real_,
NA_real_, NA_real_))
    # Print/summary smoke tests plus code-based round-trip reconstruction of 'x3'.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x3), NA)))
        expect_output(print(x3)$show())
        invisible(capture.output(expect_error(summary(x3), NA)))
        expect_output(summary(x3)$show())
        x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL)))
        expect_equal(x3CodeBased$testActions, x3$testActions, tolerance = 1e-05)
        expect_equal(x3CodeBased$conditionalRejectionProbabilities, x3$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(x3CodeBased$conditionalPower, x3$conditionalPower, tolerance = 1e-05)
        expect_equal(x3CodeBased$repeatedConfidenceIntervalLowerBounds, x3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x3CodeBased$repeatedConfidenceIntervalUpperBounds, x3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x3CodeBased$repeatedPValues, x3$repeatedPValues, tolerance = 1e-05)
        expect_equal(x3CodeBased$finalStage, x3$finalStage, tolerance = 1e-05)
        expect_equal(x3CodeBased$finalPValues, x3$finalPValues, tolerance = 1e-05)
        expect_equal(x3CodeBased$finalConfidenceIntervalLowerBounds, x3$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x3CodeBased$finalConfidenceIntervalUpperBounds, x3$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x3CodeBased$medianUnbiasedEstimates, x3$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(x3), "character")
        df <- as.data.frame(x3)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x3)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
    # Conditional power / likelihood plot data from the stage-2 interim.
    plotData2 <- testGetAnalysisResultsPlotData(x3, thetaRange = seq(1, 2.5, 0.05))
    ## Comparison of the results of list object 'plotData2' with expected results
    expect_equal(plotData2$xValues, c(1, 1.05, 1.1, 1.15, 1.2, 1.25, 1.3, 1.35, 1.4, 1.45, 1.5, 1.55, 1.6, 1.65, 1.7, 1.75, 1.8, 1.85, 1.9, 1.95, 2, 2.05, 2.1, 2.15, 2.2, 2.25, 2.3, 2.35, 2.4, 2.45, 2.5), tolerance = 1e-07)
    expect_equal(plotData2$condPowerValues, c(0.11179361, 0.14195425, 0.17543978, 0.21175256, 0.25032518, 0.2905567, 0.33184453, 0.37361059, 0.415321, 0.45649966, 0.49673619, 0.53568916, 0.57308562, 0.60871782, 0.642438, 0.67415198, 0.70381218, 0.73141052, 0.75697152, 0.7805458, 0.80220427, 0.8220329, 0.84012825, 0.8565936, 0.87153581, 0.88506274, 0.89728115, 0.90829517, 0.91820505, 0.92710634, 0.93508929), tolerance = 1e-07)
    expect_equal(plotData2$likelihoodValues, c(0.38589113, 0.43767503, 0.48942229, 0.54046191, 0.59019718, 0.63811141, 0.6837696, 0.72681695, 0.76697506, 0.8040366, 0.83785893, 0.86835727, 0.89549763, 0.91928996, 0.93978142, 0.95705021, 0.97119988, 0.98235405, 0.99065189, 0.99624395, 0.99928862, 0.99994909, 0.9983907, 0.99477877, 0.98927675, 0.98204476, 0.97323838, 0.96300773, 0.95149676, 0.93884271, 0.92517582), tolerance = 1e-07)
    expect_equal(plotData2$main, "Conditional Power with Likelihood")
    expect_equal(plotData2$xlab, "Hazard ratio")
    expect_equal(plotData2$ylab, "Conditional power / Likelihood")
    expect_equal(plotData2$sub, "Stage = 2, maximum number of remaining events = 40, allocation ratio = 2")
})

test_that("'getAnalysisResults' for a three-stage inverse normal design and survival data ('directionUpper' reversed)", {
    .skipTestIfDisabled()

    # Same design as above; log-rank signs are flipped so the alternative points downwards.
    design4 <- getDesignInverseNormal(
        kMax = 3, alpha = 0.025, informationRates = c(0.4, 0.6, 1),
        bindingFutility = FALSE, typeOfDesign = "WT", deltaWT = 0.25, futilityBounds = c(0.2, 0.2)
    )

    dataExample4 <- getDataset(
        overallEvents = c(8, 15, 29),
        overallAllocationRatios = c(1, 1, 1),
        overallLogRanks = -c(1.52, 1.38, 2.9)
    )

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:testStatisticSurvival}
    # @refFS[Formula]{fs:pValuesSurvivalAlternativeSmaller}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    x1 <- getAnalysisResults(design4, dataExample4, directionUpper = FALSE)

    ## Comparison of the results of AnalysisResultsInverseNormal object 'x1' with expected results
    expect_equal(x1$thetaH1, 0.34060461, tolerance = 1e-07)
    expect_equal(x1$testActions, c("continue", "continue", "reject"))
    expect_equal(x1$conditionalRejectionProbabilities, c(0.088442162, 0.068047477, NA_real_), tolerance = 1e-07)
    expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.053672215, 0.13558542, 0.1562393), tolerance = 1e-07)
    expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(2.1711417, 1.5943825, 0.74279586), tolerance = 1e-07)
    expect_equal(x1$repeatedPValues, c(0.16451426, 0.14162994, 0.0024185596), tolerance = 1e-07)
    expect_equal(x1$finalStage, 3)
    expect_equal(x1$finalPValues, c(NA_real_, NA_real_, 0.012073682), tolerance = 1e-07)
    expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, 0.16812443), tolerance = 1e-07)
    expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, 0.86143434), tolerance = 1e-07)
    expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, 0.3631684), tolerance = 1e-07)
    # Print/summary smoke tests plus code-based round-trip reconstruction of 'x1'.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x1), NA)))
        expect_output(print(x1)$show())
        invisible(capture.output(expect_error(summary(x1), NA)))
        expect_output(summary(x1)$show())
        x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL)))
        expect_equal(x1CodeBased$thetaH1, x1$thetaH1, tolerance = 1e-05)
        expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05)
        expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance =
1e-05)
        expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(x1), "character")
        df <- as.data.frame(x1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsInverseNormal}
    # @refFS[Formula]{fs:testStatisticSurvival}
    # @refFS[Formula]{fs:pValuesSurvivalAlternativeSmaller}
    # @refFS[Formula]{fs:definitionRCIInverseNormal}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # Stage-2 interim for the reversed direction; mirrors the 'design2' case above.
    x2 <- getAnalysisResults(design4, dataExample4,
        thetaH0 = 1.1, stage = 2, nPlanned = 40,
        allocationRatioPlanned = 0.5, thetaH1 = 0.5, directionUpper = FALSE
    )

    ## Comparison of the results of AnalysisResultsInverseNormal object 'x2' with expected results
    expect_equal(x2$testActions, c("continue", "continue", NA_character_))
    expect_equal(x2$conditionalRejectionProbabilities, c(0.11248903, 0.10265841, NA_real_), tolerance = 1e-07)
    expect_equal(x2$conditionalPower, c(NA_real_, NA_real_, 0.8608569), tolerance = 1e-07)
    expect_equal(x2$repeatedConfidenceIntervalLowerBounds, c(0.053672215, 0.13558542, NA_real_), tolerance = 1e-07)
    expect_equal(x2$repeatedConfidenceIntervalUpperBounds, c(2.1711417, 1.5943825, NA_real_), tolerance = 1e-07)
    expect_equal(x2$repeatedPValues, c(0.13581063, 0.1043566, NA_real_), tolerance = 1e-07)
    expect_equal(x2$finalStage, NA_integer_)
    expect_equal(x2$finalPValues, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x2$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x2$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x2$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_))
    # Print/summary smoke tests plus code-based round-trip reconstruction of 'x2'.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x2), NA)))
        expect_output(print(x2)$show())
        invisible(capture.output(expect_error(summary(x2), NA)))
        expect_output(summary(x2)$show())
        x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL)))
        expect_equal(x2CodeBased$testActions, x2$testActions, tolerance = 1e-05)
        expect_equal(x2CodeBased$conditionalRejectionProbabilities, x2$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(x2CodeBased$conditionalPower, x2$conditionalPower, tolerance = 1e-05)
        expect_equal(x2CodeBased$repeatedConfidenceIntervalLowerBounds, x2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x2CodeBased$repeatedConfidenceIntervalUpperBounds, x2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x2CodeBased$repeatedPValues, x2$repeatedPValues, tolerance = 1e-05)
        expect_equal(x2CodeBased$finalStage, x2$finalStage, tolerance = 1e-05)
        expect_equal(x2CodeBased$finalPValues, x2$finalPValues, tolerance = 1e-05)
        expect_equal(x2CodeBased$finalConfidenceIntervalLowerBounds, x2$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x2CodeBased$finalConfidenceIntervalUpperBounds, x2$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x2CodeBased$medianUnbiasedEstimates, x2$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(x2), "character")
        df <- as.data.frame(x2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x2)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
    plotData1 <- testGetAnalysisResultsPlotData(x2, thetaRange = seq(0.4, 1, 0.05))
    ## Comparison of the results of list object 'plotData1' with expected results
    expect_equal(plotData1$xValues, c(0.4, 0.45, 0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1), tolerance = 1e-07)
    expect_equal(plotData1$condPowerValues, c(0.95989447, 0.91898875, 0.8608569, 0.78814959, 0.70560814, 0.61865802, 0.53228335, 0.45038602, 0.37558279, 0.3092947, 0.25198255, 0.20342172, 0.16295428), tolerance = 1e-07)
    expect_equal(plotData1$likelihoodValues, c(0.92517582, 0.98626675, 0.99928862, 0.9755955, 0.92648393, 0.86161675, 0.78854281, 0.71277663, 0.63811141, 0.56698955, 0.50084781, 0.44040564, 0.38589113), tolerance = 1e-07)
    expect_equal(plotData1$main, "Conditional Power with Likelihood")
    expect_equal(plotData1$xlab, "Hazard ratio")
    expect_equal(plotData1$ylab, "Conditional power / Likelihood")
    expect_equal(plotData1$sub, "Stage = 2, maximum number of remaining events = 40, allocation ratio = 0.5")
})

test_plan_section("Testing the Analysis Survival Functionality for the Fisher Design")

test_that("'getAnalysisResults' for a three-stage Fisher design and 'bindingFutility = TRUE'", {
    .skipTestIfDisabled()

    # Fisher combination test design with binding futility bounds (alpha0Vec).
    design5 <- getDesignFisher(
        kMax = 3, alpha = 0.025, informationRates = c(0.4, 0.6, 1),
        alpha0Vec = c(0.5, 0.4), bindingFutility = TRUE
    )

    dataExample5 <- getDataset(
        overallEvents = c(8, 15),
        overallAllocationRatios = c(1, 1),
        overallLogRanks = c(1.52, 2)
    )

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher}
    # @refFS[Formula]{fs:testStatisticSurvival}
    # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater}
    # @refFS[Formula]{fs:definitionRCIFisherCombination}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    # The seed fixes the simulation-based conditional power computation of the Fisher design.
    x1 <- getAnalysisResults(design5, dataExample5,
        thetaH1 = 2, allocationRatioPlanned = 2,
        nPlanned = 50, directionUpper = TRUE, seed = 123456789
    )

    ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results
    expect_equal(x1$testActions, c("continue", "continue", NA_character_))
    expect_equal(x1$conditionalRejectionProbabilities, c(0.043454839, 0.062873928, NA_real_), tolerance
= 1e-07)
    expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, 0.78212896), tolerance = 1e-07)
    expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.63614226, 0.82191364, NA_real_), tolerance = 1e-07)
    expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(13.489852, 9.7381024, NA_real_), tolerance = 1e-07)
    expect_equal(x1$repeatedPValues, c(0.094302989, 0.05707734, NA_real_), tolerance = 1e-07)
    expect_equal(x1$finalStage, NA_integer_)
    expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_))
    # Print/summary smoke tests plus code-based round-trip reconstruction of 'x1'.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x1), NA)))
        expect_output(print(x1)$show())
        invisible(capture.output(expect_error(summary(x1), NA)))
        expect_output(summary(x1)$show())
        x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL)))
        expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05)
        expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(x1), "character")
        df <- as.data.frame(x1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getAnalysisResults' for a three-stage Fisher design and 'bindingFutility = TRUE' ('directionUpper' reversed)", {
    .skipTestIfDisabled()

    # Same Fisher design; log-rank signs flipped so the alternative points downwards.
    design6 <- getDesignFisher(
        kMax = 3, alpha = 0.025, informationRates = c(0.4, 0.6, 1),
        alpha0Vec = c(0.5, 0.4), bindingFutility = TRUE
    )

    dataExample6 <- getDataset(
        overallEvents = c(8, 15),
        overallAllocationRatios = c(1, 1),
        overallLogRanks = -c(1.52, 2)
    )

    # @refFS[Tab.]{fs:tab:output:getAnalysisResultsFisher}
    # @refFS[Formula]{fs:testStatisticSurvival}
    # @refFS[Formula]{fs:pValuesSurvivalAlternativeSmaller}
    # @refFS[Formula]{fs:definitionRCIFisherCombination}
    # @refFS[Formula]{fs:calculationRepeatedpValue}
    x1 <- getAnalysisResults(design6, dataExample6,
        thetaH1 = 0.5, allocationRatioPlanned = 0.5,
        nPlanned = 50, directionUpper = FALSE, seed = 123456789
    )

    ## Comparison of the results of AnalysisResultsFisher object 'x1' with expected results
    expect_equal(x1$testActions, c("continue", "continue", NA_character_))
    expect_equal(x1$conditionalRejectionProbabilities, c(0.043454839, 0.062873928, NA_real_), tolerance = 1e-07)
    expect_equal(x1$conditionalPower, c(NA_real_, NA_real_, 0.78212896), tolerance = 1e-07)
    expect_equal(x1$repeatedConfidenceIntervalLowerBounds, c(0.074129584, 0.10268931, NA_real_), tolerance = 1e-07)
    expect_equal(x1$repeatedConfidenceIntervalUpperBounds, c(1.5719754, 1.2166725, NA_real_), tolerance = 1e-07)
    expect_equal(x1$repeatedPValues, c(0.094302989, 0.05707734, NA_real_), tolerance = 1e-07)
    expect_equal(x1$finalStage, NA_integer_)
    expect_equal(x1$finalPValues, c(NA_real_, NA_real_, NA_real_))
expect_equal(x1$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x1$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(x1$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_))
    # Print/summary smoke tests plus code-based round-trip reconstruction of 'x1'.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(x1), NA)))
        expect_output(print(x1)$show())
        invisible(capture.output(expect_error(summary(x1), NA)))
        expect_output(summary(x1)$show())
        x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL)))
        expect_equal(x1CodeBased$testActions, x1$testActions, tolerance = 1e-05)
        expect_equal(x1CodeBased$conditionalRejectionProbabilities, x1$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(x1CodeBased$conditionalPower, x1$conditionalPower, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedConfidenceIntervalLowerBounds, x1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedConfidenceIntervalUpperBounds, x1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$repeatedPValues, x1$repeatedPValues, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalStage, x1$finalStage, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalPValues, x1$finalPValues, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalConfidenceIntervalLowerBounds, x1$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$finalConfidenceIntervalUpperBounds, x1$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(x1CodeBased$medianUnbiasedEstimates, x1$medianUnbiasedEstimates, tolerance = 1e-05)
        expect_type(names(x1), "character")
        df <- as.data.frame(x1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(x1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getAnalysisResults' with a dataset of survival data and without defining a design", {
    .skipTestIfDisabled()

    # Single-stage dataset; a default design is created internally by getAnalysisResults().
    data <- getDataset(
        overallEvents = c(38),
        overallAllocationRatios = c(1),
        overallLogRanks = -c(1.72)
    )

    # @refFS[Formula]{fs:testStatisticSurvival}
    # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater}
    analysisResults1 <- getAnalysisResults(data, alpha = 0.05, directionUpper = FALSE)

    ## Comparison of the results of AnalysisResultsInverseNormal object 'analysisResults1' with expected results
    expect_equal(analysisResults1$thetaH1, 0.57232877, tolerance = 1e-07)
    expect_equal(analysisResults1$testActions, "reject")
    expect_equal(analysisResults1$repeatedConfidenceIntervalLowerBounds, 0.33564434, tolerance = 1e-07)
    expect_equal(analysisResults1$repeatedConfidenceIntervalUpperBounds, 0.97591411, tolerance = 1e-07)
    expect_equal(analysisResults1$repeatedPValues, 0.042716221, tolerance = 1e-07)
    # Print/summary smoke tests plus code-based round-trip reconstruction.
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(analysisResults1), NA)))
        expect_output(print(analysisResults1)$show())
        invisible(capture.output(expect_error(summary(analysisResults1), NA)))
        expect_output(summary(analysisResults1)$show())
        analysisResults1CodeBased <- eval(parse(text = getObjectRCode(analysisResults1, stringWrapParagraphWidth = NULL)))
        expect_equal(analysisResults1CodeBased$thetaH1, analysisResults1$thetaH1, tolerance = 1e-05)
        expect_equal(analysisResults1CodeBased$testActions, analysisResults1$testActions, tolerance = 1e-05)
        expect_equal(analysisResults1CodeBased$repeatedConfidenceIntervalLowerBounds, analysisResults1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(analysisResults1CodeBased$repeatedConfidenceIntervalUpperBounds, analysisResults1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(analysisResults1CodeBased$repeatedPValues, analysisResults1$repeatedPValues, tolerance = 1e-05)
        expect_type(names(analysisResults1), "character")
        df <- as.data.frame(analysisResults1)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(analysisResults1)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }

    # @refFS[Formula]{fs:testStatisticSurvival}
    # @refFS[Formula]{fs:pValuesSurvivalAlternativeGreater}
    # Same data analyzed two-sided: here the null is accepted.
    analysisResults2 <- getAnalysisResults(data, alpha = 0.05, sided = 2)

    ## Comparison of the results of AnalysisResultsInverseNormal object 'analysisResults2' with expected results
    expect_equal(analysisResults2$thetaH1, 0.57232877, tolerance = 1e-07)
    expect_equal(analysisResults2$testActions, "accept")
    expect_equal(analysisResults2$repeatedConfidenceIntervalLowerBounds, 0.3030255, tolerance = 1e-07)
    expect_equal(analysisResults2$repeatedConfidenceIntervalUpperBounds, 1.0809654, tolerance = 1e-07)
    expect_equal(analysisResults2$repeatedPValues, 0.085432442, tolerance = 1e-07)
    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        invisible(capture.output(expect_error(print(analysisResults2), NA)))
        expect_output(print(analysisResults2)$show())
        invisible(capture.output(expect_error(summary(analysisResults2), NA)))
        expect_output(summary(analysisResults2)$show())
        analysisResults2CodeBased <- eval(parse(text = getObjectRCode(analysisResults2, stringWrapParagraphWidth = NULL)))
        expect_equal(analysisResults2CodeBased$thetaH1, analysisResults2$thetaH1, tolerance = 1e-05)
        expect_equal(analysisResults2CodeBased$testActions, analysisResults2$testActions, tolerance = 1e-05)
        expect_equal(analysisResults2CodeBased$repeatedConfidenceIntervalLowerBounds, analysisResults2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(analysisResults2CodeBased$repeatedConfidenceIntervalUpperBounds, analysisResults2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(analysisResults2CodeBased$repeatedPValues, analysisResults2$repeatedPValues, tolerance = 1e-05)
        expect_type(names(analysisResults2), "character")
        df <- as.data.frame(analysisResults2)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(analysisResults2)
        expect_true(is.matrix(mtx))
expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getAnalysisResults' with a dataset of survival data and automatic boundary recalculation", {
    .skipTestIfDisabled()

    # One-sided O'Brien & Fleming alpha-spending design; the observed event
    # counts (205, 285) are related to maxInformation = 387 at analysis time,
    # which triggers the automatic boundary recalculation under test.
    design <- getDesignGroupSequential(sided = 1, alpha = 0.025, typeOfDesign = "asOF")
    data <- getDataset(overallEvents = c(205, 285), overallLogRanks = c(1.87, 2.19))
    analysisResults <- getAnalysisResults(design = design, dataInput = data, maxInformation = 387)

    ## Comparison of the results of AnalysisResultsGroupSequential object 'analysisResults' with expected results
    # Fixed reference values: regression test against previously validated output.
    expect_equal(analysisResults$thetaH1, 1.2962154, tolerance = 1e-07)
    expect_equal(analysisResults$testActions, c("continue", "continue", NA_character_))
    expect_equal(analysisResults$conditionalRejectionProbabilities, c(0.19266595, 0.39869438, NA_real_), tolerance = 1e-07)
    expect_equal(analysisResults$conditionalPower, c(NA_real_, NA_real_, NA_real_))
    expect_equal(analysisResults$repeatedConfidenceIntervalLowerBounds, c(0.87000803, 0.97623896, NA_real_), tolerance = 1e-07)
    expect_equal(analysisResults$repeatedConfidenceIntervalUpperBounds, c(1.9380428, 1.7210688, NA_real_), tolerance = 1e-07)
    expect_equal(analysisResults$repeatedPValues, c(0.11586361, 0.037973374, NA_real_), tolerance = 1e-07)
    # Trial is still ongoing at stage 2, hence no final stage / final inference yet.
    expect_equal(analysisResults$finalStage, NA_integer_)
    expect_equal(analysisResults$finalPValues, c(NA_real_, NA_real_, NA_real_))
    expect_equal(analysisResults$finalConfidenceIntervalLowerBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(analysisResults$finalConfidenceIntervalUpperBounds, c(NA_real_, NA_real_, NA_real_))
    expect_equal(analysisResults$medianUnbiasedEstimates, c(NA_real_, NA_real_, NA_real_))

    if (isTRUE(.isCompleteUnitTestSetEnabled())) {
        # print()/summary() must not raise errors and must produce output.
        invisible(capture.output(expect_error(print(analysisResults), NA)))
        expect_output(print(analysisResults)$show())
        invisible(capture.output(expect_error(summary(analysisResults), NA)))
        expect_output(summary(analysisResults)$show())

        # Round-trip check: regenerating the object from its own R code
        # representation must reproduce all result fields.
        analysisResultsCodeBased <- eval(parse(text =
            getObjectRCode(analysisResults, stringWrapParagraphWidth = NULL)))
        expect_equal(analysisResultsCodeBased$thetaH1, analysisResults$thetaH1, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$testActions, analysisResults$testActions, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$conditionalRejectionProbabilities, analysisResults$conditionalRejectionProbabilities, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$conditionalPower, analysisResults$conditionalPower, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$repeatedConfidenceIntervalLowerBounds, analysisResults$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$repeatedConfidenceIntervalUpperBounds, analysisResults$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$repeatedPValues, analysisResults$repeatedPValues, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$finalStage, analysisResults$finalStage, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$finalPValues, analysisResults$finalPValues, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$finalConfidenceIntervalLowerBounds, analysisResults$finalConfidenceIntervalLowerBounds, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$finalConfidenceIntervalUpperBounds, analysisResults$finalConfidenceIntervalUpperBounds, tolerance = 1e-05)
        expect_equal(analysisResultsCodeBased$medianUnbiasedEstimates, analysisResults$medianUnbiasedEstimates, tolerance = 1e-05)

        # Generic accessors: names(), as.data.frame(), as.matrix() must all work.
        expect_type(names(analysisResults), "character")
        df <- as.data.frame(analysisResults)
        expect_s3_class(df, "data.frame")
        expect_true(nrow(df) > 0 && ncol(df) > 0)
        mtx <- as.matrix(analysisResults)
        expect_true(is.matrix(mtx))
        expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})
rpact/tests/testthat/test-f_logger.R0000644000176200001440000000247114446750002017257 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory
Adaptive Clinical Trial Design and Analysis ## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | RPACT package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File name: test-f_logger.R
## | Creation date: 06 February 2023, 12:13:45
## | File version: $Revision: 7139 $
## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

test_plan_section("Testing Logger Functions")

test_that("Logger functions throw errors when arguments are missing or wrong", {
    # Remember the active log level so it can be restored after the test,
    # whatever happens inside the tryCatch block.
    currentLogLevel <- getLogLevel()
    tryCatch({
        setLogLevel(C_LOG_LEVEL_TRACE)
        # All internal logger functions require arguments; calling them
        # without any must raise an error.
        expect_error(.logBase())
        expect_error(.logInfo())
        expect_error(.getRuntimeString())
        expect_error(.logProgress())
        # The public setters have sensible defaults and must not fail.
        expect_no_error(setLogLevel())
        expect_no_error(resetLogLevel())
    }, finally = {
        # BUG FIX: 'finally' takes an expression that is evaluated when
        # tryCatch exits. The original code passed 'function() { ... }',
        # which only created a closure that was never invoked, so the
        # previous log level was never restored.
        setLogLevel(currentLogLevel)
    })
})
rpact/tests/testthat/test-f_core_assertions.R0000644000176200001440000002637314370207346021205 0ustar liggesusers## |
## | *Unit tests*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | RPACT package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File name: test-f_core_assertions.R
## | Creation date: 06 February 2023, 12:11:54
## | File version: $Revision: 6801 $
## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $
## | Last changed by: $Author: pahlke $
## | test_plan_section("Testing Assertion Functions") test_that("Testing '.assertIsInClosedInterval'", { invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 0, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 1, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 0.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = 0.9999, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = c(1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInClosedInterval(x = c(NA_real_, 0, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) expect_error(.assertIsInClosedInterval(x = -0.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = -0.0001, xName = "x", lower = 0, upper = NA_real_, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = 1.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = c(1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = c(-1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE)) expect_error(.assertIsInClosedInterval(x = c(NA_real_, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInClosedInterval(x = c(0, -1, 1, 2), xName = "x", lower = 0, upper = 1, naAllowed = 
FALSE)) }) test_that("Testing '.assertIsInOpenInterval'", { invisible(capture.output(expect_error(.assertIsInOpenInterval(x = 0.0001, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = 0.9999, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = FALSE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = NA_real_, xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = c(0.9999, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) invisible(capture.output(expect_error(.assertIsInOpenInterval(x = c(NA_real_, 0.0001, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE), NA))) expect_error(.assertIsInOpenInterval(x = 0, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = 0, xName = "x", lower = 0, upper = NA_real_, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = 1, xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = c(1.0001, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = c(-1, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = TRUE)) expect_error(.assertIsInOpenInterval(x = c(NA_real_, NA_real_), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) expect_error(.assertIsInOpenInterval(x = c(0, -1, 1, 2), xName = "x", lower = 0, upper = 1, naAllowed = FALSE)) }) test_that("Testing '.assertDesignParameterExists'", { expect_error(.assertDesignParameterExists(), "Missing argument: 'design' must be defined", fixed = TRUE ) expect_error(.assertDesignParameterExists(design = getAssertionTestDesign()), "Missing argument: 'parameterName' must be defined", fixed = TRUE ) 
expect_error(.assertDesignParameterExists(design = getAssertionTestDesign(), parameterName = "kMax"), "Missing argument: 'defaultValue' must be defined", fixed = TRUE ) expect_error(.assertDesignParameterExists( design = getAssertionTestDesign(), parameterName = "kMax", defaultValue = C_KMAX_DEFAULT ), "Missing argument: parameter 'kMax' must be specified in design", fixed = TRUE ) expect_error(.assertDesignParameterExists( design = getAssertionTestDesign(kMax = NA_integer_), parameterName = "kMax", defaultValue = C_KMAX_DEFAULT ), "Missing argument: parameter 'kMax' must be specified in design", fixed = TRUE ) }) test_that("Testing '.assertIsValidThetaRange'", { expect_error(.assertIsValidThetaRange(thetaRange = c()), "Illegal argument: 'thetaRange' (NULL) must be a vector with two entries defining minimum and maximum or a sequence of numeric values with length > 2", fixed = TRUE ) expect_error(.assertIsValidThetaRange(thetaRange = c(1, -2)), "Illegal argument: 'thetaRange' with length 2 must contain minimum < maximum (1 >= -2)", fixed = TRUE ) expect_equal(.assertIsValidThetaRange(thetaRange = c(1, 2, 3)), c(1, 2, 3)) expect_equal(.assertIsValidThetaRange(thetaRange = c(-1, 2)), seq(-1, 2, 3 / C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT)) }) test_that("Testing '.assertIsSingleNumber'", { expect_error(.assertIsSingleNumber(NA, "x"), "Illegal argument: 'x' (NA) must be a valid numeric value", fixed = TRUE ) expect_error(.assertIsSingleNumber(NULL, "x"), "Missing argument: 'x' must be a valid numeric value", fixed = TRUE ) expect_error(.assertIsSingleNumber(c(1, 2), "x"), "Illegal argument: 'x' c(1, 2) must be a single numeric value", fixed = TRUE ) expect_error(.assertIsSingleNumber(numeric(0), "x"), "Missing argument: 'x' must be a valid numeric value", fixed = TRUE ) }) test_that("Testing '.assertAssociatedArgumentsAreDefined'", { expect_error(.assertAssociatedArgumentsAreDefined(a = NA, b = 1), "Missing argument: 'a' must be defined because 'b' is defined", fixed = 
TRUE ) expect_error(.assertAssociatedArgumentsAreDefined(a = NA, b = 1, c = NA), "Missing argument: 'a', 'c' must be defined because 'b' is defined", fixed = TRUE ) expect_error(.assertAssociatedArgumentsAreDefined(a = NA, b = 1, c = 2), "Missing argument: 'a' must be defined because 'b', 'c' are defined", fixed = TRUE ) }) test_that("Testing '.associatedArgumentsAreDefined'", { expect_equal(.associatedArgumentsAreDefined(nPlanned = NA_real_, thetaH1 = NA_real_), FALSE) expect_warning(expect_equal(.associatedArgumentsAreDefined(nPlanned = NA_real_, thetaH1 = 1), FALSE), "Incomplete associated arguments: 'nPlanned' should be defined because 'thetaH1' is defined", fixed = TRUE ) expect_equal(.associatedArgumentsAreDefined(nPlanned = 1, thetaH1 = 1), TRUE) }) test_that("Testing '.isValidNPlanned'", { expect_equal(.isValidNPlanned(nPlanned = c(1, 2), kMax = 4, stage = 2), TRUE) expect_silent(.isValidNPlanned(nPlanned = NA_real_, kMax = 4, stage = 2)) expect_warning(.isValidNPlanned(nPlanned = c(1), kMax = 4, stage = 2), "'nPlanned' (1) will be ignored: length must be equal to 2 (kMax - stage = 4 - 2)", fixed = TRUE ) expect_warning(.isValidNPlanned(nPlanned = c(1, 2, 3), kMax = 4, stage = 2), "'nPlanned' (1, 2, 3) will be ignored: length must be equal to 2 (kMax - stage = 4 - 2)", fixed = TRUE ) }) test_that("Testing '.assertIsValidSummaryIntervalFormat'", { .assertIsValidSummaryIntervalFormat("[%s; %s]") .assertIsValidSummaryIntervalFormat("%s - %s") .assertIsValidSummaryIntervalFormat("(%s, %s)") expect_error(.assertIsValidSummaryIntervalFormat("[%s; %s; %s]")) expect_error(.assertIsValidSummaryIntervalFormat("[%s]")) expect_error(.assertIsValidSummaryIntervalFormat("")) expect_error(.assertIsValidSummaryIntervalFormat(1)) }) test_that("Testing '.assertIsSingleInteger'", { expect_error(.assertIsSingleInteger(NA_integer_, "x", naAllowed = FALSE)) expect_error(.assertIsSingleInteger(-1, "x", naAllowed = FALSE)) expect_error(.assertIsSingleInteger(-1, "x", naAllowed = 
FALSE, validateType = FALSE), NA) expect_error(.assertIsSingleInteger(NA_integer_, "x", naAllowed = TRUE), NA) expect_error(.assertIsSingleInteger(-1, "x", naAllowed = TRUE)) expect_error(.assertIsSingleInteger("1", "x", naAllowed = TRUE)) expect_error(.assertIsSingleInteger(1, "x", naAllowed = TRUE, validateType = TRUE)) expect_error(.assertIsSingleInteger(1, "x", naAllowed = TRUE, validateType = FALSE), NA) }) test_that("Testing '.assertIsSinglePositiveInteger'", { expect_error(.assertIsSinglePositiveInteger(NA_integer_, "x", naAllowed = FALSE)) expect_error(.assertIsSinglePositiveInteger(-1, "x", naAllowed = FALSE)) expect_error(.assertIsSinglePositiveInteger(NA_integer_, "x", naAllowed = TRUE), NA) expect_error(.assertIsSinglePositiveInteger(NA_real_, "x", naAllowed = TRUE)) expect_error(.assertIsSinglePositiveInteger(-1, "x", naAllowed = TRUE)) expect_error(.assertIsSinglePositiveInteger("1", "x", naAllowed = TRUE)) expect_error(.assertIsSinglePositiveInteger(1, "x", naAllowed = TRUE, validateType = TRUE)) expect_error(.assertIsSinglePositiveInteger(1, "x", naAllowed = TRUE, validateType = FALSE), NA) }) test_that("Testing '.assertIsSingleLogical'", { expect_error(.assertIsSingleLogical("TRUE", "x", naAllowed = FALSE)) expect_error(.assertIsSingleLogical("FALSE", "x", naAllowed = FALSE)) expect_error(.assertIsSingleLogical(TRUE, "x", naAllowed = FALSE), NA) expect_error(.assertIsSingleLogical(FALSE, "x", naAllowed = FALSE), NA) expect_error(.assertIsSingleLogical(NA, "x", naAllowed = TRUE), NA) expect_error(.assertIsSingleLogical(NA, "x", naAllowed = FALSE)) }) test_that("Testing '.assertIsValidMatrix'", { expect_error(.assertIsValidMatrix(c(), "x", naAllowed = FALSE)) expect_error(.assertIsValidMatrix(NULL, "x", naAllowed = FALSE)) expect_error(.assertIsValidMatrix(1:3, "x", naAllowed = FALSE)) expect_error(.assertIsValidMatrix(1:3, "x", naAllowed = TRUE)) expect_error(.assertIsValidMatrix("a", "x", naAllowed = FALSE)) expect_error(.assertIsValidMatrix("a", 
"x", naAllowed = TRUE))
    expect_error(.assertIsValidMatrix(NA, "x", naAllowed = FALSE))
    expect_error(.assertIsValidMatrix(NA, "x", naAllowed = TRUE))
})
rpact/tests/testthat/helper-f_core_assertions.R0000644000176200001440000000372014277150417021506 0ustar liggesusers## |
## | *Unit tests helper functions*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 6117 $
## | Last changed: $Date: 2022-05-04 15:55:23 +0200 (Mi, 04 Mai 2022) $
## | Last changed by: $Author: pahlke $
## |

# Builds a minimal trial design fixture for the assertion unit tests.
# Depending on 'designClass' this is either a Fisher combination design
# (constructed directly) or a group-sequential-type design produced via
# the internal .createDesign() factory with fixed default parameters.
getAssertionTestDesign <- function(..., kMax = NA_integer_, informationRates = NA_real_,
        futilityBounds = NA_real_, designClass = "TrialDesignInverseNormal") {
    if (designClass == "TrialDesignFisher") {
        # Fisher designs take the futility bounds as 'alpha0Vec';
        # iterations = 0 disables the simulation of alpha.
        fisherDesign <- TrialDesignFisher(
            kMax = kMax,
            alpha = 0.025,
            method = "equalAlpha",
            alpha0Vec = futilityBounds,
            informationRates = informationRates,
            tolerance = 1e-14,
            iterations = 0,
            seed = 9498485
        )
        return(fisherDesign)
    }

    # All non-Fisher design classes share one fixed parameter set; only the
    # caller-supplied kMax, information rates and futility bounds vary.
    .createDesign(
        designClass = designClass,
        kMax = kMax,
        alpha = 0.025,
        beta = 0.2,
        sided = 1,
        informationRates = informationRates,
        futilityBounds = futilityBounds,
        typeOfDesign = "OF",
        delta = 0,
        optimizationCriterion = "ASNH1",
        gammaA = 1,
        typeBetaSpending = "none",
        userAlphaSpending = NA_real_,
        userBetaSpending = NA_real_,
        gammaB = 1,
        tolerance = 1e-06
    )
}
rpact/tests/testthat/test-f_design_fisher_combination_test.R0000644000176200001440000010303214370207346024231 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis
## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_design_fisher_combination_test.R ## | Creation date: 06 February 2023, 12:12:01 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Fisher Design Functionality") test_that("'getDesignFisher' with default parameters: parameters and results are as expected", { # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher0 <- getDesignFisher() ## Comparison of the results of TrialDesignFisher object 'designFisher0' with expected results expect_equal(designFisher0$alphaSpent, c(0.012308547, 0.01962413, 0.025), tolerance = 1e-07) expect_equal(designFisher0$criticalValues, c(0.012308547, 0.0016635923, 0.00029106687), tolerance = 1e-07) expect_equal(designFisher0$stageLevels, c(0.012308547, 0.012308547, 0.012308547), tolerance = 1e-07) expect_equal(designFisher0$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher0), NA))) expect_output(print(designFisher0)$show()) invisible(capture.output(expect_error(summary(designFisher0), NA))) expect_output(summary(designFisher0)$show()) designFisher0CodeBased <- eval(parse(text = getObjectRCode(designFisher0, stringWrapParagraphWidth = NULL))) expect_equal(designFisher0CodeBased$alphaSpent, designFisher0$alphaSpent, tolerance = 1e-05) expect_equal(designFisher0CodeBased$criticalValues, designFisher0$criticalValues, tolerance = 1e-05) 
expect_equal(designFisher0CodeBased$stageLevels, designFisher0$stageLevels, tolerance = 1e-05) expect_equal(designFisher0CodeBased$nonStochasticCurtailment, designFisher0$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher0), "character") df <- as.data.frame(designFisher0) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher0) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignFisher' with default parameters and simulated alpha: parameters and results are as expected", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher <- getDesignFisher(iterations = 10000, seed = 1234567) ## Comparison of the results of TrialDesignFisher object 'designFisher' with expected results expect_equal(designFisher$alphaSpent, c(0.012308547, 0.01962413, 0.025), tolerance = 1e-07) expect_equal(designFisher$criticalValues, c(0.012308547, 0.0016635923, 0.00029106687), tolerance = 1e-07) expect_equal(designFisher$stageLevels, c(0.012308547, 0.012308547, 0.012308547), tolerance = 1e-07) expect_equal(designFisher$simAlpha, 0.0243, tolerance = 1e-07) expect_equal(designFisher$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher), NA))) expect_output(print(designFisher)$show()) invisible(capture.output(expect_error(summary(designFisher), NA))) expect_output(summary(designFisher)$show()) designFisherCodeBased <- eval(parse(text = getObjectRCode(designFisher, stringWrapParagraphWidth = NULL))) expect_equal(designFisherCodeBased$alphaSpent, designFisher$alphaSpent, tolerance = 1e-05) expect_equal(designFisherCodeBased$criticalValues, designFisher$criticalValues, tolerance = 1e-05) expect_equal(designFisherCodeBased$stageLevels, designFisher$stageLevels, tolerance = 1e-05) 
expect_equal(designFisherCodeBased$simAlpha, designFisher$simAlpha, tolerance = 1e-05) expect_equal(designFisherCodeBased$nonStochasticCurtailment, designFisher$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher), "character") df <- as.data.frame(designFisher) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getDesignFisher' with kMax = 2,3,..,6: parameters and results are as expected for different arguments", { .skipTestIfDisabled() # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationFullAlpha} designFisher1 <- getDesignFisher(kMax = 2, alpha = 0.05, alpha0Vec = 0.5, method = "fullAlpha") ## Comparison of the results of TrialDesignFisher object 'designFisher1' with expected results expect_equal(designFisher1$alphaSpent, c(0.023314852, 0.05), tolerance = 1e-07) expect_equal(designFisher1$criticalValues, c(0.023314852, 0.0087049407), tolerance = 1e-07) expect_equal(designFisher1$stageLevels, c(0.023314852, 0.05), tolerance = 1e-07) expect_equal(designFisher1$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher1), NA))) expect_output(print(designFisher1)$show()) invisible(capture.output(expect_error(summary(designFisher1), NA))) expect_output(summary(designFisher1)$show()) designFisher1CodeBased <- eval(parse(text = getObjectRCode(designFisher1, stringWrapParagraphWidth = NULL))) expect_equal(designFisher1CodeBased$alphaSpent, designFisher1$alphaSpent, tolerance = 1e-05) expect_equal(designFisher1CodeBased$criticalValues, designFisher1$criticalValues, tolerance = 1e-05) expect_equal(designFisher1CodeBased$stageLevels, designFisher1$stageLevels, tolerance = 1e-05) expect_equal(designFisher1CodeBased$nonStochasticCurtailment, designFisher1$nonStochasticCurtailment, tolerance = 1e-05) 
expect_type(names(designFisher1), "character") df <- as.data.frame(designFisher1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher2 <- getDesignFisher(kMax = 3, alpha0Vec = c(0.7, 0.5), informationRates = c(0.1, 0.3, 1), method = "equalAlpha") ## Comparison of the results of TrialDesignFisher object 'designFisher2' with expected results expect_equal(designFisher2$alphaSpent, c(0.011823636, 0.019807903, 0.025), tolerance = 1e-07) expect_equal(designFisher2$criticalValues, c(0.011823636, 0.00036698794, 3.0631293e-07), tolerance = 1e-07) expect_equal(designFisher2$stageLevels, c(0.011823636, 0.011823636, 0.011823636), tolerance = 1e-07) expect_equal(designFisher2$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher2), NA))) expect_output(print(designFisher2)$show()) invisible(capture.output(expect_error(summary(designFisher2), NA))) expect_output(summary(designFisher2)$show()) designFisher2CodeBased <- eval(parse(text = getObjectRCode(designFisher2, stringWrapParagraphWidth = NULL))) expect_equal(designFisher2CodeBased$alphaSpent, designFisher2$alphaSpent, tolerance = 1e-05) expect_equal(designFisher2CodeBased$criticalValues, designFisher2$criticalValues, tolerance = 1e-05) expect_equal(designFisher2CodeBased$stageLevels, designFisher2$stageLevels, tolerance = 1e-05) expect_equal(designFisher2CodeBased$nonStochasticCurtailment, designFisher2$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher2), "character") df <- as.data.frame(designFisher2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # 
@refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher3 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7, 0.5, 0.3), informationRates = c(0.1, 0.3, 0.6, 1), bindingFutility = FALSE, method = "equalAlpha") ## Comparison of the results of TrialDesignFisher object 'designFisher3' with expected results expect_equal(designFisher3$alphaSpent, c(0.0082575405, 0.014885188, 0.020347598, 0.025), tolerance = 1e-07) expect_equal(designFisher3$criticalValues, c(0.0082575405, 0.00021760942, 4.7163541e-06, 8.3369321e-08), tolerance = 1e-07) expect_equal(designFisher3$stageLevels, c(0.0082575405, 0.0082575405, 0.0082575405, 0.0082575405), tolerance = 1e-07) expect_equal(designFisher3$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher3), NA))) expect_output(print(designFisher3)$show()) invisible(capture.output(expect_error(summary(designFisher3), NA))) expect_output(summary(designFisher3)$show()) designFisher3CodeBased <- eval(parse(text = getObjectRCode(designFisher3, stringWrapParagraphWidth = NULL))) expect_equal(designFisher3CodeBased$alphaSpent, designFisher3$alphaSpent, tolerance = 1e-05) expect_equal(designFisher3CodeBased$criticalValues, designFisher3$criticalValues, tolerance = 1e-05) expect_equal(designFisher3CodeBased$stageLevels, designFisher3$stageLevels, tolerance = 1e-05) expect_equal(designFisher3CodeBased$nonStochasticCurtailment, designFisher3$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher3), "character") df <- as.data.frame(designFisher3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher4 <- getDesignFisher(kMax = 5, alpha0Vec = c(0.7, 0.5, 0.3, 0.3), 
informationRates = c(0.1, 0.3, 0.5, 0.6, 1), method = "equalAlpha") ## Comparison of the results of TrialDesignFisher object 'designFisher4' with expected results expect_equal(designFisher4$alphaSpent, c(0.011157609, 0.018733282, 0.022750003, 0.024162936, 0.025), tolerance = 1e-07) expect_equal(designFisher4$criticalValues, c(0.011157609, 0.00033722277, 2.3068413e-05, 5.4825339e-06, 9.8015456e-08), tolerance = 1e-07) expect_equal(designFisher4$stageLevels, c(0.011157609, 0.011157609, 0.011157609, 0.011157609, 0.011157609), tolerance = 1e-07) expect_equal(designFisher4$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher4), NA))) expect_output(print(designFisher4)$show()) invisible(capture.output(expect_error(summary(designFisher4), NA))) expect_output(summary(designFisher4)$show()) designFisher4CodeBased <- eval(parse(text = getObjectRCode(designFisher4, stringWrapParagraphWidth = NULL))) expect_equal(designFisher4CodeBased$alphaSpent, designFisher4$alphaSpent, tolerance = 1e-05) expect_equal(designFisher4CodeBased$criticalValues, designFisher4$criticalValues, tolerance = 1e-05) expect_equal(designFisher4CodeBased$stageLevels, designFisher4$stageLevels, tolerance = 1e-05) expect_equal(designFisher4CodeBased$nonStochasticCurtailment, designFisher4$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher4), "character") df <- as.data.frame(designFisher4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationEqualAlpha} designFisher5 <- getDesignFisher(kMax = 5, alpha = 0.2, alpha0Vec = c(0.7, 0.5, 0.3, 0.2), method = "equalAlpha") ## Comparison of the results of TrialDesignFisher object 'designFisher5' with expected results 
expect_equal(designFisher5$alphaSpent, c(0.12649082, 0.17362071, 0.19349017, 0.19931765, 0.2), tolerance = 1e-07) expect_equal(designFisher5$criticalValues, c(0.12649082, 0.027546669, 0.0068856935, 0.0018391192, 0.00051168366), tolerance = 1e-07) expect_equal(designFisher5$stageLevels, c(0.12649082, 0.12649082, 0.12649082, 0.12649082, 0.12649082), tolerance = 1e-07) expect_equal(designFisher5$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher5), NA))) expect_output(print(designFisher5)$show()) invisible(capture.output(expect_error(summary(designFisher5), NA))) expect_output(summary(designFisher5)$show()) designFisher5CodeBased <- eval(parse(text = getObjectRCode(designFisher5, stringWrapParagraphWidth = NULL))) expect_equal(designFisher5CodeBased$alphaSpent, designFisher5$alphaSpent, tolerance = 1e-05) expect_equal(designFisher5CodeBased$criticalValues, designFisher5$criticalValues, tolerance = 1e-05) expect_equal(designFisher5CodeBased$stageLevels, designFisher5$stageLevels, tolerance = 1e-05) expect_equal(designFisher5CodeBased$nonStochasticCurtailment, designFisher5$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher5), "character") df <- as.data.frame(designFisher5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationFullAlpha} designFisher6 <- getDesignFisher(kMax = 4, informationRates = c(0.1, 0.3, 0.7, 1), method = "fullAlpha") ## Comparison of the results of TrialDesignFisher object 'designFisher6' with expected results expect_equal(designFisher6$alphaSpent, c(1.0550077e-06, 0.00020026524, 0.0065266359, 0.025), tolerance = 1e-07) expect_equal(designFisher6$criticalValues, c(1.0550077e-06, 1.0550077e-06, 1.0550077e-06, 1.0550077e-06), 
tolerance = 1e-07) expect_equal(designFisher6$stageLevels, c(1.0550077e-06, 0.00020026524, 0.0065266359, 0.025), tolerance = 1e-07) expect_equal(designFisher6$nonStochasticCurtailment, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher6), NA))) expect_output(print(designFisher6)$show()) invisible(capture.output(expect_error(summary(designFisher6), NA))) expect_output(summary(designFisher6)$show()) designFisher6CodeBased <- eval(parse(text = getObjectRCode(designFisher6, stringWrapParagraphWidth = NULL))) expect_equal(designFisher6CodeBased$alphaSpent, designFisher6$alphaSpent, tolerance = 1e-05) expect_equal(designFisher6CodeBased$criticalValues, designFisher6$criticalValues, tolerance = 1e-05) expect_equal(designFisher6CodeBased$stageLevels, designFisher6$stageLevels, tolerance = 1e-05) expect_equal(designFisher6CodeBased$nonStochasticCurtailment, designFisher6$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher6), "character") df <- as.data.frame(designFisher6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationFullAlpha} designFisher7 <- getDesignFisher(kMax = 3, alpha0Vec = c(0.7, 0.6), informationRates = c(0.1, 0.7, 1), method = "fullAlpha") ## Comparison of the results of TrialDesignFisher object 'designFisher7' with expected results expect_equal(designFisher7$alphaSpent, c(2.1580149e-06, 0.0066525356, 0.01947245), tolerance = 1e-07) expect_equal(designFisher7$criticalValues, c(2.1580149e-06, 2.1580149e-06, 2.1580149e-06), tolerance = 1e-07) expect_equal(designFisher7$stageLevels, c(2.1580149e-06, 0.008216166, 0.025), tolerance = 1e-07) expect_equal(designFisher7$nonStochasticCurtailment, TRUE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(designFisher7), NA))) expect_output(print(designFisher7)$show()) invisible(capture.output(expect_error(summary(designFisher7), NA))) expect_output(summary(designFisher7)$show()) designFisher7CodeBased <- eval(parse(text = getObjectRCode(designFisher7, stringWrapParagraphWidth = NULL))) expect_equal(designFisher7CodeBased$alphaSpent, designFisher7$alphaSpent, tolerance = 1e-05) expect_equal(designFisher7CodeBased$criticalValues, designFisher7$criticalValues, tolerance = 1e-05) expect_equal(designFisher7CodeBased$stageLevels, designFisher7$stageLevels, tolerance = 1e-05) expect_equal(designFisher7CodeBased$nonStochasticCurtailment, designFisher7$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher7), "character") df <- as.data.frame(designFisher7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationNoTreatmentStageInteraction} designFisher8 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7, 0.6, 0.5), method = "noInteraction") ## Comparison of the results of TrialDesignFisher object 'designFisher8' with expected results expect_equal(designFisher8$alphaSpent, c(0.0098603693, 0.012073314, 0.018133935, 0.025), tolerance = 1e-07) expect_equal(designFisher8$criticalValues, c(0.0098603693, 0.00051915905, 0.00031149543, 0.00015574772), tolerance = 1e-07) expect_equal(designFisher8$stageLevels, c(0.0098603693, 0.0044457148, 0.012979977, 0.025), tolerance = 1e-07) expect_equal(designFisher8$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher8), NA))) expect_output(print(designFisher8)$show()) invisible(capture.output(expect_error(summary(designFisher8), NA))) expect_output(summary(designFisher8)$show()) 
designFisher8CodeBased <- eval(parse(text = getObjectRCode(designFisher8, stringWrapParagraphWidth = NULL))) expect_equal(designFisher8CodeBased$alphaSpent, designFisher8$alphaSpent, tolerance = 1e-05) expect_equal(designFisher8CodeBased$criticalValues, designFisher8$criticalValues, tolerance = 1e-05) expect_equal(designFisher8CodeBased$stageLevels, designFisher8$stageLevels, tolerance = 1e-05) expect_equal(designFisher8CodeBased$nonStochasticCurtailment, designFisher8$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher8), "character") df <- as.data.frame(designFisher8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationNoTreatmentStageInteraction} designFisher9 <- getDesignFisher(kMax = 6, alpha = 0.1, alpha0Vec = c(0.7, 0.6, 0.5, 0.4, 0.3), method = "noInteraction") ## Comparison of the results of TrialDesignFisher object 'designFisher9' with expected results expect_equal(designFisher9$alphaSpent, c(0.058031958, 0.064517887, 0.079453273, 0.092924559, 0.098794775, 0.1), tolerance = 1e-07) expect_equal(designFisher9$criticalValues, c(0.058031958, 0.0026047006, 0.0015628203, 0.00078141017, 0.00031256407, 9.3769221e-05), tolerance = 1e-07) expect_equal(designFisher9$stageLevels, c(0.058031958, 0.018103809, 0.044282865, 0.074062827, 0.095655516, 0.1), tolerance = 1e-07) expect_equal(designFisher9$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher9), NA))) expect_output(print(designFisher9)$show()) invisible(capture.output(expect_error(summary(designFisher9), NA))) expect_output(summary(designFisher9)$show()) designFisher9CodeBased <- eval(parse(text = getObjectRCode(designFisher9, stringWrapParagraphWidth = NULL))) 
expect_equal(designFisher9CodeBased$alphaSpent, designFisher9$alphaSpent, tolerance = 1e-05) expect_equal(designFisher9CodeBased$criticalValues, designFisher9$criticalValues, tolerance = 1e-05) expect_equal(designFisher9CodeBased$stageLevels, designFisher9$stageLevels, tolerance = 1e-05) expect_equal(designFisher9CodeBased$nonStochasticCurtailment, designFisher9$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher9), "character") df <- as.data.frame(designFisher9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationNoTreatmentStageInteraction} designFisher10 <- getDesignFisher( kMax = 6, alpha = 0.1, alpha0Vec = c(0.7, 0.6, 0.5, 0.4, 0.3), method = "noInteraction", informationRates = c(0.1, 0.15, 0.3, 0.4, 0.9, 1) ) ## Comparison of the results of TrialDesignFisher object 'designFisher10' with expected results expect_equal(designFisher10$alphaSpent, c(0.082381502, 0.082401579, 0.084330144, 0.086806556, 0.10023391, 0.1), tolerance = 1e-07) expect_equal(designFisher10$criticalValues, c(0.082381502, 0.00017925198, 0.00011812048, 5.906024e-05, 3.9204058e-05, 1.1761218e-05), tolerance = 1e-07) expect_equal(designFisher10$stageLevels, c(0.082381502, 0.0005998602, 0.0062212598, 0.012409923, 0.09943647, 0.1), tolerance = 1e-07) expect_equal(designFisher10$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher10), NA))) expect_output(print(designFisher10)$show()) invisible(capture.output(expect_error(summary(designFisher10), NA))) expect_output(summary(designFisher10)$show()) designFisher10CodeBased <- eval(parse(text = getObjectRCode(designFisher10, stringWrapParagraphWidth = NULL))) expect_equal(designFisher10CodeBased$alphaSpent, designFisher10$alphaSpent, 
tolerance = 1e-05) expect_equal(designFisher10CodeBased$criticalValues, designFisher10$criticalValues, tolerance = 1e-05) expect_equal(designFisher10CodeBased$stageLevels, designFisher10$stageLevels, tolerance = 1e-05) expect_equal(designFisher10CodeBased$nonStochasticCurtailment, designFisher10$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher10), "character") df <- as.data.frame(designFisher10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationUserDefinedAlphaSpending} designFisher11 <- getDesignFisher(kMax = 4, alpha0Vec = c(0.7, 0.6, 0.5), method = "userDefinedAlpha", userAlphaSpending = c(0.01, 0.015, 0.02, 0.025)) ## Comparison of the results of TrialDesignFisher object 'designFisher11' with expected results expect_equal(designFisher11$alphaSpent, c(0.01, 0.015, 0.02, 0.025), tolerance = 1e-07) expect_equal(designFisher11$criticalValues, c(0.01, 0.0011768873, 0.00031357454, 0.00011586425), tolerance = 1e-07) expect_equal(designFisher11$stageLevels, c(0.01, 0.0091148534, 0.013047692, 0.020300118), tolerance = 1e-07) expect_equal(designFisher11$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher11), NA))) expect_output(print(designFisher11)$show()) invisible(capture.output(expect_error(summary(designFisher11), NA))) expect_output(summary(designFisher11)$show()) designFisher11CodeBased <- eval(parse(text = getObjectRCode(designFisher11, stringWrapParagraphWidth = NULL))) expect_equal(designFisher11CodeBased$alphaSpent, designFisher11$alphaSpent, tolerance = 1e-05) expect_equal(designFisher11CodeBased$criticalValues, designFisher11$criticalValues, tolerance = 1e-05) expect_equal(designFisher11CodeBased$stageLevels, 
designFisher11$stageLevels, tolerance = 1e-05) expect_equal(designFisher11CodeBased$nonStochasticCurtailment, designFisher11$nonStochasticCurtailment, tolerance = 1e-05) expect_type(names(designFisher11), "character") df <- as.data.frame(designFisher11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(designFisher11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Tab.]{fs:tab:output:getDesignFisher} # @refFS[Formula]{fs:FisherCombinationUserDefinedAlphaSpending} designFisher12 <- getDesignFisher( kMax = 4, alpha0Vec = c(0.7, 0.6, 0.5), informationRates = c(0.1, 0.3, 0.7, 1), method = "userDefinedAlpha", userAlphaSpending = c(0.01, 0.015, 0.02, 0.025) ) ## Comparison of the results of TrialDesignFisher object 'designFisher12' with expected results expect_equal(designFisher12$alphaSpent, c(0.01, 0.015, 0.02, 0.025), tolerance = 1e-07) expect_equal(designFisher12$criticalValues, c(0.01, 0.00018389153, 2.6484943e-06, 5.2344628e-07), tolerance = 1e-07) expect_equal(designFisher12$stageLevels, c(0.01, 0.0073532156, 0.0101804, 0.018500415), tolerance = 1e-07) expect_equal(designFisher12$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher12), NA))) expect_output(print(designFisher12)$show()) invisible(capture.output(expect_error(summary(designFisher12), NA))) expect_output(summary(designFisher12)$show()) designFisher12CodeBased <- eval(parse(text = getObjectRCode(designFisher12, stringWrapParagraphWidth = NULL))) expect_equal(designFisher12CodeBased$alphaSpent, designFisher12$alphaSpent, tolerance = 1e-05) expect_equal(designFisher12CodeBased$criticalValues, designFisher12$criticalValues, tolerance = 1e-05) expect_equal(designFisher12CodeBased$stageLevels, designFisher12$stageLevels, tolerance = 1e-05) expect_equal(designFisher12CodeBased$nonStochasticCurtailment, designFisher12$nonStochasticCurtailment, 
        tolerance = 1e-05)
    expect_type(names(designFisher12), "character")
    df <- as.data.frame(designFisher12)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(designFisher12)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
    }
})

test_that("'getDesignFisher': illegal arguments throw exceptions as expected", {
    # --- 'userAlphaSpending' length consistency checks ---
    # Length of 'userAlphaSpending' must equal an explicitly specified 'kMax'
    expect_error(getDesignFisher(
        method = C_FISHER_METHOD_USER_DEFINED_ALPHA,
        userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.025), kMax = 4
    ),
    paste0(
        "Conflicting arguments: length of 'userAlphaSpending' (5) ",
        "must be equal to 'kMax' (4)"
    ),
    fixed = TRUE
    )
    # Length of 'userAlphaSpending' must equal the length of 'informationRates'
    expect_error(getDesignFisher(
        method = C_FISHER_METHOD_USER_DEFINED_ALPHA,
        userAlphaSpending = c(0.01, 0.02, 0.025), informationRates = c(0.5, 1)
    ),
    paste0(
        "Conflicting arguments: length of 'userAlphaSpending' (3) ",
        "must be equal to length of 'informationRates' (2)"
    ),
    fixed = TRUE
    )
    # Same check with a different (non-equidistant) information rate vector
    expect_error(getDesignFisher(
        method = C_FISHER_METHOD_USER_DEFINED_ALPHA,
        userAlphaSpending = c(0.01, 0.02, 0.025), informationRates = c(0.4, 1)
    ),
    paste0(
        "Conflicting arguments: length of 'userAlphaSpending' (3) ",
        "must be equal to length of 'informationRates' (2)"
    ),
    fixed = TRUE
    )
    # --- 'userAlphaSpending' monotonicity / bound checks ---
    # Values must be non-decreasing and bounded above by the overall alpha
    # (here alpha is taken from the last element, 0.021, which is violated)
    expect_error(getDesignFisher(
        method = C_FISHER_METHOD_USER_DEFINED_ALPHA,
        userAlphaSpending = c(0.01, 0.02, 0.023, 0.023, 0.021)
    ),
    paste0(
        "'userAlphaSpending' = c(0.01, 0.02, 0.023, 0.023, 0.021) must be a vector that ",
        "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_5 <= alpha = 0.021"
    ),
    fixed = TRUE
    )
    # Explicit 'alpha' smaller than the last spending value must be rejected
    expect_error(getDesignFisher(
        method = C_FISHER_METHOD_USER_DEFINED_ALPHA,
        userAlphaSpending = c(0.01, 0.02, 0.023), alpha = 0.02
    ),
    paste0(
        "'userAlphaSpending' = c(0.01, 0.02, 0.023) must be a vector that ",
        "satisfies the following condition: 0 <= alpha_1 <= .. <= alpha_3 <= alpha = 0.02"
    ),
    fixed = TRUE
    )
    # If 'alpha' is not specified, it is derived from the last 'userAlphaSpending' value
    expect_equal(getDesignFisher(
        method = C_FISHER_METHOD_USER_DEFINED_ALPHA,
        userAlphaSpending = c(0.01, 0.02, 0.023)
    )$alpha, 0.023)
    # 'userAlphaSpending' is mandatory for the user-defined alpha spending method
    expect_error(getDesignFisher(method = C_FISHER_METHOD_USER_DEFINED_ALPHA),
        "Missing argument: parameter 'userAlphaSpending' must be specified in design",
        fixed = TRUE
    )
    # --- 'kMax' bound checks: valid range is [1; C_KMAX_UPPER_BOUND_FISHER] ---
    expect_error(getDesignFisher(kMax = Inf),
        paste0(
            "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; ",
            C_KMAX_UPPER_BOUND_FISHER, "]"
        ),
        fixed = TRUE
    )
    expect_error(getDesignFisher(kMax = -Inf),
        paste0(
            "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; ",
            C_KMAX_UPPER_BOUND_FISHER, "]"
        ),
        fixed = TRUE
    )
    # Exhaustive sweep over illegal 'kMax' values below the lower bound
    # (the message text encodes the bound [1; 6] literally)
    expect_error(getDesignFisher(kMax = -Inf), "Argument out of bounds: 'kMax' (-Inf) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -10), "Argument out of bounds: 'kMax' (-10) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -9), "Argument out of bounds: 'kMax' (-9) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -8), "Argument out of bounds: 'kMax' (-8) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -7), "Argument out of bounds: 'kMax' (-7) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -6), "Argument out of bounds: 'kMax' (-6) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -5), "Argument out of bounds: 'kMax' (-5) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -4), "Argument out of bounds: 'kMax' (-4) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -3), "Argument out of bounds: 'kMax' (-3) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -2), "Argument out of bounds: 'kMax' (-2) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = -1), "Argument out of bounds: 'kMax' (-1) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 0), "Argument out of bounds: 'kMax' (0) is out of bounds [1; 6]", fixed = TRUE)
    # Exhaustive sweep over illegal 'kMax' values above the upper bound
    expect_error(getDesignFisher(kMax = 7), "Argument out of bounds: 'kMax' (7) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 8), "Argument out of bounds: 'kMax' (8) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 9), "Argument out of bounds: 'kMax' (9) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 10), "Argument out of bounds: 'kMax' (10) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 11), "Argument out of bounds: 'kMax' (11) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 12), "Argument out of bounds: 'kMax' (12) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 13), "Argument out of bounds: 'kMax' (13) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 14), "Argument out of bounds: 'kMax' (14) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 15), "Argument out of bounds: 'kMax' (15) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = 16), "Argument out of bounds: 'kMax' (16) is out of bounds [1; 6]", fixed = TRUE)
    expect_error(getDesignFisher(kMax = Inf), "Argument out of bounds: 'kMax' (Inf) is out of bounds [1; 6]", fixed = TRUE)
    # --- length of 'informationRates' must equal 'kMax' ---
    expect_error(getDesignFisher(kMax = 2, informationRates = c(0.01, 0.02, 0.04, 0.05)),
        "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (2)",
        fixed = TRUE)
    expect_error(getDesignFisher(kMax = 3, informationRates = c(0.01, 0.02, 0.04, 0.05)),
        "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (3)",
        fixed = TRUE)
    expect_error(getDesignFisher(kMax = 5, informationRates = c(0.01, 0.02, 0.04, 0.05)),
        "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (5)",
        fixed = TRUE)
expect_error(getDesignFisher(kMax = 6, informationRates = c(0.01, 0.02, 0.04, 0.05)),
        "Conflicting arguments: length of 'informationRates' (4) must be equal to 'kMax' (6)",
        fixed = TRUE)
    # --- 'alpha0Vec' bound checks: values must lie in (0; 1] ---
    expect_error(getDesignFisher(alpha0Vec = c(0, 1)),
        "Argument out of bounds: 'alpha0Vec' (0, 1) is out of bounds (0; 1]",
        fixed = TRUE
    )
    expect_error(getDesignFisher(alpha0Vec = c(0.1, 1.01)),
        "Argument out of bounds: 'alpha0Vec' (0.1, 1.01) is out of bounds (0; 1]",
        fixed = TRUE
    )
})
rpact/tests/testthat/helper-f_analysis_base_rates.R0000644000176200001440000000417614277150416022324 0ustar liggesusers## |
## | *Unit tests helper functions*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 6117 $
## | Last changed: $Date: 2022-05-04 15:55:23 +0200 (Mi, 04 Mai 2022) $
## | Last changed by: $Author: pahlke $
## |

# Builds conditional power plot data for an analysis results object 'x'.
# Dispatches on the dataset type of 'x':
#  - means datasets: if no 'assumedStDev' was passed in '...', it is taken
#    from x$assumedStDev and forwarded to .getConditionalPowerPlot();
#  - rates datasets: analogously, a missing 'pi2' defaults to x$pi2;
#  - otherwise (or when the optional argument WAS passed in '...'), the
#    arguments in '...' are forwarded unchanged.
# NOTE(review): the 'stage' parameter is accepted but never referenced in
# this body -- presumably consumed by callees via '...'; verify.
testGetAnalysisResultsPlotData <- function(x, ...,
        nPlanned = NA_real_, stage = NA_integer_, allocationRatioPlanned = NA_real_) {
    # Normalize/validate the plotting arguments (stage results, nPlanned, ...)
    plotArgs <- .getAnalysisResultsPlotArguments(
        x = x, nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned
    )
    if (x$getDataInput()$isDatasetMeans()) {
        assumedStDev <- .getOptionalArgument("assumedStDev", ...)
        if (is.null(assumedStDev)) {
            # Fall back to the standard deviation stored in the results object
            assumedStDev <- x$assumedStDev
            return(.getConditionalPowerPlot(
                stageResults = plotArgs$stageResults,
                nPlanned = plotArgs$nPlanned,
                allocationRatioPlanned = plotArgs$allocationRatioPlanned,
                assumedStDev = assumedStDev, ...
            ))
        }
    } else if (x$getDataInput()$isDatasetRates()) {
        pi2 <- .getOptionalArgument("pi2", ...)
        if (is.null(pi2)) {
            # Fall back to the control rate stored in the results object
            pi2 <- x$pi2
            return(.getConditionalPowerPlot(
                stageResults = plotArgs$stageResults,
                nPlanned = plotArgs$nPlanned,
                allocationRatioPlanned = plotArgs$allocationRatioPlanned,
                pi2 = pi2, ...
            ))
        }
    }
    # Default path: the optional argument was supplied explicitly (or the
    # dataset is neither means nor rates), so pass '...' through as-is.
    return(.getConditionalPowerPlot(
        stageResults = plotArgs$stageResults,
        nPlanned = plotArgs$nPlanned,
        allocationRatioPlanned = plotArgs$allocationRatioPlanned, ...
    ))
}
rpact/tests/testthat/test-f_simulation_multiarm_rates.R0000644000176200001440000036314514370207346023310 0ustar liggesusers## |
## | *Unit tests*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | RPACT package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File name: test-f_simulation_multiarm_rates.R
## | Creation date: 06 February 2023, 12:14:43
## | File version: $Revision: 6801 $
## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $
## | Last changed by: $Author: pahlke $
## |

test_plan_section("Testing Simulation Multi-Arm Rates Function")


test_that("'getSimulationMultiArmRates': several configurations", {
    .skipTestIfNotX64()
    # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips}
    # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections}
    # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmRates}
    # @refFS[Formula]{fs:simulationMultiArmDoseResponse}
    # @refFS[Formula]{fs:simulationMultiArmRatesGenerate}
    # @refFS[Formula]{fs:simulationMultiArmRatesTestStatistics}
    # @refFS[Formula]{fs:simulationMultiArmSelections}
    # @refFS[Formula]{fs:multiarmRejectionRule}
    # 
@refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} x1 <- getSimulationMultiArmRates( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "linear", activeArms = 4, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x1' with expected results expect_equal(x1$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x1$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x1$iterations[3, ], c(10, 10, 9, 7)) expect_equal(x1$rejectAtLeastOne, c(0, 0.1, 0.6, 0.8), tolerance = 1e-07) expect_equal(unlist(as.list(x1$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0.2, 0, 0.2, 0.2), tolerance = 1e-07) expect_equal(x1$futilityStop, c(0, 0, 0, 0)) expect_equal(x1$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x1$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x1$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x1$earlyStop[2, ], c(0, 0, 0.1, 0.3), tolerance = 1e-07) expect_equal(x1$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x1$successPerStage[2, ], c(0, 0, 0.1, 0.3), tolerance = 1e-07) expect_equal(x1$successPerStage[3, ], c(0, 0.1, 0.5, 0.5), tolerance = 1e-07) expect_equal(unlist(as.list(x1$selectedArms)), c(1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.2, 0.2, 1, 0, 0, 1, 0.1, 0.1, 1, 0.2, 0.2, 1, 0, 0, 1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0.3, 0.2, 1, 0.4, 0.4, 1, 0.3, 0.3, 1, 0.4, 0.3, 1, 0.5, 0.3, 1, 1, 1, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.7), tolerance = 
1e-07) expect_equal(x1$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x1$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x1$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x1$expectedNumberOfSubjects, c(334.8, 445, 331.8, 179.8), tolerance = 1e-07) expect_equal(unlist(as.list(x1$sampleSizes)), c(10, 8, 10.4, 10, 10, 10, 10, 11.3, 15.333333, 10, 0, 0, 10, 10, 10, 10, 17.5, 20, 10, 0, 0, 10, 13, 19.142857, 10, 22.4, 22.5, 10, 40, 40, 10, 37.5, 36.555556, 10, 4.4, 8.5714286, 10, 20.4, 38.7, 10, 30, 30, 10, 28.2, 19.111111, 10, 17.1, 15.714286, 10, 60.8, 81.6, 10, 97.5, 100, 10, 77, 71, 10, 34.5, 43.428571), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x1$conditionalPowerAchieved[2, ], c(0.032197948, 0.00019444487, 0.052129075, 0.12394528), tolerance = 1e-07) expect_equal(x1$conditionalPowerAchieved[3, ], c(0.33607045, 0.04525892, 0.4023749, 0.68738904), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x1), NA))) expect_output(print(x1)$show()) invisible(capture.output(expect_error(summary(x1), NA))) expect_output(summary(x1)$show()) x1CodeBased <- eval(parse(text = getObjectRCode(x1, stringWrapParagraphWidth = NULL))) expect_equal(x1CodeBased$iterations, x1$iterations, tolerance = 1e-05) expect_equal(x1CodeBased$rejectAtLeastOne, x1$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x1CodeBased$rejectedArmsPerStage, x1$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$futilityStop, x1$futilityStop, tolerance = 1e-05) expect_equal(x1CodeBased$futilityPerStage, x1$futilityPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$earlyStop, x1$earlyStop, tolerance = 1e-05) expect_equal(x1CodeBased$successPerStage, x1$successPerStage, tolerance = 1e-05) expect_equal(x1CodeBased$selectedArms, x1$selectedArms, tolerance = 1e-05) expect_equal(x1CodeBased$numberOfActiveArms, x1$numberOfActiveArms, tolerance = 
1e-05) expect_equal(x1CodeBased$expectedNumberOfSubjects, x1$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x1CodeBased$sampleSizes, x1$sampleSizes, tolerance = 1e-05) expect_equal(x1CodeBased$conditionalPowerAchieved, x1$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x1), "character") df <- as.data.frame(x1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() x2 <- getSimulationMultiArmRates( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "userDefined", activeArms = 4, plannedSubjects = c(10, 30, 50), piControl = 0.3, adaptations = rep(TRUE, 2), effectMatrix = matrix(c(0.1, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.5), ncol = 4), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x2' with expected results expect_equal(x2$iterations[1, ], c(10, 10)) expect_equal(x2$iterations[2, ], c(10, 10)) expect_equal(x2$iterations[3, ], c(10, 8)) expect_equal(x2$rejectAtLeastOne, c(0.2, 0.7), tolerance = 1e-07) expect_equal(unlist(as.list(x2$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0.2, 0.5), tolerance = 1e-07) expect_equal(x2$futilityStop, c(0, 0)) expect_equal(x2$futilityPerStage[1, ], c(0, 0)) expect_equal(x2$futilityPerStage[2, ], c(0, 0)) expect_equal(x2$earlyStop[1, ], c(0, 0)) expect_equal(x2$earlyStop[2, ], c(0, 0.2), tolerance = 1e-07) expect_equal(x2$successPerStage[1, ], c(0, 0)) expect_equal(x2$successPerStage[2, ], c(0, 0.2), tolerance = 1e-07) expect_equal(x2$successPerStage[3, ], c(0.2, 0.5), tolerance = 1e-07) expect_equal(unlist(as.list(x2$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.2, 0.2, 1, 0, 0, 1, 0.5, 0.5, 1, 0.7, 0.5, 1, 1, 
1, 1, 1, 0.8), tolerance = 1e-07) expect_equal(x2$numberOfActiveArms[1, ], c(4, 4)) expect_equal(x2$numberOfActiveArms[2, ], c(1, 1)) expect_equal(x2$numberOfActiveArms[3, ], c(1, 1)) expect_equal(x2$expectedNumberOfSubjects, c(397.2, 312.8), tolerance = 1e-07) expect_equal(unlist(as.list(x2$sampleSizes)), c(10, 0, 0, 10, 0, 0, 10, 30, 30, 10, 22.4, 37.5, 10, 13, 20, 10, 0, 0, 10, 38.8, 41.8, 10, 52.8, 32.75, 10, 81.8, 91.8, 10, 75.2, 70.25), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_)) expect_equal(x2$conditionalPowerAchieved[2, ], c(0.0097327907, 0.021741893), tolerance = 1e-07) expect_equal(x2$conditionalPowerAchieved[3, ], c(0.14656813, 0.35197865), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x2), NA))) expect_output(print(x2)$show()) invisible(capture.output(expect_error(summary(x2), NA))) expect_output(summary(x2)$show()) x2CodeBased <- eval(parse(text = getObjectRCode(x2, stringWrapParagraphWidth = NULL))) expect_equal(x2CodeBased$iterations, x2$iterations, tolerance = 1e-05) expect_equal(x2CodeBased$rejectAtLeastOne, x2$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x2CodeBased$rejectedArmsPerStage, x2$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$futilityStop, x2$futilityStop, tolerance = 1e-05) expect_equal(x2CodeBased$futilityPerStage, x2$futilityPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$earlyStop, x2$earlyStop, tolerance = 1e-05) expect_equal(x2CodeBased$successPerStage, x2$successPerStage, tolerance = 1e-05) expect_equal(x2CodeBased$selectedArms, x2$selectedArms, tolerance = 1e-05) expect_equal(x2CodeBased$numberOfActiveArms, x2$numberOfActiveArms, tolerance = 1e-05) expect_equal(x2CodeBased$expectedNumberOfSubjects, x2$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x2CodeBased$sampleSizes, x2$sampleSizes, tolerance = 1e-05) expect_equal(x2CodeBased$conditionalPowerAchieved, 
x2$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x2), "character") df <- as.data.frame(x2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x3 <- getSimulationMultiArmRates( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), typeOfShape = "sigmoidEmax", gED50 = 2, slope = 0.5, activeArms = 4, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x3' with expected results expect_equal(x3$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x3$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x3$iterations[3, ], c(10, 10, 10, 10)) expect_equal(x3$rejectAtLeastOne, c(0, 0, 0, 0)) expect_equal(unlist(as.list(x3$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(x3$futilityStop, c(0, 0, 0, 0)) expect_equal(x3$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x3$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x3$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x3$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(x3$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x3$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x3$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x3$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.3, 0.3, 1, 0.2, 0.2, 1, 0.4, 0.4, 1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0.1, 0.1, 1, 0.3, 0.3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), tolerance = 1e-07) expect_equal(x3$numberOfActiveArms[1, ], c(4, 4, 4, 4)) 
expect_equal(x3$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x3$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x3$expectedNumberOfSubjects, c(434.8, 402, 440, 425), tolerance = 1e-07) expect_equal(unlist(as.list(x3$sampleSizes)), c(10, 0, 0, 10, 0, 0, 10, 15, 20, 10, 10, 10, 10, 30, 30, 10, 12.7, 20, 10, 40, 40, 10, 20, 20, 10, 30, 30, 10, 29.1, 34.2, 10, 30, 30, 10, 33.4, 40, 10, 32.4, 40, 10, 40, 40, 10, 10, 10, 10, 26.7, 27.4, 10, 92.4, 100, 10, 81.8, 94.2, 10, 95, 100, 10, 90.1, 97.4), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x3$conditionalPowerAchieved[2, ], c(0.0098526063, 0.0022619481, 0.010226943, 0.0071111057), tolerance = 1e-07) expect_equal(x3$conditionalPowerAchieved[3, ], c(0.00025317548, 0.089328639, 4.5501958e-05, 0.12015791), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x3), NA))) expect_output(print(x3)$show()) invisible(capture.output(expect_error(summary(x3), NA))) expect_output(summary(x3)$show()) x3CodeBased <- eval(parse(text = getObjectRCode(x3, stringWrapParagraphWidth = NULL))) expect_equal(x3CodeBased$iterations, x3$iterations, tolerance = 1e-05) expect_equal(x3CodeBased$rejectAtLeastOne, x3$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x3CodeBased$rejectedArmsPerStage, x3$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$futilityStop, x3$futilityStop, tolerance = 1e-05) expect_equal(x3CodeBased$futilityPerStage, x3$futilityPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$earlyStop, x3$earlyStop, tolerance = 1e-05) expect_equal(x3CodeBased$successPerStage, x3$successPerStage, tolerance = 1e-05) expect_equal(x3CodeBased$selectedArms, x3$selectedArms, tolerance = 1e-05) expect_equal(x3CodeBased$numberOfActiveArms, x3$numberOfActiveArms, tolerance = 1e-05) expect_equal(x3CodeBased$expectedNumberOfSubjects, x3$expectedNumberOfSubjects, tolerance = 1e-05) 
expect_equal(x3CodeBased$sampleSizes, x3$sampleSizes, tolerance = 1e-05) expect_equal(x3CodeBased$conditionalPowerAchieved, x3$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x3), "character") df <- as.data.frame(x3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x4 <- getSimulationMultiArmRates( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "all", plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x4' with expected results expect_equal(x4$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x4$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x4$iterations[3, ], c(10, 10, 10, 10)) expect_equal(x4$rejectAtLeastOne, c(0, 0.3, 0.7, 1), tolerance = 1e-07) expect_equal(unlist(as.list(x4$rejectedArmsPerStage)), c(0, 0, 0, 0, 0.1, 0, 0, 0, 0.1, 0, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0.3, 0, 0.3, 0.4, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0.3, 0, 0.6, 0.2, 0, 0, 0, 0, 0, 0.2, 0, 0.4, 0.3, 0, 0.8, 0.2), tolerance = 1e-07) expect_equal(x4$futilityStop, c(0, 0, 0, 0)) expect_equal(x4$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x4$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x4$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x4$earlyStop[2, ], c(0, 0, 0, 0)) expect_equal(x4$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x4$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x4$successPerStage[3, ], c(0, 0, 0.1, 0.1), tolerance = 1e-07) expect_equal(unlist(as.list(x4$selectedArms)), c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)) expect_equal(x4$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x4$numberOfActiveArms[2, ], c(4, 4, 4, 4)) expect_equal(x4$numberOfActiveArms[3, ], c(4, 4, 4, 4)) expect_equal(x4$expectedNumberOfSubjects, c(1026, 1002, 924.5, 714.5), tolerance = 1e-07) expect_equal(unlist(as.list(x4$sampleSizes)), c(10, 95.2, 100, 10, 100, 90.4, 10, 91.8, 83.1, 10, 100, 32.9, 10, 95.2, 100, 10, 100, 90.4, 10, 91.8, 83.1, 10, 100, 32.9, 10, 95.2, 100, 10, 100, 90.4, 10, 91.8, 83.1, 10, 100, 32.9, 10, 95.2, 100, 10, 100, 90.4, 10, 91.8, 83.1, 10, 100, 32.9, 10, 95.2, 100, 10, 100, 90.4, 10, 91.8, 83.1, 10, 100, 32.9), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x4$conditionalPowerAchieved[2, ], c(0.16336896, 3.7379108e-06, 0.18421481, 0.069788183), tolerance = 1e-07) expect_equal(x4$conditionalPowerAchieved[3, ], c(0.00052547754, 0.089531131, 0.32040425, 0.67566016), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x4), NA))) expect_output(print(x4)$show()) invisible(capture.output(expect_error(summary(x4), NA))) expect_output(summary(x4)$show()) x4CodeBased <- eval(parse(text = getObjectRCode(x4, stringWrapParagraphWidth = NULL))) expect_equal(x4CodeBased$iterations, x4$iterations, tolerance = 1e-05) expect_equal(x4CodeBased$rejectAtLeastOne, x4$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x4CodeBased$rejectedArmsPerStage, x4$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$futilityStop, x4$futilityStop, tolerance = 1e-05) expect_equal(x4CodeBased$futilityPerStage, x4$futilityPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$earlyStop, x4$earlyStop, tolerance = 1e-05) expect_equal(x4CodeBased$successPerStage, x4$successPerStage, tolerance = 1e-05) expect_equal(x4CodeBased$selectedArms, x4$selectedArms, tolerance = 1e-05) expect_equal(x4CodeBased$numberOfActiveArms, 
x4$numberOfActiveArms, tolerance = 1e-05) expect_equal(x4CodeBased$expectedNumberOfSubjects, x4$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x4CodeBased$sampleSizes, x4$sampleSizes, tolerance = 1e-05) expect_equal(x4CodeBased$conditionalPowerAchieved, x4$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x4), "character") df <- as.data.frame(x4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x5 <- getSimulationMultiArmRates( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "rBest", rValue = 2, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x5' with expected results expect_equal(x5$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x5$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x5$iterations[3, ], c(10, 10, 8, 6)) expect_equal(x5$rejectAtLeastOne, c(0, 0.3, 0.9, 1), tolerance = 1e-07) expect_equal(unlist(as.list(x5$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.1, 0, 0.2, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.2, 0.2, 0, 0.3, 0.1, 0, 0, 0, 0, 0, 0.2, 0, 0.3, 0.4, 0, 0.8, 0.1), tolerance = 1e-07) expect_equal(x5$futilityStop, c(0, 0, 0, 0)) expect_equal(x5$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x5$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x5$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x5$earlyStop[2, ], c(0, 0, 0.2, 0.4), tolerance = 1e-07) expect_equal(x5$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x5$successPerStage[2, ], c(0, 0, 0.2, 0.4), tolerance = 1e-07) expect_equal(x5$successPerStage[3, ], c(0, 0, 0.3, 0.1), tolerance = 
1e-07) expect_equal(unlist(as.list(x5$selectedArms)), c(1, 0.8, 0.8, 1, 0.3, 0.3, 1, 0.2, 0.2, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.4, 0.4, 1, 0.4, 0.2, 1, 0.3, 0.1, 1, 0.6, 0.6, 1, 0.5, 0.5, 1, 0.6, 0.5, 1, 0.6, 0.3, 1, 0.5, 0.5, 1, 0.8, 0.8, 1, 0.8, 0.7, 1, 0.9, 0.6, 1, 1, 1, 1, 1, 1, 1, 1, 0.8, 1, 1, 0.6), tolerance = 1e-07) expect_equal(x5$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x5$numberOfActiveArms[2, ], c(2, 2, 2, 2)) expect_equal(x5$numberOfActiveArms[3, ], c(2, 2, 2, 2)) expect_equal(x5$expectedNumberOfSubjects, c(642.8, 566.9, 399.8, 265.1), tolerance = 1e-07) expect_equal(unlist(as.list(x5$sampleSizes)), c(10, 77.6, 80, 10, 23.9, 30, 10, 20, 1, 10, 12.3, 1.3333333, 10, 10, 10, 10, 30.2, 28.6, 10, 28.6, 25, 10, 20, 3.1666667, 10, 60, 60, 10, 49.7, 41.1, 10, 37.4, 28.25, 10, 40.8, 9.8333333, 10, 47.6, 50, 10, 63.8, 77.3, 10, 61.2, 53.25, 10, 53.1, 14.333333, 10, 97.6, 100, 10, 83.8, 88.5, 10, 73.6, 53.75, 10, 63.1, 14.333333), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x5$conditionalPowerAchieved[2, ], c(0.080486965, 0.12759682, 0.10458054, 0.065420449), tolerance = 1e-07) expect_equal(x5$conditionalPowerAchieved[3, ], c(0.022470074, 0.31122739, 0.58569198, 0.85520318), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x5), NA))) expect_output(print(x5)$show()) invisible(capture.output(expect_error(summary(x5), NA))) expect_output(summary(x5)$show()) x5CodeBased <- eval(parse(text = getObjectRCode(x5, stringWrapParagraphWidth = NULL))) expect_equal(x5CodeBased$iterations, x5$iterations, tolerance = 1e-05) expect_equal(x5CodeBased$rejectAtLeastOne, x5$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x5CodeBased$rejectedArmsPerStage, x5$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$futilityStop, x5$futilityStop, tolerance = 1e-05) expect_equal(x5CodeBased$futilityPerStage, x5$futilityPerStage, 
tolerance = 1e-05) expect_equal(x5CodeBased$earlyStop, x5$earlyStop, tolerance = 1e-05) expect_equal(x5CodeBased$successPerStage, x5$successPerStage, tolerance = 1e-05) expect_equal(x5CodeBased$selectedArms, x5$selectedArms, tolerance = 1e-05) expect_equal(x5CodeBased$numberOfActiveArms, x5$numberOfActiveArms, tolerance = 1e-05) expect_equal(x5CodeBased$expectedNumberOfSubjects, x5$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x5CodeBased$sampleSizes, x5$sampleSizes, tolerance = 1e-05) expect_equal(x5CodeBased$conditionalPowerAchieved, x5$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x5), "character") df <- as.data.frame(x5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x6 <- getSimulationMultiArmRates( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "epsilon", epsilonValue = 0.1, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x6' with expected results expect_equal(x6$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x6$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x6$iterations[3, ], c(10, 10, 8, 7)) expect_equal(x6$rejectAtLeastOne, c(0, 0.4, 0.8, 1), tolerance = 1e-07) expect_equal(unlist(as.list(x6$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0.2, 0, 0.3, 0.4, 0, 0, 0, 0, 0, 0.4, 0, 0.2, 0.5, 0, 0.4, 0.5), tolerance = 1e-07) expect_equal(x6$futilityStop, c(0, 0, 0, 0)) expect_equal(x6$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x6$futilityPerStage[2, ], c(0, 0, 0, 0)) 
expect_equal(x6$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x6$earlyStop[2, ], c(0, 0, 0.2, 0.3), tolerance = 1e-07) expect_equal(x6$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x6$successPerStage[2, ], c(0, 0, 0.2, 0.3), tolerance = 1e-07) expect_equal(x6$successPerStage[3, ], c(0, 0.4, 0.6, 0.6), tolerance = 1e-07) expect_equal(unlist(as.list(x6$selectedArms)), c(1, 0.2, 0.2, 1, 0.4, 0.1, 1, 0.3, 0.1, 1, 0.1, 0.1, 1, 0.4, 0.4, 1, 0.1, 0, 1, 0.2, 0.1, 1, 0.2, 0, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0.3, 0.2, 1, 0.9, 0.4, 1, 0.3, 0.3, 1, 0.7, 0.7, 1, 0.9, 0.7, 1, 0.9, 0.6, 1, 1, 1, 1, 1, 1, 1, 1, 0.8, 1, 1, 0.7), tolerance = 1e-07) expect_equal(x6$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x6$numberOfActiveArms[2, ], c(1.3, 1.6, 1.7, 2.1), tolerance = 1e-07) expect_equal(x6$numberOfActiveArms[3, ], c(1.3, 1.2, 1.375, 1.5714286), tolerance = 1e-07) expect_equal(x6$expectedNumberOfSubjects, c(436.4, 438.6, 346.7, 372.5), tolerance = 1e-07) expect_equal(unlist(as.list(x6$sampleSizes)), c(10, 16.7, 20, 10, 27.9, 10, 10, 9.1, 12.5, 10, 1.2, 14.285714, 10, 37.5, 40, 10, 1.2, 0, 10, 12.3, 11.625, 10, 7.9, 0, 10, 32.4, 32.5, 10, 31.2, 40, 10, 21.5, 13.375, 10, 63.2, 50.142857, 10, 15.4, 28.7, 10, 56.2, 59, 10, 60.4, 63, 10, 58, 51.714286, 10, 72, 91.2, 10, 74.1, 89, 10, 61.9, 63.875, 10, 64.7, 66), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x6$conditionalPowerAchieved[2, ], c(0.031688257, 0.035836944, 0.12967885, 0.10427074), tolerance = 1e-07) expect_equal(x6$conditionalPowerAchieved[3, ], c(0.2491354, 0.21222327, 0.47711159, 0.3978836), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x6), NA))) expect_output(print(x6)$show()) invisible(capture.output(expect_error(summary(x6), NA))) expect_output(summary(x6)$show()) x6CodeBased <- eval(parse(text = getObjectRCode(x6, stringWrapParagraphWidth = NULL))) 
expect_equal(x6CodeBased$iterations, x6$iterations, tolerance = 1e-05) expect_equal(x6CodeBased$rejectAtLeastOne, x6$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x6CodeBased$rejectedArmsPerStage, x6$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$futilityStop, x6$futilityStop, tolerance = 1e-05) expect_equal(x6CodeBased$futilityPerStage, x6$futilityPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$earlyStop, x6$earlyStop, tolerance = 1e-05) expect_equal(x6CodeBased$successPerStage, x6$successPerStage, tolerance = 1e-05) expect_equal(x6CodeBased$selectedArms, x6$selectedArms, tolerance = 1e-05) expect_equal(x6CodeBased$numberOfActiveArms, x6$numberOfActiveArms, tolerance = 1e-05) expect_equal(x6CodeBased$expectedNumberOfSubjects, x6$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x6CodeBased$sampleSizes, x6$sampleSizes, tolerance = 1e-05) expect_equal(x6CodeBased$conditionalPowerAchieved, x6$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x6), "character") df <- as.data.frame(x6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x6) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x7 <- getSimulationMultiArmRates( seed = 1234, getDesignInverseNormal(informationRates = c(0.2, 0.6, 1)), activeArms = 4, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x7' with expected results expect_equal(x7$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x7$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x7$iterations[3, ], c(10, 9, 8, 5)) expect_equal(x7$rejectAtLeastOne, c(0, 0.4, 0.5, 1), tolerance = 1e-07) expect_equal(unlist(as.list(x7$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.2, 0, 0.1, 0.2, 0, 0, 0, 0, 0.1, 0.2, 0, 0.2, 0, 0, 0.4, 0.3), tolerance = 1e-07) expect_equal(x7$futilityStop, c(0, 0, 0, 0)) expect_equal(x7$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x7$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x7$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x7$earlyStop[2, ], c(0, 0.1, 0.2, 0.5), tolerance = 1e-07) expect_equal(x7$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x7$successPerStage[2, ], c(0, 0.1, 0.2, 0.5), tolerance = 1e-07) expect_equal(x7$successPerStage[3, ], c(0, 0.3, 0.3, 0.5), tolerance = 1e-07) expect_equal(unlist(as.list(x7$selectedArms)), c(1, 0.2, 0.2, 1, 0, 0, 1, 0.2, 0.2, 1, 0, 0, 1, 0.3, 0.3, 1, 0, 0, 1, 0.2, 0.2, 1, 0, 0, 1, 0.2, 0.2, 1, 0.4, 0.4, 1, 0.2, 0.2, 1, 0.3, 0.2, 1, 0.3, 0.3, 1, 0.6, 0.5, 1, 0.4, 0.2, 1, 0.7, 0.3, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 0.5), tolerance = 1e-07) expect_equal(x7$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x7$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x7$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x7$expectedNumberOfSubjects, c(355.2, 334, 233, 193.4), tolerance = 1e-07) expect_equal(unlist(as.list(x7$sampleSizes)), c(10, 20, 20, 10, 0, 0, 10, 20, 25, 10, 0, 0, 10, 30, 30, 10, 0, 0, 10, 3, 3.75, 10, 0, 0, 10, 12.4, 12.4, 10, 20.7, 22.777778, 10, 15, 18.75, 10, 16.2, 26.4, 10, 13.9, 13.9, 10, 54.2, 51.777778, 10, 13.1, 3, 10, 30.3, 24, 10, 76.3, 76.3, 10, 74.9, 74.555556, 10, 51.1, 50.5, 10, 46.5, 50.4), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x7$conditionalPowerAchieved[2, ], c(0.035427106, 0.012436575, 0.08338715, 0.046283385), tolerance = 1e-07) expect_equal(x7$conditionalPowerAchieved[3, ], c(0.076058567, 0.27636533, 0.46741694, 0.70493817), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x7), NA))) 
expect_output(print(x7)$show()) invisible(capture.output(expect_error(summary(x7), NA))) expect_output(summary(x7)$show()) x7CodeBased <- eval(parse(text = getObjectRCode(x7, stringWrapParagraphWidth = NULL))) expect_equal(x7CodeBased$iterations, x7$iterations, tolerance = 1e-05) expect_equal(x7CodeBased$rejectAtLeastOne, x7$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x7CodeBased$rejectedArmsPerStage, x7$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$futilityStop, x7$futilityStop, tolerance = 1e-05) expect_equal(x7CodeBased$futilityPerStage, x7$futilityPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$earlyStop, x7$earlyStop, tolerance = 1e-05) expect_equal(x7CodeBased$successPerStage, x7$successPerStage, tolerance = 1e-05) expect_equal(x7CodeBased$selectedArms, x7$selectedArms, tolerance = 1e-05) expect_equal(x7CodeBased$numberOfActiveArms, x7$numberOfActiveArms, tolerance = 1e-05) expect_equal(x7CodeBased$expectedNumberOfSubjects, x7$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x7CodeBased$sampleSizes, x7$sampleSizes, tolerance = 1e-05) expect_equal(x7CodeBased$conditionalPowerAchieved, x7$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x7), "character") df <- as.data.frame(x7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x8 <- getSimulationMultiArmRates( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "all", plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x8' with expected results expect_equal(x8$iterations[1, ], c(10, 10, 10, 10)) 
expect_equal(x8$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x8$iterations[3, ], c(10, 10, 9, 8)) expect_equal(x8$rejectAtLeastOne, c(0, 0.2, 0.9, 1), tolerance = 1e-07) expect_equal(unlist(as.list(x8$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0.1, 0.3, 0, 0.2, 0.4, 0, 0, 0, 0, 0, 0.1, 0, 0.3, 0.3, 0.1, 0.4, 0.5, 0, 0, 0, 0, 0, 0.1, 0, 0.4, 0.5, 0.1, 0.8, 0.1), tolerance = 1e-07) expect_equal(x8$futilityStop, c(0, 0, 0, 0)) expect_equal(x8$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x8$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x8$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x8$earlyStop[2, ], c(0, 0, 0.1, 0.2), tolerance = 1e-07) expect_equal(x8$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x8$successPerStage[2, ], c(0, 0, 0.1, 0.2), tolerance = 1e-07) expect_equal(x8$successPerStage[3, ], c(0, 0, 0.1, 0), tolerance = 1e-07) expect_equal(unlist(as.list(x8$selectedArms)), c(1, 1, 1, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 1, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 1, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 1, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.8, 1, 1, 1, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.8), tolerance = 1e-07) expect_equal(x8$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x8$numberOfActiveArms[2, ], c(4, 4, 4, 4)) expect_equal(x8$numberOfActiveArms[3, ], c(4, 4, 4, 4)) expect_equal(x8$expectedNumberOfSubjects, c(952, 1050, 909.5, 860), tolerance = 1e-07) expect_equal(unlist(as.list(x8$sampleSizes)), c(10, 90.2, 90.2, 10, 100, 100, 10, 91, 89.888889, 10, 91, 88.75, 10, 90.2, 90.2, 10, 100, 100, 10, 91, 89.888889, 10, 91, 88.75, 10, 90.2, 90.2, 10, 100, 100, 10, 91, 89.888889, 10, 91, 88.75, 10, 90.2, 90.2, 10, 100, 100, 10, 91, 89.888889, 10, 91, 88.75, 10, 90.2, 90.2, 10, 100, 100, 10, 91, 89.888889, 10, 91, 88.75), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x8$conditionalPowerAchieved[2, ], c(0.16068828, 0.022112719, 0.21849189, 
0.19646842), tolerance = 1e-07) expect_equal(x8$conditionalPowerAchieved[3, ], c(0.0018216452, 0.044801331, 0.47086458, 0.69046124), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x8), NA))) expect_output(print(x8)$show()) invisible(capture.output(expect_error(summary(x8), NA))) expect_output(summary(x8)$show()) x8CodeBased <- eval(parse(text = getObjectRCode(x8, stringWrapParagraphWidth = NULL))) expect_equal(x8CodeBased$iterations, x8$iterations, tolerance = 1e-05) expect_equal(x8CodeBased$rejectAtLeastOne, x8$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x8CodeBased$rejectedArmsPerStage, x8$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$futilityStop, x8$futilityStop, tolerance = 1e-05) expect_equal(x8CodeBased$futilityPerStage, x8$futilityPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$earlyStop, x8$earlyStop, tolerance = 1e-05) expect_equal(x8CodeBased$successPerStage, x8$successPerStage, tolerance = 1e-05) expect_equal(x8CodeBased$selectedArms, x8$selectedArms, tolerance = 1e-05) expect_equal(x8CodeBased$numberOfActiveArms, x8$numberOfActiveArms, tolerance = 1e-05) expect_equal(x8CodeBased$expectedNumberOfSubjects, x8$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x8CodeBased$sampleSizes, x8$sampleSizes, tolerance = 1e-05) expect_equal(x8CodeBased$conditionalPowerAchieved, x8$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x8), "character") df <- as.data.frame(x8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x9 <- getSimulationMultiArmRates( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "rBest", rValue = 2, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, 
minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x9' with expected results expect_equal(x9$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x9$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x9$iterations[3, ], c(10, 10, 10, 5)) expect_equal(x9$rejectAtLeastOne, c(0, 0.2, 0.7, 1), tolerance = 1e-07) expect_equal(unlist(as.list(x9$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0.1, 0.1, 0.1, 0, 0, 0, 0, 0, 0.1, 0.1, 0, 0.2, 0, 0.4, 0.2, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0.3, 0.1, 0.6, 0.1), tolerance = 1e-07) expect_equal(x9$futilityStop, c(0, 0, 0, 0)) expect_equal(x9$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x9$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x9$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x9$earlyStop[2, ], c(0, 0, 0, 0.5), tolerance = 1e-07) expect_equal(x9$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x9$successPerStage[2, ], c(0, 0, 0, 0.5), tolerance = 1e-07) expect_equal(x9$successPerStage[3, ], c(0, 0.1, 0.1, 0.3), tolerance = 1e-07) expect_equal(unlist(as.list(x9$selectedArms)), c(1, 0.6, 0.6, 1, 0.4, 0.4, 1, 0.3, 0.3, 1, 0.2, 0.1, 1, 0.7, 0.7, 1, 0.2, 0.2, 1, 0.5, 0.5, 1, 0.3, 0.2, 1, 0.5, 0.5, 1, 0.7, 0.7, 1, 0.6, 0.6, 1, 0.6, 0.3, 1, 0.2, 0.2, 1, 0.7, 0.7, 1, 0.6, 0.6, 1, 0.9, 0.4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5), tolerance = 1e-07) expect_equal(x9$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x9$numberOfActiveArms[2, ], c(2, 2, 2, 2)) expect_equal(x9$numberOfActiveArms[3, ], c(2, 2, 2, 2)) expect_equal(x9$expectedNumberOfSubjects, c(603.2, 605.9, 453.2, 361.7), tolerance = 1e-07) expect_equal(unlist(as.list(x9$sampleSizes)), c(10, 52.2, 52.2, 10, 33.6, 33.5, 10, 21.2, 21.2, 10, 9.2, 17.6, 10, 70, 70, 10, 20, 20, 10, 35.6, 35.3, 10, 19.7, 21.4, 10, 45.3, 45.3, 10, 62.7, 62.6, 10, 36.2, 35.8, 10, 52.8, 45.4, 10, 16.9, 16.9, 10, 
69.1, 69.1, 10, 41.8, 41.7, 10, 61.7, 44.4, 10, 92.2, 92.2, 10, 92.7, 92.6, 10, 67.4, 67, 10, 71.7, 64.4), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x9$conditionalPowerAchieved[2, ], c(0.083443128, 0.076003514, 0.14647721, 0.085145955), tolerance = 1e-07) expect_equal(x9$conditionalPowerAchieved[3, ], c(0.043093175, 0.13127607, 0.3479275, 0.64693149), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x9), NA))) expect_output(print(x9)$show()) invisible(capture.output(expect_error(summary(x9), NA))) expect_output(summary(x9)$show()) x9CodeBased <- eval(parse(text = getObjectRCode(x9, stringWrapParagraphWidth = NULL))) expect_equal(x9CodeBased$iterations, x9$iterations, tolerance = 1e-05) expect_equal(x9CodeBased$rejectAtLeastOne, x9$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x9CodeBased$rejectedArmsPerStage, x9$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$futilityStop, x9$futilityStop, tolerance = 1e-05) expect_equal(x9CodeBased$futilityPerStage, x9$futilityPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$earlyStop, x9$earlyStop, tolerance = 1e-05) expect_equal(x9CodeBased$successPerStage, x9$successPerStage, tolerance = 1e-05) expect_equal(x9CodeBased$selectedArms, x9$selectedArms, tolerance = 1e-05) expect_equal(x9CodeBased$numberOfActiveArms, x9$numberOfActiveArms, tolerance = 1e-05) expect_equal(x9CodeBased$expectedNumberOfSubjects, x9$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x9CodeBased$sampleSizes, x9$sampleSizes, tolerance = 1e-05) expect_equal(x9CodeBased$conditionalPowerAchieved, x9$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x9), "character") df <- as.data.frame(x9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x10 <- 
getSimulationMultiArmRates( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, typeOfSelection = "epsilon", epsilonValue = 0.1, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x10' with expected results expect_equal(x10$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x10$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x10$iterations[3, ], c(10, 9, 7, 6)) expect_equal(x10$rejectAtLeastOne, c(0, 0.2, 0.6, 0.6), tolerance = 1e-07) expect_equal(unlist(as.list(x10$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.1, 0.1, 0, 0.1, 0.1, 0, 0, 0, 0, 0.1, 0, 0, 0.2, 0.1, 0, 0.3, 0, 0, 0, 0, 0, 0.1, 0, 0, 0.3, 0, 0, 0.3, 0.1), tolerance = 1e-07) expect_equal(x10$futilityStop, c(0, 0, 0, 0)) expect_equal(x10$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x10$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x10$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x10$earlyStop[2, ], c(0, 0.1, 0.3, 0.4), tolerance = 1e-07) expect_equal(x10$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x10$successPerStage[2, ], c(0, 0.1, 0.3, 0.4), tolerance = 1e-07) expect_equal(x10$successPerStage[3, ], c(0, 0, 0.2, 0.1), tolerance = 1e-07) expect_equal(unlist(as.list(x10$selectedArms)), c(1, 0.2, 0.2, 1, 0.4, 0.4, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.3, 0.3, 1, 0.3, 0.2, 1, 0.3, 0.2, 1, 0.5, 0.4, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0.5, 0.5, 1, 0.5, 0.3, 1, 0.4, 0.4, 1, 0.7, 0.6, 1, 0.5, 0.3, 1, 0.5, 0.3, 1, 1, 1, 1, 1, 0.9, 1, 1, 0.7, 1, 1, 0.6), tolerance = 1e-07) expect_equal(x10$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x10$numberOfActiveArms[2, ], c(1.2, 1.8, 1.5, 1.6), tolerance = 1e-07) expect_equal(x10$numberOfActiveArms[3, ], c(1.2, 
1.7777778, 1.7142857, 1.8333333), tolerance = 1e-07) expect_equal(x10$expectedNumberOfSubjects, c(313.2, 474, 363.7, 263.7), tolerance = 1e-07) expect_equal(unlist(as.list(x10$sampleSizes)), c(10, 15.9, 15.8, 10, 35.9, 39.777778, 10, 12.7, 18, 10, 2.8, 4.6666667, 10, 22.2, 22.2, 10, 30, 22.222222, 10, 22.7, 28.571429, 10, 27.4, 43.166667, 10, 18.1, 18, 10, 32.8, 36.444444, 10, 38.6, 54.857143, 10, 26.7, 26.5, 10, 15.8, 15.8, 10, 54.9, 49.777778, 10, 37.3, 24.571429, 10, 24.9, 23.666667, 10, 59.8, 59.6, 10, 73.6, 70.444444, 10, 68.6, 65.142857, 10, 43, 50.166667), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x10$conditionalPowerAchieved[2, ], c(0.067103341, 0.011749166, 0.024807536, 0.13720867), tolerance = 1e-07) expect_equal(x10$conditionalPowerAchieved[3, ], c(0.10265269, 0.46661697, 0.4198773, 0.2422132), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x10), NA))) expect_output(print(x10)$show()) invisible(capture.output(expect_error(summary(x10), NA))) expect_output(summary(x10)$show()) x10CodeBased <- eval(parse(text = getObjectRCode(x10, stringWrapParagraphWidth = NULL))) expect_equal(x10CodeBased$iterations, x10$iterations, tolerance = 1e-05) expect_equal(x10CodeBased$rejectAtLeastOne, x10$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x10CodeBased$rejectedArmsPerStage, x10$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$futilityStop, x10$futilityStop, tolerance = 1e-05) expect_equal(x10CodeBased$futilityPerStage, x10$futilityPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$earlyStop, x10$earlyStop, tolerance = 1e-05) expect_equal(x10CodeBased$successPerStage, x10$successPerStage, tolerance = 1e-05) expect_equal(x10CodeBased$selectedArms, x10$selectedArms, tolerance = 1e-05) expect_equal(x10CodeBased$numberOfActiveArms, x10$numberOfActiveArms, tolerance = 1e-05) 
expect_equal(x10CodeBased$expectedNumberOfSubjects, x10$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x10CodeBased$sampleSizes, x10$sampleSizes, tolerance = 1e-05) expect_equal(x10CodeBased$conditionalPowerAchieved, x10$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x10), "character") df <- as.data.frame(x10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x11 <- getSimulationMultiArmRates( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.1, 0.3, 0.1), intersectionTest = "Bonferroni", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), directionUpper = FALSE, maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x11' with expected results expect_equal(x11$iterations[1, ], c(10, 10, 10)) expect_equal(x11$iterations[2, ], c(8, 5, 9)) expect_equal(x11$iterations[3, ], c(4, 4, 6)) expect_equal(x11$rejectAtLeastOne, c(0.4, 0, 0), tolerance = 1e-07) expect_equal(unlist(as.list(x11$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0, 0, 0, 0, 0, 0, 0), tolerance = 1e-07) expect_equal(x11$futilityStop, c(0.2, 0.6, 0.4), tolerance = 1e-07) expect_equal(x11$futilityPerStage[1, ], c(0.2, 0.5, 0.1), tolerance = 1e-07) expect_equal(x11$futilityPerStage[2, ], c(0, 0.1, 0.3), tolerance = 1e-07) expect_equal(x11$earlyStop[1, ], c(0.2, 0.5, 0.1), tolerance = 1e-07) expect_equal(x11$earlyStop[2, ], c(0.4, 0.1, 0.3), tolerance = 1e-07) expect_equal(x11$successPerStage[1, ], c(0, 0, 0)) expect_equal(x11$successPerStage[2, ], c(0.4, 0, 0), tolerance = 1e-07) expect_equal(x11$successPerStage[3, ], c(0, 0, 0)) 
expect_equal(unlist(as.list(x11$selectedArms)), c(1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.3, 0.1, 1, 0.2, 0.2, 1, 0.3, 0.2, 1, 0.1, 0.1, 1, 0.2, 0.1, 1, 0, 0, 1, 0.2, 0.2, 1, 0.3, 0, 1, 0.1, 0.1, 1, 0.3, 0.2, 1, 0.8, 0.4, 1, 0.5, 0.4, 1, 0.9, 0.6), tolerance = 1e-07) expect_equal(x11$numberOfActiveArms[1, ], c(4, 4, 4)) expect_equal(x11$numberOfActiveArms[2, ], c(1, 1, 1)) expect_equal(x11$numberOfActiveArms[3, ], c(1, 1, 1)) expect_equal(x11$expectedNumberOfSubjects, c(200.6, 150, 279), tolerance = 1e-07) expect_equal(unlist(as.list(x11$sampleSizes)), c(10, 1, 3.25, 10, 2.8, 25, 10, 17.333333, 15.666667, 10, 6.25, 50, 10, 23.2, 32, 10, 11.111111, 16.666667, 10, 14.5, 21, 10, 0, 0, 10, 15.777778, 33.333333, 10, 35.25, 0, 10, 8.4, 25, 10, 17, 33.333333, 10, 57, 74.25, 10, 34.4, 82, 10, 61.222222, 99), tolerance = 1e-07) expect_equal(x11$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x11$conditionalPowerAchieved[2, ], c(0.10402635, 0.15240707, 0.070533409), tolerance = 1e-07) expect_equal(x11$conditionalPowerAchieved[3, ], c(0.68219789, 0.38677479, 0.34246832), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x11), NA))) expect_output(print(x11)$show()) invisible(capture.output(expect_error(summary(x11), NA))) expect_output(summary(x11)$show()) x11CodeBased <- eval(parse(text = getObjectRCode(x11, stringWrapParagraphWidth = NULL))) expect_equal(x11CodeBased$iterations, x11$iterations, tolerance = 1e-05) expect_equal(x11CodeBased$rejectAtLeastOne, x11$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x11CodeBased$rejectedArmsPerStage, x11$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$futilityStop, x11$futilityStop, tolerance = 1e-05) expect_equal(x11CodeBased$futilityPerStage, x11$futilityPerStage, tolerance = 1e-05) expect_equal(x11CodeBased$earlyStop, x11$earlyStop, tolerance = 1e-05) expect_equal(x11CodeBased$successPerStage, x11$successPerStage, tolerance = 
1e-05) expect_equal(x11CodeBased$selectedArms, x11$selectedArms, tolerance = 1e-05) expect_equal(x11CodeBased$numberOfActiveArms, x11$numberOfActiveArms, tolerance = 1e-05) expect_equal(x11CodeBased$expectedNumberOfSubjects, x11$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x11CodeBased$sampleSizes, x11$sampleSizes, tolerance = 1e-05) expect_equal(x11CodeBased$conditionalPowerAchieved, x11$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x11), "character") df <- as.data.frame(x11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x12 <- getSimulationMultiArmRates( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), typeOfShape = "linear", activeArms = 4, threshold = 0, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), intersectionTest = "Bonferroni", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x12' with expected results expect_equal(x12$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x12$iterations[2, ], c(6, 6, 7, 9)) expect_equal(x12$iterations[3, ], c(3, 4, 5, 4)) expect_equal(x12$rejectAtLeastOne, c(0, 0, 0.5, 0.8), tolerance = 1e-07) expect_equal(unlist(as.list(x12$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.3, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.2, 0, 0.2, 0.2), tolerance = 1e-07) expect_equal(x12$futilityStop, c(0.7, 0.6, 0.3, 0.1), tolerance = 1e-07) expect_equal(x12$futilityPerStage[1, ], c(0.4, 0.4, 0.3, 0.1), tolerance = 1e-07) expect_equal(x12$futilityPerStage[2, ], c(0.3, 0.2, 0, 0), tolerance = 1e-07) expect_equal(x12$earlyStop[1, ], c(0.4, 0.4, 0.3, 0.1), tolerance = 1e-07) 
expect_equal(x12$earlyStop[2, ], c(0.3, 0.2, 0.2, 0.5), tolerance = 1e-07)
expect_equal(x12$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x12$successPerStage[2, ], c(0, 0, 0.2, 0.5), tolerance = 1e-07)
expect_equal(x12$successPerStage[3, ], c(0, 0, 0.3, 0.3), tolerance = 1e-07)
expect_equal(unlist(as.list(x12$selectedArms)), c(1, 0.1, 0.1, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.2, 0, 1, 0.2, 0.1, 1, 0.1, 0.1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0.1, 0.1, 1, 0.4, 0.1, 1, 0.3, 0.2, 1, 0.2, 0.1, 1, 0.4, 0.2, 1, 0.4, 0.2, 1, 0.6, 0.3, 1, 0.6, 0.4, 1, 0.7, 0.5, 1, 0.9, 0.4), tolerance = 1e-07)
expect_equal(x12$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x12$numberOfActiveArms[2, ], c(1, 1, 1, 1))
expect_equal(x12$numberOfActiveArms[3, ], c(1, 1, 1, 1))
expect_equal(x12$expectedNumberOfSubjects, c(188, 175, 176, 185))
expect_equal(unlist(as.list(x12$sampleSizes)), c(10, 9.8333333, 32, 10, 10.833333, 27.5, 10, 14.285714, 20, 10, 5.8888889, 25, 10, 18.833333, 0, 10, 33.333333, 25, 10, 2, 20, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 2, 2, 10, 13.333333, 22.75, 10, 37, 66.666667, 10, 8.3333333, 25, 10, 20.857143, 29.2, 10, 18.444444, 36.25, 10, 65.666667, 98.666667, 10, 52.5, 77.5, 10, 39.142857, 71.2, 10, 37.666667, 84), tolerance = 1e-07)
expect_equal(x12$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(x12$conditionalPowerAchieved[2, ], c(0.1067614, 0.028335233, 0.15675994, 0.029094411), tolerance = 1e-07)
expect_equal(x12$conditionalPowerAchieved[3, ], c(0.43970154, 0.38730712, 0.69132205, 0.60200615), tolerance = 1e-07)
# Extended checks (only in the complete unit test set): print/summary output and
# round trip through the generated R code of the object.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x12), NA)))
    expect_output(print(x12)$show())
    invisible(capture.output(expect_error(summary(x12), NA)))
    expect_output(summary(x12)$show())
    x12CodeBased <- eval(parse(text = getObjectRCode(x12, stringWrapParagraphWidth = NULL)))
    expect_equal(x12CodeBased$iterations, x12$iterations, tolerance = 1e-05)
    expect_equal(x12CodeBased$rejectAtLeastOne, x12$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x12CodeBased$rejectedArmsPerStage, x12$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$futilityStop, x12$futilityStop, tolerance = 1e-05)
    expect_equal(x12CodeBased$futilityPerStage, x12$futilityPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$earlyStop, x12$earlyStop, tolerance = 1e-05)
    expect_equal(x12CodeBased$successPerStage, x12$successPerStage, tolerance = 1e-05)
    expect_equal(x12CodeBased$selectedArms, x12$selectedArms, tolerance = 1e-05)
    expect_equal(x12CodeBased$numberOfActiveArms, x12$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x12CodeBased$expectedNumberOfSubjects, x12$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x12CodeBased$sampleSizes, x12$sampleSizes, tolerance = 1e-05)
    expect_equal(x12CodeBased$conditionalPowerAchieved, x12$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x12), "character")
    df <- as.data.frame(x12)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x12)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario 'x13': user-defined effect matrix (two effect profiles), Bonferroni intersection test
x13 <- getSimulationMultiArmRates(
    seed = 1234,
    getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    typeOfShape = "userDefined",
    activeArms = 4,
    threshold = 0,
    plannedSubjects = c(10, 30, 50),
    piControl = 0.3,
    adaptations = rep(TRUE, 2),
    effectMatrix = matrix(c(0.1, 0.2, 0.3, 0.4, 0.2, 0.3, 0.4, 0.5), ncol = 4),
    intersectionTest = "Bonferroni",
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100),
    maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmRates object 'x13' with expected results
expect_equal(x13$iterations[1, ], c(10, 10))
expect_equal(x13$iterations[2, ], c(6, 5))
expect_equal(x13$iterations[3, ], c(6, 3))
expect_equal(x13$rejectAtLeastOne, c(0.2, 0.4), tolerance = 1e-07)
expect_equal(unlist(as.list(x13$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0.2, 0.2), tolerance = 1e-07)
expect_equal(x13$futilityStop, c(0.4, 0.5), tolerance = 1e-07)
expect_equal(x13$futilityPerStage[1, ], c(0.4, 0.5), tolerance = 1e-07)
expect_equal(x13$futilityPerStage[2, ], c(0, 0))
expect_equal(x13$earlyStop[1, ], c(0.4, 0.5), tolerance = 1e-07)
expect_equal(x13$earlyStop[2, ], c(0, 0.2), tolerance = 1e-07)
expect_equal(x13$successPerStage[1, ], c(0, 0))
expect_equal(x13$successPerStage[2, ], c(0, 0.2), tolerance = 1e-07)
expect_equal(x13$successPerStage[3, ], c(0.2, 0.2), tolerance = 1e-07)
expect_equal(unlist(as.list(x13$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0, 0, 1, 0, 0, 1, 0.5, 0.5, 1, 0.4, 0.2, 1, 0.6, 0.6, 1, 0.5, 0.3), tolerance = 1e-07)
expect_equal(x13$numberOfActiveArms[1, ], c(4, 4))
expect_equal(x13$numberOfActiveArms[2, ], c(1, 1))
expect_equal(x13$numberOfActiveArms[3, ], c(1, 1))
expect_equal(x13$expectedNumberOfSubjects, c(203, 169.6), tolerance = 1e-07)
expect_equal(unlist(as.list(x13$sampleSizes)), c(10, 0, 0, 10, 0, 0, 10, 9.8333333, 16.666667, 10, 11.8, 10.666667, 10, 0, 0, 10, 0, 0, 10, 34.5, 66.5, 10, 63.6, 63, 10, 44.333333, 83.166667, 10, 75.4, 73.666667), tolerance = 1e-07)
expect_equal(x13$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_))
expect_equal(x13$conditionalPowerAchieved[2, ], c(0.045209815, 0.0014148507), tolerance = 1e-07)
expect_equal(x13$conditionalPowerAchieved[3, ], c(0.60681086, 0.72002567), tolerance = 1e-07)
# Extended checks (only in the complete unit test set): print/summary output and
# round trip through the generated R code of the object.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x13), NA)))
    expect_output(print(x13)$show())
    invisible(capture.output(expect_error(summary(x13), NA)))
    expect_output(summary(x13)$show())
    x13CodeBased <- eval(parse(text = getObjectRCode(x13, stringWrapParagraphWidth = NULL)))
    expect_equal(x13CodeBased$iterations, x13$iterations, tolerance = 1e-05)
    expect_equal(x13CodeBased$rejectAtLeastOne, x13$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x13CodeBased$rejectedArmsPerStage, x13$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$futilityStop, x13$futilityStop, tolerance = 1e-05)
    expect_equal(x13CodeBased$futilityPerStage, x13$futilityPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$earlyStop, x13$earlyStop, tolerance = 1e-05)
    expect_equal(x13CodeBased$successPerStage, x13$successPerStage, tolerance = 1e-05)
    expect_equal(x13CodeBased$selectedArms, x13$selectedArms, tolerance = 1e-05)
    expect_equal(x13CodeBased$numberOfActiveArms, x13$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x13CodeBased$expectedNumberOfSubjects, x13$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x13CodeBased$sampleSizes, x13$sampleSizes, tolerance = 1e-05)
    expect_equal(x13CodeBased$conditionalPowerAchieved, x13$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x13), "character")
    df <- as.data.frame(x13)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x13)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario 'x14': sigmoid Emax effect shape (gED50 = 2, slope = 0.5), Sidak intersection test
x14 <- getSimulationMultiArmRates(
    seed = 1234,
    getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    typeOfShape = "sigmoidEmax",
    gED50 = 2,
    slope = 0.5,
    activeArms = 4,
    threshold = 0,
    plannedSubjects = c(10, 30, 50),
    piControl = 0.3,
    piMaxVector = seq(0.3, 0.6, 0.1),
    adaptations = rep(TRUE, 2),
    intersectionTest = "Sidak",
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100),
    maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmRates object 'x14' with expected results
expect_equal(x14$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x14$iterations[2, ], c(5, 6, 9, 9))
expect_equal(x14$iterations[3, ], c(0, 1, 5, 9))
expect_equal(x14$rejectAtLeastOne, c(0, 0, 0, 0))
expect_equal(unlist(as.list(x14$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
expect_equal(x14$futilityStop, c(1, 0.9, 0.5, 0.1), tolerance = 1e-07)
expect_equal(x14$futilityPerStage[1, ], c(0.5, 0.4, 0.1, 0.1), tolerance = 1e-07)
expect_equal(x14$futilityPerStage[2, ], c(0.5, 0.5, 0.4, 0), tolerance = 1e-07)
expect_equal(x14$earlyStop[1, ], c(0.5, 0.4, 0.1, 0.1), tolerance = 1e-07)
expect_equal(x14$earlyStop[2, ], c(0.5, 0.5, 0.4, 0), tolerance = 1e-07)
expect_equal(x14$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x14$successPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x14$successPerStage[3, ], c(0, 0, 0, 0))
expect_equal(unlist(as.list(x14$selectedArms)), c(1, 0.1, 0, 1, 0.1, 0, 1, 0.2, 0, 1, 0.1, 0.1, 1, 0.2, 0, 1, 0, 0, 1, 0.2, 0.1, 1, 0.1, 0.1, 1, 0, 0, 1, 0.4, 0.1, 1, 0.2, 0.1, 1, 0.3, 0.3, 1, 0.2, 0, 1, 0.1, 0, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0.5, 0, 1, 0.6, 0.1, 1, 0.9, 0.5, 1, 0.9, 0.9), tolerance = 1e-07)
expect_equal(x14$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x14$numberOfActiveArms[2, ], c(1, 1, 1, 1))
expect_equal(x14$numberOfActiveArms[3, ], c(NaN, 1, 1, 1))
expect_equal(x14$expectedNumberOfSubjects, c(NaN, 171.2, 271.2, 368.4), tolerance = 1e-07)
expect_equal(unlist(as.list(x14$sampleSizes)), c(10, 20, 0, 10, 8.8333333, 0, 10, 17, 0, 10, 5.8888889, 11.111111, 10, 40, 0, 10, 0, 0, 10, 8.8888889, 20, 10, 3, 11.111111, 10, 0, 0, 10, 58.833333, 100, 10, 8.1111111, 20, 10, 28.111111, 33.333333, 10, 38.2, 0, 10, 16.666667, 0, 10, 33.333333, 60, 10, 39.888889, 44.444444, 10, 98.2, 0, 10, 84.333333, 100, 10, 67.333333, 100, 10, 76.888889, 100), tolerance = 1e-07)
expect_equal(x14$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(x14$conditionalPowerAchieved[2, ], c(0.0010701396, 1.0749986e-05, 0.015009054, 0.019936014), tolerance = 1e-07)
expect_equal(x14$conditionalPowerAchieved[3, ], c(NaN, 0.062530095, 0.19373785, 0.13543053), tolerance = 1e-07)
# Extended checks (only in the complete unit test set): print/summary output and
# round trip through the generated R code of the object.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x14), NA)))
    expect_output(print(x14)$show())
    invisible(capture.output(expect_error(summary(x14), NA)))
    expect_output(summary(x14)$show())
    x14CodeBased <- eval(parse(text = getObjectRCode(x14, stringWrapParagraphWidth = NULL)))
    expect_equal(x14CodeBased$iterations, x14$iterations, tolerance = 1e-05)
    expect_equal(x14CodeBased$rejectAtLeastOne, x14$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x14CodeBased$rejectedArmsPerStage, x14$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$futilityStop, x14$futilityStop, tolerance = 1e-05)
    expect_equal(x14CodeBased$futilityPerStage, x14$futilityPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$earlyStop, x14$earlyStop, tolerance = 1e-05)
    expect_equal(x14CodeBased$successPerStage, x14$successPerStage, tolerance = 1e-05)
    expect_equal(x14CodeBased$selectedArms, x14$selectedArms, tolerance = 1e-05)
    expect_equal(x14CodeBased$numberOfActiveArms, x14$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x14CodeBased$expectedNumberOfSubjects, x14$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x14CodeBased$sampleSizes, x14$sampleSizes, tolerance = 1e-05)
    expect_equal(x14CodeBased$conditionalPowerAchieved, x14$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x14), "character")
    df <- as.data.frame(x14)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x14)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario 'x15': typeOfSelection = "all" (continue with every non-futile arm), Sidak intersection test
x15 <- getSimulationMultiArmRates(
    seed = 1234,
    getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4,
    threshold = 0,
    typeOfSelection = "all",
    plannedSubjects = c(10, 30, 50),
    piControl = 0.3,
    piMaxVector = seq(0.3, 0.6, 0.1),
    adaptations = rep(TRUE, 2),
    intersectionTest = "Sidak",
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100),
    maxNumberOfIterations = 10
)
## Comparison of the results of SimulationResultsMultiArmRates object 'x15' with expected results
expect_equal(x15$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x15$iterations[2, ], c(8, 9, 10, 9))
expect_equal(x15$iterations[3, ], c(4, 9, 8, 5))
expect_equal(x15$rejectAtLeastOne, c(0, 0.2, 0.7, 0.9), tolerance = 1e-07)
expect_equal(unlist(as.list(x15$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0.1, 0, 0, 0, 0, 0.2, 0, 0, 0.3, 0, 0, 0.4, 0.1, 0, 0, 0, 0, 0, 0, 0, 0.4, 0.3, 0, 0.6, 0.1), tolerance = 1e-07)
expect_equal(x15$futilityStop, c(0.6, 0.1, 0, 0.1), tolerance = 1e-07)
expect_equal(x15$futilityPerStage[1, ], c(0.2, 0.1, 0, 0.1), tolerance = 1e-07)
expect_equal(x15$futilityPerStage[2, ], c(0.4, 0, 0, 0), tolerance = 1e-07)
expect_equal(x15$earlyStop[1, ], c(0.2, 0.1, 0, 0.1), tolerance = 1e-07)
expect_equal(x15$earlyStop[2, ], c(0.4, 0, 0.2, 0.4), tolerance = 1e-07)
expect_equal(x15$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x15$successPerStage[2, ], c(0, 0, 0.2, 0.4), tolerance = 1e-07)
expect_equal(x15$successPerStage[3, ], c(0, 0, 0.2, 0.1), tolerance = 1e-07)
expect_equal(unlist(as.list(x15$selectedArms)), c(1, 0.7, 0.2, 1, 0.5, 0.2, 1, 0.7, 0.5, 1, 0.2, 0.2, 1, 0.5, 0.3, 1, 0.5, 0.4, 1, 0.3, 0.3, 1, 0.7, 0.4, 1, 0.6, 0.1, 1, 0.5, 0.4, 1, 0.7, 0.6, 1, 0.8, 0.4, 1, 0.4, 0.4, 1, 0.8, 0.8, 1, 0.8, 0.6, 1, 0.7, 0.5, 1, 0.8, 0.4, 1, 0.9, 0.9, 1, 1, 0.8, 1, 0.9, 0.5), tolerance = 1e-07)
expect_equal(x15$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x15$numberOfActiveArms[2, ], c(2.75, 2.5555556, 2.5, 2.6666667), tolerance = 1e-07)
expect_equal(x15$numberOfActiveArms[3, ], c(2.5, 2, 2.5, 3), tolerance = 1e-07)
expect_equal(x15$expectedNumberOfSubjects, c(460, 640, 571.4, 381.6), tolerance = 1e-07)
expect_equal(unlist(as.list(x15$sampleSizes)), c(10, 80, 50, 10, 55.555556, 22.222222, 10, 66.8, 38.5, 10, 22.222222, 1.6, 10, 55, 75, 10, 55.555556, 44.444444, 10, 27.8, 25.5, 10, 69.777778, 14.4, 10, 67.5, 25, 10, 55.555556, 44.444444, 10, 66.8, 48, 10, 80.888889, 14.4, 10, 42.5, 100, 10, 88.888889, 88.888889, 10, 76.8, 48, 10, 69.777778, 15.2, 10, 92.5, 100, 10, 100, 100, 10, 96.8, 73, 10, 92, 15.2), tolerance = 1e-07)
expect_equal(x15$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(x15$conditionalPowerAchieved[2, ], c(0.26433659, 0.055206819, 0.10369686, 0.046653519), tolerance = 1e-07)
expect_equal(x15$conditionalPowerAchieved[3, ], c(0.023182671, 0.15953762, 0.43788092, 0.96046919), tolerance = 1e-07)
# Extended checks (only in the complete unit test set): print/summary output and
# round trip through the generated R code of the object.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x15), NA)))
    expect_output(print(x15)$show())
    invisible(capture.output(expect_error(summary(x15), NA)))
    expect_output(summary(x15)$show())
    x15CodeBased <- eval(parse(text = getObjectRCode(x15, stringWrapParagraphWidth = NULL)))
    expect_equal(x15CodeBased$iterations, x15$iterations, tolerance = 1e-05)
    expect_equal(x15CodeBased$rejectAtLeastOne, x15$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x15CodeBased$rejectedArmsPerStage, x15$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x15CodeBased$futilityStop, x15$futilityStop, tolerance = 1e-05)
    expect_equal(x15CodeBased$futilityPerStage, x15$futilityPerStage, tolerance = 1e-05)
    expect_equal(x15CodeBased$earlyStop, x15$earlyStop, tolerance = 1e-05)
    expect_equal(x15CodeBased$successPerStage, x15$successPerStage, tolerance = 1e-05)
    expect_equal(x15CodeBased$selectedArms, x15$selectedArms, tolerance = 1e-05)
    expect_equal(x15CodeBased$numberOfActiveArms, x15$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x15CodeBased$expectedNumberOfSubjects, x15$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x15CodeBased$sampleSizes, x15$sampleSizes, tolerance = 1e-05)
    expect_equal(x15CodeBased$conditionalPowerAchieved, x15$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x15), "character")
    df <- as.data.frame(x15)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x15)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario 'x16': typeOfSelection = "rBest" with rValue = 2, Sidak intersection test
x16 <- getSimulationMultiArmRates(
    seed = 1234,
    getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4,
    threshold = 0,
    typeOfSelection = "rBest",
    rValue = 2,
    plannedSubjects = c(10, 30, 50),
    piControl = 0.3,
    piMaxVector = seq(0.3, 0.6, 0.1),
    adaptations = rep(TRUE, 2),
    intersectionTest = "Sidak",
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100),
    maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmRates object 'x16' with expected results
expect_equal(x16$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x16$iterations[2, ], c(9, 9, 10, 10))
expect_equal(x16$iterations[3, ], c(7, 9, 10, 8))
expect_equal(x16$rejectAtLeastOne, c(0, 0.2, 0.6, 1), tolerance = 1e-07)
expect_equal(unlist(as.list(x16$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0.2, 0, 0.2, 0.4, 0, 0, 0, 0, 0.1, 0.1, 0, 0.1, 0.1, 0, 0.7, 0.1), tolerance = 1e-07)
expect_equal(x16$futilityStop, c(0.3, 0.1, 0, 0), tolerance = 1e-07)
expect_equal(x16$futilityPerStage[1, ], c(0.1, 0.1, 0, 0), tolerance = 1e-07)
expect_equal(x16$futilityPerStage[2, ], c(0.2, 0, 0, 0), tolerance = 1e-07)
expect_equal(x16$earlyStop[1, ], c(0.1, 0.1, 0, 0), tolerance = 1e-07)
expect_equal(x16$earlyStop[2, ], c(0.2, 0, 0, 0.2), tolerance = 1e-07)
expect_equal(x16$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x16$successPerStage[2, ], c(0, 0, 0, 0.2), tolerance = 1e-07)
expect_equal(x16$successPerStage[3, ], c(0, 0.1, 0.1, 0.3), tolerance = 1e-07)
expect_equal(unlist(as.list(x16$selectedArms)), c(1, 0.3, 0.3, 1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.1, 0.1, 1, 0.6, 0.4, 1, 0.5, 0.5, 1, 0.2, 0.2, 1, 0.2, 0.1, 1, 0.4, 0.2, 1, 0.2, 0.2, 1, 0.6, 0.6, 1, 0.8, 0.6, 1, 0.4, 0.3, 1, 0.8, 0.8, 1, 0.8, 0.8, 1, 0.9, 0.8, 1, 0.9, 0.7, 1, 0.9, 0.9, 1, 1, 1, 1, 1, 0.8), tolerance = 1e-07)
expect_equal(x16$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x16$numberOfActiveArms[2, ], c(1.8888889, 1.8888889, 1.9, 2), tolerance = 1e-07)
expect_equal(x16$numberOfActiveArms[3, ], c(1.7142857, 1.8888889, 1.9, 2), tolerance = 1e-07)
expect_equal(x16$expectedNumberOfSubjects, c(465.5, 426.3, 413.1, 244.1), tolerance = 1e-07)
expect_equal(unlist(as.list(x16$sampleSizes)), c(10, 20.555556, 42.857143, 10, 6.5555556, 22.222222, 10, 30, 1.2, 10, 2.2, 0.5, 10, 66.666667, 57.142857, 10, 42.111111, 55.555556, 10, 4.9, 13.9, 10, 14.1, 5, 10, 35.111111, 28.571429, 10, 11.777778, 1.5555556, 10, 44.7, 33.8, 10, 36.6, 21.75, 10, 41, 42.857143, 10, 63.333333, 68.222222, 10, 49.6, 57.3, 10, 32.9, 27.25, 10, 87.222222, 100, 10, 67.444444, 79.333333, 10, 69.6, 58.1, 10, 42.9, 27.25), tolerance = 1e-07)
expect_equal(x16$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(x16$conditionalPowerAchieved[2, ], c(0.096913955, 0.09039929, 0.11243241, 0.1746525), tolerance = 1e-07)
expect_equal(x16$conditionalPowerAchieved[3, ], c(0.093425176, 0.41153932, 0.67843506, 0.87119979), tolerance = 1e-07)
# Extended checks (only in the complete unit test set): print/summary output and
# round trip through the generated R code of the object.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x16), NA)))
    expect_output(print(x16)$show())
    invisible(capture.output(expect_error(summary(x16), NA)))
    expect_output(summary(x16)$show())
    x16CodeBased <- eval(parse(text = getObjectRCode(x16, stringWrapParagraphWidth = NULL)))
    expect_equal(x16CodeBased$iterations, x16$iterations, tolerance = 1e-05)
    expect_equal(x16CodeBased$rejectAtLeastOne, x16$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x16CodeBased$rejectedArmsPerStage, x16$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x16CodeBased$futilityStop, x16$futilityStop, tolerance = 1e-05)
    expect_equal(x16CodeBased$futilityPerStage, x16$futilityPerStage, tolerance = 1e-05)
    expect_equal(x16CodeBased$earlyStop, x16$earlyStop, tolerance = 1e-05)
    expect_equal(x16CodeBased$successPerStage, x16$successPerStage, tolerance = 1e-05)
    expect_equal(x16CodeBased$selectedArms, x16$selectedArms, tolerance = 1e-05)
    expect_equal(x16CodeBased$numberOfActiveArms, x16$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x16CodeBased$expectedNumberOfSubjects, x16$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x16CodeBased$sampleSizes, x16$sampleSizes, tolerance = 1e-05)
    expect_equal(x16CodeBased$conditionalPowerAchieved, x16$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x16), "character")
    df <- as.data.frame(x16)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x16)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario 'x17': typeOfSelection = "epsilon" with epsilonValue = 0.1, Simes intersection test
x17 <- getSimulationMultiArmRates(
    seed = 1234,
    getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4,
    threshold = 0,
    typeOfSelection = "epsilon",
    epsilonValue = 0.1,
    plannedSubjects = c(10, 30, 50),
    piControl = 0.3,
    piMaxVector = seq(0.3, 0.6, 0.1),
    adaptations = rep(TRUE, 2),
    intersectionTest = "Simes",
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100),
    maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmRates object 'x17' with expected results
expect_equal(x17$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x17$iterations[2, ], c(9, 9, 8, 10))
expect_equal(x17$iterations[3, ], c(7, 8, 6, 5))
expect_equal(x17$rejectAtLeastOne, c(0, 0.3, 0.4, 0.8), tolerance = 1e-07)
expect_equal(unlist(as.list(x17$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0, 0, 0, 0, 0.1, 0, 0.1, 0.1, 0, 0.1, 0.4, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.3, 0, 0.1, 0.3, 0.2), tolerance = 1e-07)
expect_equal(x17$futilityStop, c(0.3, 0.1, 0.2, 0), tolerance = 1e-07)
expect_equal(x17$futilityPerStage[1, ], c(0.1, 0.1, 0.2, 0), tolerance = 1e-07)
expect_equal(x17$futilityPerStage[2, ], c(0.2, 0, 0, 0), tolerance = 1e-07)
expect_equal(x17$earlyStop[1, ], c(0.1, 0.1, 0.2, 0), tolerance = 1e-07)
expect_equal(x17$earlyStop[2, ], c(0.2, 0.1, 0.2, 0.5), tolerance = 1e-07)
expect_equal(x17$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x17$successPerStage[2, ], c(0, 0.1, 0.2, 0.5), tolerance = 1e-07)
expect_equal(x17$successPerStage[3, ], c(0, 0.1, 0.2, 0.3), tolerance = 1e-07)
expect_equal(unlist(as.list(x17$selectedArms)), c(1, 0.3, 0.1, 1, 0.2, 0.1, 1, 0.1, 0, 1, 0.1, 0.1, 1, 0.2, 0.1, 1, 0.6, 0.4, 1, 0.3, 0.1, 1, 0.3, 0.1, 1, 0.3, 0.2, 1, 0.4, 0.4, 1, 0.4, 0.2, 1, 0.7, 0.3, 1, 0.4, 0.4, 1, 0.1, 0.1, 1, 0.7, 0.5, 1, 0.6, 0.2, 1, 0.9, 0.7, 1, 0.9, 0.8, 1, 0.8, 0.6, 1, 1, 0.5), tolerance = 1e-07)
expect_equal(x17$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x17$numberOfActiveArms[2, ], c(1.3333333, 1.4444444, 1.875, 1.7), tolerance = 1e-07)
expect_equal(x17$numberOfActiveArms[3, ], c(1.1428571, 1.25, 1.3333333, 1.4), tolerance = 1e-07)
expect_equal(x17$expectedNumberOfSubjects, c(339.9, 359.2, 222.7, 176), tolerance = 1e-07)
expect_equal(unlist(as.list(x17$sampleSizes)), c(10, 31.333333, 14.285714, 10, 17.666667, 8.125, 10, 12.5, 0, 10, 1.2, 7.4, 10, 21.222222, 14.285714, 10, 35.888889, 50, 10, 25.625, 16.666667, 10, 13.6, 7.4, 10, 24.666667, 21.142857, 10, 33.222222, 50, 10, 31.5, 17.333333, 10, 26.2, 9.8, 10, 22.444444, 57.142857, 10, 5.1111111, 12.5, 10, 33, 19.833333, 10, 21.2, 12.2, 10, 67.333333, 92.571429, 10, 59.444444, 95.625, 10, 45.5, 36.5, 10, 34.4, 22), tolerance = 1e-07)
expect_equal(x17$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(x17$conditionalPowerAchieved[2, ], c(0.039329058, 0.14668797, 0.16576057, 0.14296603), tolerance = 1e-07)
expect_equal(x17$conditionalPowerAchieved[3, ], c(0.28763166, 0.40839298, 0.6012117, 0.84313531), tolerance = 1e-07)
# Extended checks (only in the complete unit test set): print/summary output and
# round trip through the generated R code of the object.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x17), NA)))
    expect_output(print(x17)$show())
    invisible(capture.output(expect_error(summary(x17), NA)))
    expect_output(summary(x17)$show())
    x17CodeBased <- eval(parse(text = getObjectRCode(x17, stringWrapParagraphWidth = NULL)))
    expect_equal(x17CodeBased$iterations, x17$iterations, tolerance = 1e-05)
    expect_equal(x17CodeBased$rejectAtLeastOne, x17$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x17CodeBased$rejectedArmsPerStage, x17$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x17CodeBased$futilityStop, x17$futilityStop, tolerance = 1e-05)
    expect_equal(x17CodeBased$futilityPerStage, x17$futilityPerStage, tolerance = 1e-05)
    expect_equal(x17CodeBased$earlyStop, x17$earlyStop, tolerance = 1e-05)
    expect_equal(x17CodeBased$successPerStage, x17$successPerStage, tolerance = 1e-05)
    expect_equal(x17CodeBased$selectedArms, x17$selectedArms, tolerance = 1e-05)
    expect_equal(x17CodeBased$numberOfActiveArms, x17$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x17CodeBased$expectedNumberOfSubjects, x17$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x17CodeBased$sampleSizes, x17$sampleSizes, tolerance = 1e-05)
    expect_equal(x17CodeBased$conditionalPowerAchieved, x17$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x17), "character")
    df <- as.data.frame(x17)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x17)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario 'x18': adaptation only after stage 1 (adaptations = c(TRUE, FALSE)), Simes intersection test
x18 <- getSimulationMultiArmRates(
    seed = 1234,
    getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4,
    threshold = 0,
    plannedSubjects = c(10, 30, 50),
    piControl = 0.3,
    piMaxVector = seq(0.3, 0.6, 0.1),
    adaptations = c(TRUE, FALSE),
    intersectionTest = "Simes",
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100),
    maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmRates object 'x18' with expected results
expect_equal(x18$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x18$iterations[2, ], c(7, 8, 8, 10))
expect_equal(x18$iterations[3, ], c(7, 8, 7, 5))
expect_equal(x18$rejectAtLeastOne, c(0.1, 0.1, 0.3, 0.7), tolerance = 1e-07)
expect_equal(unlist(as.list(x18$rejectedArmsPerStage)), c(0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0.2, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0.1, 0, 0.1, 0.1), tolerance = 1e-07)
expect_equal(x18$futilityStop, c(0.3, 0.2, 0.2, 0), tolerance = 1e-07)
expect_equal(x18$futilityPerStage[1, ], c(0.3, 0.2, 0.2, 0), tolerance = 1e-07)
expect_equal(x18$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x18$earlyStop[1, ], c(0.3, 0.2, 0.2, 0), tolerance = 1e-07)
expect_equal(x18$earlyStop[2, ], c(0, 0, 0.1, 0.5), tolerance = 1e-07)
expect_equal(x18$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x18$successPerStage[2, ], c(0, 0, 0.1, 0.5), tolerance = 1e-07)
expect_equal(x18$successPerStage[3, ], c(0.1, 0.1, 0.2, 0.2), tolerance = 1e-07)
expect_equal(unlist(as.list(x18$selectedArms)), c(1, 0.2, 0.2, 1, 0.2, 0.2, 1, 0, 0, 1, 0.2, 0.1, 1, 0.2, 0.2, 1, 0.2, 0.2, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.1, 0.1, 1, 0.4, 0.4, 1, 0.4, 0.1, 1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.2, 0.1, 1, 0.3, 0.2, 1, 0.7, 0.7, 1, 0.8, 0.8, 1, 0.8, 0.7, 1, 1, 0.5), tolerance = 1e-07)
expect_equal(x18$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x18$numberOfActiveArms[2, ], c(1, 1, 1, 1))
expect_equal(x18$numberOfActiveArms[3, ], c(1, 1, 1, 1))
expect_equal(x18$expectedNumberOfSubjects, c(241.6, 306.8, 235.2, 156), tolerance = 1e-07)
expect_equal(unlist(as.list(x18$sampleSizes)), c(10, 27.285714, 27.285714, 10, 25, 25, 10, 0, 0, 10, 2.6, 4.4, 10, 16.142857, 16.142857, 10, 16, 16, 10, 3.5, 4, 10, 1.4, 2.8, 10, 14.285714, 14.285714, 10, 12.5, 12.5, 10, 40.875, 46.571429, 10, 15.8, 5.2, 10, 10.714286, 10.714286, 10, 26.75, 26.75, 10, 19.875, 8.2857143, 10, 18.6, 16.8, 10, 68.428571, 68.428571, 10, 80.25, 80.25, 10, 64.25, 58.857143, 10, 38.4, 29.2), tolerance = 1e-07)
expect_equal(x18$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(x18$conditionalPowerAchieved[2, ], c(0.064400041, 0.012818439, 0.075196936, 0.13824332), tolerance = 1e-07)
expect_equal(x18$conditionalPowerAchieved[3, ], c(0.066989319, 0.23112098, 0.45267281, 0.52012057), tolerance = 1e-07)
# Extended checks (only in the complete unit test set): print/summary output and
# round trip through the generated R code of the object.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x18), NA)))
    expect_output(print(x18)$show())
    invisible(capture.output(expect_error(summary(x18), NA)))
    expect_output(summary(x18)$show())
    x18CodeBased <- eval(parse(text = getObjectRCode(x18, stringWrapParagraphWidth = NULL)))
    expect_equal(x18CodeBased$iterations, x18$iterations, tolerance = 1e-05)
    expect_equal(x18CodeBased$rejectAtLeastOne, x18$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x18CodeBased$rejectedArmsPerStage, x18$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x18CodeBased$futilityStop, x18$futilityStop, tolerance = 1e-05)
    expect_equal(x18CodeBased$futilityPerStage, x18$futilityPerStage, tolerance = 1e-05)
    expect_equal(x18CodeBased$earlyStop, x18$earlyStop, tolerance = 1e-05)
    expect_equal(x18CodeBased$successPerStage, x18$successPerStage, tolerance = 1e-05)
    expect_equal(x18CodeBased$selectedArms, x18$selectedArms, tolerance = 1e-05)
    expect_equal(x18CodeBased$numberOfActiveArms, x18$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x18CodeBased$expectedNumberOfSubjects, x18$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x18CodeBased$sampleSizes, x18$sampleSizes, tolerance = 1e-05)
    expect_equal(x18CodeBased$conditionalPowerAchieved, x18$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x18), "character")
    df <- as.data.frame(x18)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x18)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario 'x19': typeOfSelection = "all" with adaptation after stage 1 only, Simes intersection test
x19 <- getSimulationMultiArmRates(
    seed = 1234,
    getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4,
    threshold = 0,
    typeOfSelection = "all",
    plannedSubjects = c(10, 30, 50),
    piControl = 0.3,
    piMaxVector = seq(0.3, 0.6, 0.1),
    adaptations = c(TRUE, FALSE),
    intersectionTest = "Simes",
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100),
    maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmRates object 'x19' with expected results
expect_equal(x19$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x19$iterations[2, ], c(8, 8, 10, 10))
expect_equal(x19$iterations[3, ], c(8, 8, 9, 9))
expect_equal(x19$rejectAtLeastOne, c(0, 0, 0.9, 1), tolerance = 1e-07)
expect_equal(unlist(as.list(x19$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.2, 0, 0.4, 0.2, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.1, 0, 0.6, 0.2, 0, 0, 0, 0, 0, 0, 0, 0.8, 0.1, 0, 0.7, 0), tolerance = 1e-07)
expect_equal(x19$futilityStop, c(0.2, 0.2, 0, 0), tolerance = 1e-07)
expect_equal(x19$futilityPerStage[1, ], c(0.2, 0.2, 0, 0), tolerance = 1e-07)
expect_equal(x19$futilityPerStage[2, ], c(0, 0, 0, 0))
expect_equal(x19$earlyStop[1, ], c(0.2, 0.2, 0, 0), tolerance = 1e-07)
expect_equal(x19$earlyStop[2, ], c(0, 0, 0.1, 0.1), tolerance = 1e-07)
expect_equal(x19$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x19$successPerStage[2, ], c(0, 0, 0.1, 0.1), tolerance = 1e-07)
expect_equal(x19$successPerStage[3, ], c(0, 0, 0.1, 0.2), tolerance = 1e-07)
expect_equal(unlist(as.list(x19$selectedArms)), c(1, 0.3, 0.3, 1, 0.3, 0.3, 1, 0.8, 0.8, 1, 0.6, 0.6, 1, 0.5, 0.5, 1, 0.6, 0.6, 1, 0.7, 0.6, 1, 0.9, 0.8, 1, 0.5, 0.5, 1, 0.5, 0.5, 1, 0.9, 0.8, 1, 0.8, 0.7, 1, 0.6, 0.6, 1, 0.5, 0.5, 1, 0.9, 0.8, 1, 0.8, 0.7, 1, 0.8, 0.8, 1, 0.8, 0.8, 1, 1, 0.9, 1, 1, 0.9), tolerance = 1e-07)
expect_equal(x19$numberOfActiveArms[1, ], c(4, 4, 4, 4))
expect_equal(x19$numberOfActiveArms[2, ], c(2.375, 2.375, 3.3, 3.1), tolerance = 1e-07)
expect_equal(x19$numberOfActiveArms[3, ], c(2.375, 2.375, 3.3333333, 3.1111111), tolerance = 1e-07)
expect_equal(x19$expectedNumberOfSubjects, c(523.8, 590, 818.4, 765.4), tolerance = 1e-07)
expect_equal(unlist(as.list(x19$sampleSizes)), c(10, 28.125, 28.125, 10, 37.5, 37.5, 10, 73.6, 81.666667, 10, 55.2, 61.333333, 10, 58.625, 58.625, 10, 75, 75, 10, 70, 66.666667, 10, 85.2, 83.555556, 10, 53.125, 53.125, 10, 62.5, 62.5, 10, 83.6, 81.666667, 10, 71.1, 67.777778, 10, 65.625, 65.625, 10, 62.5, 62.5, 10, 83.6, 81.666667, 10, 75.2, 72.444444, 10, 90.625, 90.625, 10, 100, 100, 10, 93.6, 92.777778, 10, 91.1, 90), tolerance = 1e-07)
expect_equal(x19$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_))
expect_equal(x19$conditionalPowerAchieved[2, ], c(0.10081958, 0.049714416, 0.18629752, 0.24626925), tolerance = 1e-07)
expect_equal(x19$conditionalPowerAchieved[3, ], c(0.088506618, 0.13049081, 0.60815392, 0.85577973), tolerance = 1e-07)
# Extended checks (only in the complete unit test set): print/summary output and
# round trip through the generated R code of the object.
if (isTRUE(.isCompleteUnitTestSetEnabled())) {
    invisible(capture.output(expect_error(print(x19), NA)))
    expect_output(print(x19)$show())
    invisible(capture.output(expect_error(summary(x19), NA)))
    expect_output(summary(x19)$show())
    x19CodeBased <- eval(parse(text = getObjectRCode(x19, stringWrapParagraphWidth = NULL)))
    expect_equal(x19CodeBased$iterations, x19$iterations, tolerance = 1e-05)
    expect_equal(x19CodeBased$rejectAtLeastOne, x19$rejectAtLeastOne, tolerance = 1e-05)
    expect_equal(x19CodeBased$rejectedArmsPerStage, x19$rejectedArmsPerStage, tolerance = 1e-05)
    expect_equal(x19CodeBased$futilityStop, x19$futilityStop, tolerance = 1e-05)
    expect_equal(x19CodeBased$futilityPerStage, x19$futilityPerStage, tolerance = 1e-05)
    expect_equal(x19CodeBased$earlyStop, x19$earlyStop, tolerance = 1e-05)
    expect_equal(x19CodeBased$successPerStage, x19$successPerStage, tolerance = 1e-05)
    expect_equal(x19CodeBased$selectedArms, x19$selectedArms, tolerance = 1e-05)
    expect_equal(x19CodeBased$numberOfActiveArms, x19$numberOfActiveArms, tolerance = 1e-05)
    expect_equal(x19CodeBased$expectedNumberOfSubjects, x19$expectedNumberOfSubjects, tolerance = 1e-05)
    expect_equal(x19CodeBased$sampleSizes, x19$sampleSizes, tolerance = 1e-05)
    expect_equal(x19CodeBased$conditionalPowerAchieved, x19$conditionalPowerAchieved, tolerance = 1e-05)
    expect_type(names(x19), "character")
    df <- as.data.frame(x19)
    expect_s3_class(df, "data.frame")
    expect_true(nrow(df) > 0 && ncol(df) > 0)
    mtx <- as.matrix(x19)
    expect_true(is.matrix(mtx))
    expect_true(nrow(mtx) > 0 && ncol(mtx) > 0)
}

# Scenario 'x20': typeOfSelection = "rBest" (rValue = 2), Hierarchical intersection test,
# adaptation after stage 1 only
x20 <- getSimulationMultiArmRates(
    seed = 1234,
    getDesignFisher(informationRates = c(0.2, 0.6, 1)),
    activeArms = 4,
    threshold = 0,
    typeOfSelection = "rBest",
    rValue = 2,
    plannedSubjects = c(10, 30, 50),
    piControl = 0.3,
    piMaxVector = seq(0.3, 0.6, 0.1),
    adaptations = c(TRUE, FALSE),
    intersectionTest = "Hierarchical",
    conditionalPower = 0.8,
    minNumberOfSubjectsPerStage = c(10, 4, 4),
    maxNumberOfSubjectsPerStage = c(10, 100, 100),
    maxNumberOfIterations = 10
)

## Comparison of the results of SimulationResultsMultiArmRates object 'x20' with expected results
expect_equal(x20$iterations[1, ], c(10, 10, 10, 10))
expect_equal(x20$iterations[2, ], c(7, 7, 9, 10))
expect_equal(x20$iterations[3, ], c(2, 5, 3, 1))
expect_equal(x20$rejectAtLeastOne, c(0, 0, 0.2, 0), tolerance = 1e-07)
expect_equal(unlist(as.list(x20$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), tolerance = 1e-07)
expect_equal(x20$futilityStop, c(0.8, 0.5, 0.6, 0.9), tolerance = 1e-07)
expect_equal(x20$futilityPerStage[1, ], c(0.3, 0.3, 0.1, 0), tolerance = 1e-07)
expect_equal(x20$futilityPerStage[2, ], c(0.5, 0.2, 0.5, 0.9), tolerance = 1e-07)
expect_equal(x20$earlyStop[1, ], c(0.3, 0.3, 0.1, 0), tolerance = 1e-07)
expect_equal(x20$earlyStop[2, ], c(0.5, 0.2, 0.6, 0.9), tolerance = 1e-07)
expect_equal(x20$successPerStage[1, ], c(0, 0, 0, 0))
expect_equal(x20$successPerStage[2, ], c(0,
0, 0.1, 0), tolerance = 1e-07) expect_equal(x20$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x20$selectedArms)), c(1, 0.2, 0.2, 1, 0.5, 0.5, 1, 0.4, 0.3, 1, 0.1, 0.1, 1, 0.3, 0.1, 1, 0.2, 0, 1, 0.1, 0, 1, 0.4, 0, 1, 0.2, 0, 1, 0.3, 0.3, 1, 0.5, 0.1, 1, 0.8, 0, 1, 0.5, 0.1, 1, 0.4, 0.2, 1, 0.6, 0.2, 1, 0.7, 0.1, 1, 0.7, 0.2, 1, 0.7, 0.5, 1, 0.9, 0.3, 1, 1, 0.1), tolerance = 1e-07) expect_equal(x20$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x20$numberOfActiveArms[2, ], c(1.7142857, 2, 1.7777778, 2), tolerance = 1e-07) expect_equal(x20$numberOfActiveArms[3, ], c(2, 2, 2, 2)) expect_equal(x20$expectedNumberOfSubjects, c(267.3, 301.1, 325.2, 315.5), tolerance = 1e-07) expect_equal(unlist(as.list(x20$sampleSizes)), c(10, 24.142857, 84.5, 10, 51.714286, 72.2, 10, 39, 83.333333, 10, 8.8, 88, 10, 36.142857, 50, 10, 16.285714, 0, 10, 4.1111111, 0, 10, 28.2, 0, 10, 28.571429, 0, 10, 30.142857, 42.2, 10, 42.555556, 33.333333, 10, 60.9, 0, 10, 60.285714, 34.5, 10, 37.857143, 30, 10, 55.222222, 50, 10, 61.5, 88, 10, 88.857143, 84.5, 10, 68, 72.2, 10, 81.555556, 83.333333, 10, 79.7, 88), tolerance = 1e-07) expect_equal(x20$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x20$conditionalPowerAchieved[2, ], c(0.14688077, 0.19244817, 0.083030211, 0.1268121), tolerance = 1e-07) expect_equal(x20$conditionalPowerAchieved[3, ], c(0.021357961, 0.35341345, 0.67128636, 1), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x20), NA))) expect_output(print(x20)$show()) invisible(capture.output(expect_error(summary(x20), NA))) expect_output(summary(x20)$show()) x20CodeBased <- eval(parse(text = getObjectRCode(x20, stringWrapParagraphWidth = NULL))) expect_equal(x20CodeBased$iterations, x20$iterations, tolerance = 1e-05) expect_equal(x20CodeBased$rejectAtLeastOne, x20$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x20CodeBased$rejectedArmsPerStage, 
x20$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x20CodeBased$futilityStop, x20$futilityStop, tolerance = 1e-05) expect_equal(x20CodeBased$futilityPerStage, x20$futilityPerStage, tolerance = 1e-05) expect_equal(x20CodeBased$earlyStop, x20$earlyStop, tolerance = 1e-05) expect_equal(x20CodeBased$successPerStage, x20$successPerStage, tolerance = 1e-05) expect_equal(x20CodeBased$selectedArms, x20$selectedArms, tolerance = 1e-05) expect_equal(x20CodeBased$numberOfActiveArms, x20$numberOfActiveArms, tolerance = 1e-05) expect_equal(x20CodeBased$expectedNumberOfSubjects, x20$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x20CodeBased$sampleSizes, x20$sampleSizes, tolerance = 1e-05) expect_equal(x20CodeBased$conditionalPowerAchieved, x20$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x20), "character") df <- as.data.frame(x20) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x20) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x21 <- getSimulationMultiArmRates( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0, typeOfSelection = "epsilon", epsilonValue = 0.1, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = c(TRUE, FALSE), intersectionTest = "Hierarchical", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x21' with expected results expect_equal(x21$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x21$iterations[2, ], c(9, 9, 9, 10)) expect_equal(x21$iterations[3, ], c(2, 4, 4, 2)) expect_equal(x21$rejectAtLeastOne, c(0, 0, 0, 0)) expect_equal(unlist(as.list(x21$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0)) expect_equal(x21$futilityStop, c(0.8, 0.6, 0.6, 0.8), tolerance = 1e-07) expect_equal(x21$futilityPerStage[1, ], c(0.1, 0.1, 0.1, 0), tolerance = 1e-07) expect_equal(x21$futilityPerStage[2, ], c(0.7, 0.5, 0.5, 0.8), tolerance = 1e-07) expect_equal(x21$earlyStop[1, ], c(0.1, 0.1, 0.1, 0), tolerance = 1e-07) expect_equal(x21$earlyStop[2, ], c(0.7, 0.5, 0.5, 0.8), tolerance = 1e-07) expect_equal(x21$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x21$successPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x21$successPerStage[3, ], c(0, 0, 0, 0)) expect_equal(unlist(as.list(x21$selectedArms)), c(1, 0.2, 0.2, 1, 0.4, 0.4, 1, 0.4, 0.4, 1, 0.2, 0.2, 1, 0.2, 0, 1, 0.2, 0.1, 1, 0.3, 0.1, 1, 0.4, 0.1, 1, 0.5, 0.1, 1, 0.5, 0.2, 1, 0.2, 0.2, 1, 0.7, 0.1, 1, 0.4, 0, 1, 0.2, 0, 1, 0.7, 0.3, 1, 0.9, 0.2, 1, 0.9, 0.2, 1, 0.9, 0.4, 1, 0.9, 0.4, 1, 1, 0.2), tolerance = 1e-07) expect_equal(x21$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x21$numberOfActiveArms[2, ], c(1.4444444, 1.4444444, 1.7777778, 2.2), tolerance = 1e-07) expect_equal(x21$numberOfActiveArms[3, ], c(1.5, 1.75, 2.5, 3), tolerance = 1e-07) expect_equal(x21$expectedNumberOfSubjects, c(240.6, 332.2, 346.2, 256.5), tolerance = 1e-07) expect_equal(unlist(as.list(x21$sampleSizes)), c(10, 17.666667, 79, 10, 39.222222, 88.25, 10, 35.777778, 80.25, 10, 7.9, 39, 10, 13.555556, 0, 10, 22.222222, 25, 10, 24.666667, 5.25, 10, 17.7, 25, 10, 42.333333, 50, 10, 47.555556, 50, 10, 22.222222, 50, 10, 44.9, 25, 10, 27.111111, 0, 10, 14.111111, 0, 10, 51.888889, 55.25, 10, 50.7, 39, 10, 64.888889, 79, 10, 78.666667, 88.25, 10, 74.111111, 80.25, 10, 51.9, 39), tolerance = 1e-07) expect_equal(x21$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x21$conditionalPowerAchieved[2, ], c(0.071382822, 0.0014758747, 0.067299064, 0.14413714), tolerance = 1e-07) expect_equal(x21$conditionalPowerAchieved[3, ], c(0.29927137, 0.0060466075, 0.55383829, 0.59417789), tolerance = 1e-07) if 
(isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x21), NA))) expect_output(print(x21)$show()) invisible(capture.output(expect_error(summary(x21), NA))) expect_output(summary(x21)$show()) x21CodeBased <- eval(parse(text = getObjectRCode(x21, stringWrapParagraphWidth = NULL))) expect_equal(x21CodeBased$iterations, x21$iterations, tolerance = 1e-05) expect_equal(x21CodeBased$rejectAtLeastOne, x21$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x21CodeBased$rejectedArmsPerStage, x21$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$futilityStop, x21$futilityStop, tolerance = 1e-05) expect_equal(x21CodeBased$futilityPerStage, x21$futilityPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$earlyStop, x21$earlyStop, tolerance = 1e-05) expect_equal(x21CodeBased$successPerStage, x21$successPerStage, tolerance = 1e-05) expect_equal(x21CodeBased$selectedArms, x21$selectedArms, tolerance = 1e-05) expect_equal(x21CodeBased$numberOfActiveArms, x21$numberOfActiveArms, tolerance = 1e-05) expect_equal(x21CodeBased$expectedNumberOfSubjects, x21$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x21CodeBased$sampleSizes, x21$sampleSizes, tolerance = 1e-05) expect_equal(x21CodeBased$conditionalPowerAchieved, x21$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x21), "character") df <- as.data.frame(x21) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x21) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } x22 <- getSimulationMultiArmRates( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), activeArms = 4, threshold = 0.1, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.1, 0.3, 0.1), intersectionTest = "Hierarchical", conditionalPower = 0.8, minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), directionUpper = FALSE, maxNumberOfIterations = 1 ) ## Comparison of the 
results of SimulationResultsMultiArmRates object 'x22' with expected results expect_equal(x22$iterations[1, ], c(1, 1, 1)) expect_equal(x22$iterations[2, ], c(0, 1, 0)) expect_equal(x22$iterations[3, ], c(0, 0, 0)) expect_equal(x22$rejectAtLeastOne, c(0, 0, 0)) expect_equal(unlist(as.list(x22$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) expect_equal(x22$futilityStop, c(1, 1, 1)) expect_equal(x22$futilityPerStage[1, ], c(1, 0, 1)) expect_equal(x22$futilityPerStage[2, ], c(0, 1, 0)) expect_equal(x22$earlyStop[1, ], c(1, 0, 1)) expect_equal(x22$earlyStop[2, ], c(0, 1, 0)) expect_equal(x22$successPerStage[1, ], c(0, 0, 0)) expect_equal(x22$successPerStage[2, ], c(0, 0, 0)) expect_equal(x22$successPerStage[3, ], c(0, 0, 0)) expect_equal(unlist(as.list(x22$selectedArms)), c(1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0)) expect_equal(x22$numberOfActiveArms[1, ], c(4, 4, 4)) expect_equal(x22$numberOfActiveArms[2, ], c(NaN, 1, NaN)) expect_equal(x22$numberOfActiveArms[3, ], c(NaN, NaN, NaN)) expect_equal(x22$expectedNumberOfSubjects, c(NaN, NaN, NaN)) expect_equal(unlist(as.list(x22$sampleSizes)), c(10, 0, 0, 10, 0, 0, 10, NaN, NaN, 10, 0, 0, 10, 0, 0, 10, NaN, NaN, 10, 0, 0, 10, 91, 0, 10, NaN, NaN, 10, 0, 0, 10, 0, 0, 10, NaN, NaN, 10, 0, 0, 10, 91, 0, 10, NaN, NaN)) expect_equal(x22$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(x22$conditionalPowerAchieved[2, ], c(NaN, 3.7427402e-05, NaN), tolerance = 1e-07) expect_equal(x22$conditionalPowerAchieved[3, ], c(NaN, NaN, NaN)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x22), NA))) expect_output(print(x22)$show()) invisible(capture.output(expect_error(summary(x22), NA))) expect_output(summary(x22)$show()) x22CodeBased <- eval(parse(text = getObjectRCode(x22, 
stringWrapParagraphWidth = NULL))) expect_equal(x22CodeBased$iterations, x22$iterations, tolerance = 1e-05) expect_equal(x22CodeBased$rejectAtLeastOne, x22$rejectAtLeastOne, tolerance = 1e-05) expect_equal(x22CodeBased$rejectedArmsPerStage, x22$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$futilityStop, x22$futilityStop, tolerance = 1e-05) expect_equal(x22CodeBased$futilityPerStage, x22$futilityPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$earlyStop, x22$earlyStop, tolerance = 1e-05) expect_equal(x22CodeBased$successPerStage, x22$successPerStage, tolerance = 1e-05) expect_equal(x22CodeBased$selectedArms, x22$selectedArms, tolerance = 1e-05) expect_equal(x22CodeBased$numberOfActiveArms, x22$numberOfActiveArms, tolerance = 1e-05) expect_equal(x22CodeBased$expectedNumberOfSubjects, x22$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(x22CodeBased$sampleSizes, x22$sampleSizes, tolerance = 1e-05) expect_equal(x22CodeBased$conditionalPowerAchieved, x22$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x22), "character") df <- as.data.frame(x22) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x22) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMultiArmRates': using calcSubjectsFunction", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns} # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips} # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections} # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmRates} # @refFS[Formula]{fs:simulationMultiArmDoseResponse} # @refFS[Formula]{fs:simulationMultiArmRatesGenerate} # @refFS[Formula]{fs:simulationMultiArmRatesTestStatistics} # @refFS[Formula]{fs:simulationMultiArmSelections} # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} 
calcSubjectsFunctionSimulationMultiArmRates <- function(..., stage, minNumberOfSubjectsPerStage) { return(ifelse(stage == 3, 33, minNumberOfSubjectsPerStage[stage])) } x <- getSimulationMultiArmRates( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), typeOfShape = "linear", activeArms = 4, plannedSubjects = c(10, 30, 50), piControl = 0.3, piMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), minNumberOfSubjectsPerStage = c(10, 4, 4), maxNumberOfSubjectsPerStage = c(10, 100, 100), maxNumberOfIterations = 10, calcSubjectsFunction = calcSubjectsFunctionSimulationMultiArmRates ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x' with expected results expect_equal(x$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x$iterations[3, ], c(10, 10, 10, 9)) expect_equal(x$rejectAtLeastOne, c(0, 0, 0.2, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0, 0.1, 0.3), tolerance = 1e-07) expect_equal(x$futilityStop, c(0, 0, 0, 0)) expect_equal(x$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x$earlyStop[2, ], c(0, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x$successPerStage[2, ], c(0, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x$successPerStage[3, ], c(0, 0, 0.2, 0.3), tolerance = 1e-07) expect_equal(unlist(as.list(x$selectedArms)), c(1, 0.1, 0.1, 1, 0.4, 0.4, 1, 0, 0, 1, 0.1, 0.1, 1, 0, 0, 1, 0.1, 0.1, 1, 0.4, 0.4, 1, 0.2, 0.2, 1, 0.3, 0.3, 1, 0.2, 0.2, 1, 0.2, 0.2, 1, 0.1, 0.1, 1, 0.6, 0.6, 1, 0.3, 0.3, 1, 0.4, 0.4, 1, 0.6, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9), tolerance = 1e-07) expect_equal(x$numberOfActiveArms[1, ], c(4, 4, 4, 4)) 
expect_equal(x$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x$expectedNumberOfSubjects, c(124, 124, 124, 117.4), tolerance = 1e-07) expect_equal(unlist(as.list(x$sampleSizes)), c(10, 0.4, 3.3, 10, 1.6, 13.2, 10, 0, 0, 10, 0.4, 3.6666667, 10, 0, 0, 10, 0.4, 3.3, 10, 1.6, 13.2, 10, 0.8, 7.3333333, 10, 1.2, 9.9, 10, 0.8, 6.6, 10, 0.8, 6.6, 10, 0.4, 3.6666667, 10, 2.4, 19.8, 10, 1.2, 9.9, 10, 1.6, 13.2, 10, 2.4, 18.333333, 10, 4, 33, 10, 4, 33, 10, 4, 33, 10, 4, 33), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$conditionalPowerAchieved[2, ], c(0.012189382, 0.016190277, 0.020380353, 0.11925746), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[3, ], c(0.32488024, 0.34652134, 0.40081174, 0.68872913), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))) expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05) expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05) expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05) expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05) expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05) expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05) expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05) expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms, tolerance = 1e-05) expect_equal(xCodeBased$expectedNumberOfSubjects, x$expectedNumberOfSubjects, tolerance = 1e-05) 
expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMultiArmRates': using selectArmsFunction", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns} # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips} # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections} # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmRates} # @refFS[Formula]{fs:simulationMultiArmDoseResponse} # @refFS[Formula]{fs:simulationMultiArmRatesGenerate} # @refFS[Formula]{fs:simulationMultiArmRatesTestStatistics} # @refFS[Formula]{fs:simulationMultiArmSelections} # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} selectArmsFunctionSimulationMultiArmRates <- function(effectSizes) { return(c(TRUE, FALSE, FALSE, FALSE)) } x <- getSimulationMultiArmRates( seed = 1234, getDesignFisher(informationRates = c(0.2, 0.6, 1)), typeOfShape = "linear", activeArms = 4, plannedSubjects = c(10, 30, 50), piMaxVector = seq(0.3, 0.6, 0.1), adaptations = rep(TRUE, 2), maxNumberOfIterations = 10, selectArmsFunction = selectArmsFunctionSimulationMultiArmRates, typeOfSelection = "userDefined" ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x' with expected results expect_equal(x$iterations[1, ], c(10, 10, 10, 10)) expect_equal(x$iterations[2, ], c(10, 10, 10, 10)) expect_equal(x$iterations[3, ], c(10, 10, 10, 9)) expect_equal(x$rejectAtLeastOne, c(0, 0, 0.1, 0.4), tolerance = 1e-07) expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0, 0, 0.1, 0.1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.4, 0, 0), tolerance = 1e-07) expect_equal(x$futilityStop, c(0, 0, 0, 0)) expect_equal(x$futilityPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x$futilityPerStage[2, ], c(0, 0, 0, 0)) expect_equal(x$earlyStop[1, ], c(0, 0, 0, 0)) expect_equal(x$earlyStop[2, ], c(0, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x$successPerStage[1, ], c(0, 0, 0, 0)) expect_equal(x$successPerStage[2, ], c(0, 0, 0, 0.1), tolerance = 1e-07) expect_equal(x$successPerStage[3, ], c(0, 0, 0.1, 0), tolerance = 1e-07) expect_equal(unlist(as.list(x$selectedArms)), c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9), tolerance = 1e-07) expect_equal(x$numberOfActiveArms[1, ], c(4, 4, 4, 4)) expect_equal(x$numberOfActiveArms[2, ], c(1, 1, 1, 1)) expect_equal(x$numberOfActiveArms[3, ], c(1, 1, 1, 1)) expect_equal(x$expectedNumberOfSubjects, c(130, 130, 130, 126)) expect_equal(unlist(as.list(x$sampleSizes)), c(10, 20, 20, 10, 20, 20, 10, 20, 20, 10, 20, 20, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 0, 0, 10, 20, 20, 10, 20, 20, 10, 20, 20, 10, 20, 20)) expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$conditionalPowerAchieved[2, ], c(0.044616119, 0.11264062, 0.1248477, 0.43958255), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[3, ], c(0.087582974, 0.1172724, 0.15105487, 0.4331775), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))) expect_equal(xCodeBased$iterations, 
x$iterations, tolerance = 1e-05) expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05) expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05) expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05) expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05) expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05) expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05) expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms, tolerance = 1e-05) expect_equal(xCodeBased$expectedNumberOfSubjects, x$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMultiArmRates': typeOfShape = sigmoidEmax", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns} # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips} # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections} # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmRates} # @refFS[Formula]{fs:simulationMultiArmDoseResponse} # @refFS[Formula]{fs:simulationMultiArmRatesGenerate} # @refFS[Formula]{fs:simulationMultiArmRatesTestStatistics} # @refFS[Formula]{fs:simulationMultiArmSelections} # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} designIN <- getDesignInverseNormal(typeOfDesign = "P", kMax = 3, futilityBounds = c(0, 0)) x <- getSimulationMultiArmRates(designIN, 
activeArms = 3, typeOfShape = "sigmoidEmax", piMaxVector = seq(0.1, 0.9, 0.2), gED50 = 2, plannedSubjects = cumsum(rep(20, 3)), piControl = 0.1, intersectionTest = "Sidak", typeOfSelection = "rBest", rValue = 2, threshold = -Inf, successCriterion = "all", maxNumberOfIterations = 100, seed = 3456 ) ## Comparison of the results of SimulationResultsMultiArmRates object 'x' with expected results expect_equal(x$iterations[1, ], c(100, 100, 100, 100, 100)) expect_equal(x$iterations[2, ], c(20, 60, 88, 84, 81)) expect_equal(x$iterations[3, ], c(4, 45, 70, 38, 20)) expect_equal(x$rejectAtLeastOne, c(0, 0.07, 0.55, 0.89, 0.99), tolerance = 1e-07) expect_equal(unlist(as.list(x$rejectedArmsPerStage)), c(0, 0, 0, 0, 0, 0, 0.03, 0.02, 0.01, 0.11, 0.11, 0.05, 0.19, 0.06, 0.03, 0, 0, 0, 0, 0.01, 0.03, 0.07, 0.1, 0.13, 0.3, 0.22, 0.14, 0.45, 0.3, 0.12, 0, 0, 0, 0.01, 0.03, 0.01, 0.11, 0.23, 0.18, 0.41, 0.32, 0.09, 0.62, 0.31, 0.04), tolerance = 1e-07) expect_equal(x$futilityStop, c(0.96, 0.54, 0.13, 0.05, 0), tolerance = 1e-07) expect_equal(x$futilityPerStage[1, ], c(0.8, 0.4, 0.11, 0.05, 0), tolerance = 1e-07) expect_equal(x$futilityPerStage[2, ], c(0.16, 0.14, 0.02, 0, 0), tolerance = 1e-07) expect_equal(x$earlyStop[1, ], c(0.8, 0.4, 0.12, 0.16, 0.19), tolerance = 1e-07) expect_equal(x$earlyStop[2, ], c(0.16, 0.15, 0.18, 0.46, 0.61), tolerance = 1e-07) expect_equal(x$successPerStage[1, ], c(0, 0, 0.01, 0.11, 0.19), tolerance = 1e-07) expect_equal(x$successPerStage[2, ], c(0, 0.01, 0.16, 0.46, 0.61), tolerance = 1e-07) expect_equal(x$successPerStage[3, ], c(0, 0.01, 0.15, 0.18, 0.14), tolerance = 1e-07) expect_equal(unlist(as.list(x$selectedArms)), c(1, 0.11, 0.01, 1, 0.24, 0.17, 1, 0.26, 0.2, 1, 0.24, 0.14, 1, 0.14, 0.08, 1, 0.13, 0.03, 1, 0.44, 0.34, 1, 0.7, 0.55, 1, 0.69, 0.31, 1, 0.69, 0.13, 1, 0.16, 0.04, 1, 0.52, 0.39, 1, 0.8, 0.65, 1, 0.75, 0.31, 1, 0.79, 0.19, 1, 0.2, 0.04, 1, 0.6, 0.45, 1, 0.88, 0.7, 1, 0.84, 0.38, 1, 0.81, 0.2), tolerance = 1e-07) 
expect_equal(x$numberOfActiveArms[1, ], c(3, 3, 3, 3, 3)) expect_equal(x$numberOfActiveArms[2, ], c(2, 2, 2, 2, 2)) expect_equal(x$numberOfActiveArms[3, ], c(2, 2, 2, 2, 2)) expect_equal(x$expectedNumberOfSubjects, c(94.4, 143, 174.8, 153.2, 140.6), tolerance = 1e-07) expect_equal(unlist(as.list(x$sampleSizes)), c(20, 11, 5, 20, 8, 7.5555556, 20, 5.9090909, 5.7142857, 20, 5.7142857, 7.3684211, 20, 3.4567901, 8, 20, 13, 15, 20, 14.666667, 15.111111, 20, 15.909091, 15.714286, 20, 16.428571, 16.315789, 20, 17.037037, 13, 20, 16, 20, 20, 17.333333, 17.333333, 20, 18.181818, 18.571429, 20, 17.857143, 16.315789, 20, 19.506173, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[1, ], c(NA_real_, NA_real_, NA_real_, NA_real_, NA_real_)) expect_equal(x$conditionalPowerAchieved[2, ], c(0.011866207, 0.085418744, 0.23090361, 0.47460917, 0.65183497), tolerance = 1e-07) expect_equal(x$conditionalPowerAchieved[3, ], c(0.02497337, 0.151524, 0.4525101, 0.68922536, 0.80573911), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(x), NA))) expect_output(print(x)$show()) invisible(capture.output(expect_error(summary(x), NA))) expect_output(summary(x)$show()) xCodeBased <- eval(parse(text = getObjectRCode(x, stringWrapParagraphWidth = NULL))) expect_equal(xCodeBased$iterations, x$iterations, tolerance = 1e-05) expect_equal(xCodeBased$rejectAtLeastOne, x$rejectAtLeastOne, tolerance = 1e-05) expect_equal(xCodeBased$rejectedArmsPerStage, x$rejectedArmsPerStage, tolerance = 1e-05) expect_equal(xCodeBased$futilityStop, x$futilityStop, tolerance = 1e-05) expect_equal(xCodeBased$futilityPerStage, x$futilityPerStage, tolerance = 1e-05) expect_equal(xCodeBased$earlyStop, x$earlyStop, tolerance = 1e-05) expect_equal(xCodeBased$successPerStage, x$successPerStage, tolerance = 1e-05) expect_equal(xCodeBased$selectedArms, x$selectedArms, tolerance = 1e-05) 
expect_equal(xCodeBased$numberOfActiveArms, x$numberOfActiveArms, tolerance = 1e-05) expect_equal(xCodeBased$expectedNumberOfSubjects, x$expectedNumberOfSubjects, tolerance = 1e-05) expect_equal(xCodeBased$sampleSizes, x$sampleSizes, tolerance = 1e-05) expect_equal(xCodeBased$conditionalPowerAchieved, x$conditionalPowerAchieved, tolerance = 1e-05) expect_type(names(x), "character") df <- as.data.frame(x) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(x) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) test_that("'getSimulationMultiArmRates': comparison of base and multi-arm", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns} # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips} # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections} # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmRates} # @refFS[Formula]{fs:simulationMultiArmDoseResponse} # @refFS[Formula]{fs:simulationMultiArmRatesGenerate} # @refFS[Formula]{fs:simulationMultiArmRatesTestStatistics} # @refFS[Formula]{fs:simulationMultiArmSelections} # @refFS[Formula]{fs:multiarmRejectionRule} allocationRatioPlanned <- 2 design <- getDesignInverseNormal( typeOfDesign = "WT", deltaWT = 0.15, futilityBounds = c(-0.5, 0.5), informationRates = c(0.2, 0.8, 1) ) x <- getSimulationMultiArmRates(design, activeArms = 1, plannedSubjects = c(20, 40, 60), directionUpper = FALSE, piControl = 0.6, piMaxVector = seq(0.3, 0.6, 0.1), conditionalPower = 0.6, minNumberOfSubjectsPerStage = c(NA, 20, 20), maxNumberOfSubjectsPerStage = c(NA, 80, 80), piControlH1 = 0.4, piTreatmentsH1 = 0.3, maxNumberOfIterations = 100, allocationRatioPlanned = allocationRatioPlanned, seed = 1234 ) y <- getSimulationRates(design, plannedSubjects = round((1 + 1 / allocationRatioPlanned) * c(20, 40, 60)), normalApproximation = TRUE, pi2 = 0.6, pi1 = seq(0.3, 0.6, 0.1), 
directionUpper = FALSE, conditionalPower = 0.6, pi2H1 = 0.4, pi1H1 = 0.3, minNumberOfSubjectsPerStage = round((1 + 1 / allocationRatioPlanned) * c(NA, 20, 20)), maxNumberOfSubjectsPerStage = round((1 + 1 / allocationRatioPlanned) * c(NA, 80, 80)), maxNumberOfIterations = 100, allocationRatioPlanned = allocationRatioPlanned, seed = 1234 ) comp1 <- y$overallReject - x$rejectAtLeastOne ## Comparison of the results of numeric object 'comp1' with expected results expect_equal(comp1, c(-0.03, -0.02, 0.09, 0.03), tolerance = 1e-07) comp2 <- y$rejectPerStage - x$rejectedArmsPerStage[, , 1] ## Comparison of the results of matrixarray object 'comp2' with expected results expect_equal(comp2[1, ], c(0, 0, 0, 0)) expect_equal(comp2[2, ], c(0.09, -0.01, 0.06, 0.02), tolerance = 1e-07) expect_equal(comp2[3, ], c(-0.12, -0.01, 0.03, 0.01), tolerance = 1e-07) comp3 <- y$futilityPerStage - x$futilityPerStage ## Comparison of the results of matrixarray object 'comp3' with expected results expect_equal(comp3[1, ], c(0.04, 0.04, -0.12, -0.03), tolerance = 1e-07) expect_equal(comp3[2, ], c(0.01, 0.02, -0.05, 0.03), tolerance = 1e-07) comp4 <- round(y$sampleSizes - (x$sampleSizes[, , 1] + x$sampleSizes[, , 2]), 1) ## Comparison of the results of matrixarray object 'comp4' with expected results expect_equal(comp4[1, ], c(0, 0, 0, 0)) expect_equal(comp4[2, ], c(1.1, 0.3, 0, 0), tolerance = 1e-07) expect_equal(comp4[3, ], c(-44.7, 9.7, 1.3, -3.2), tolerance = 1e-07) comp5 <- round(y$expectedNumberOfSubjects - x$expectedNumberOfSubjects, 1) ## Comparison of the results of numeric object 'comp5' with expected results expect_equal(comp5, c(-14.6, -6.6, 26.9, 0.4), tolerance = 1e-07) comp6 <- x$earlyStop - y$earlyStop ## Comparison of the results of matrixarray object 'comp6' with expected results expect_equal(comp6[1, ], c(-0.96, -0.39, -0.75, -0.06), tolerance = 1e-07) expect_equal(comp6[2, ], c(0.1, -0.16, -0.38, -0.43), tolerance = 1e-07) }) test_that("'getSimulationMultiArmRates': 
comparison of base and multi-arm, Fisher design", { .skipTestIfDisabled() .skipTestIfNotX64() # @refFS[Sec.]{fs:sec:reproducibilityOfSimulationResults} # @refFS[Sec.]{fs:sec:simulatingMultiArmDesigns} # @refFS[Sec.]{fs:sec:simulatingMultiArmDoseResponseRelationShips} # @refFS[Sec.]{fs:sec:simulatingMultiArmSelections} # @refFS[Tab.]{fs:tab:output:getSimulationMultiArmRates} # @refFS[Formula]{fs:simulationMultiArmDoseResponse} # @refFS[Formula]{fs:simulationMultiArmRatesGenerate} # @refFS[Formula]{fs:simulationMultiArmRatesTestStatistics} # @refFS[Formula]{fs:simulationMultiArmSelections} # @refFS[Formula]{fs:multiarmRejectionRule} allocationRatioPlanned <- 1 design <- getDesignFisher(alpha0Vec = c(0.3, 0.4), informationRates = c(0.5, 0.7, 1)) x <- getSimulationMultiArmRates(design, activeArms = 1, plannedSubjects = c(20, 40, 60), directionUpper = FALSE, piControl = 0.6, piMaxVector = seq(0.3, 0.6, 0.1), conditionalPower = 0.6, minNumberOfSubjectsPerStage = c(NA, 20, 20), maxNumberOfSubjectsPerStage = c(NA, 80, 80), maxNumberOfIterations = 100, allocationRatioPlanned = allocationRatioPlanned, seed = -1008239793 ) y <- getSimulationRates(design, plannedSubjects = round((1 + 1 / allocationRatioPlanned) * c(20, 40, 60)), normalApproximation = TRUE, pi2 = 0.6, pi1 = seq(0.3, 0.6, 0.1), directionUpper = FALSE, conditionalPower = 0.6, minNumberOfSubjectsPerStage = round((1 + 1 / allocationRatioPlanned) * c(NA, 20, 20)), maxNumberOfSubjectsPerStage = round((1 + 1 / allocationRatioPlanned) * c(NA, 80, 80)), maxNumberOfIterations = 100, allocationRatioPlanned = allocationRatioPlanned, seed = -2039707705 ) comp1 <- y$overallReject - x$rejectAtLeastOne ## Comparison of the results of numeric object 'comp1' with expected results expect_equal(comp1, c(0.05, 0.1, 0.07, 0.02), tolerance = 1e-07) comp2 <- y$rejectPerStage - x$rejectedArmsPerStage[, , 1] ## Comparison of the results of matrixarray object 'comp2' with expected results expect_equal(comp2[1, ], c(0.05, 0.01, 0.02, 
0.03), tolerance = 1e-07) expect_equal(comp2[2, ], c(-0.03, 0.04, -0.01, -0.01), tolerance = 1e-07) expect_equal(comp2[3, ], c(0.03, 0.05, 0.06, 0), tolerance = 1e-07) comp3 <- y$futilityPerStage - x$futilityPerStage ## Comparison of the results of matrixarray object 'comp3' with expected results expect_equal(comp3[1, ], c(-0.05, -0.09, 0, 0), tolerance = 1e-07) expect_equal(comp3[2, ], c(0, 0, -0.05, 0.01), tolerance = 1e-07) comp4 <- round(y$sampleSizes - (x$sampleSizes[, , 1] + x$sampleSizes[, , 2]), 1) ## Comparison of the results of matrixarray object 'comp4' with expected results expect_equal(comp4[1, ], c(0, 0, 0, 0)) expect_equal(comp4[2, ], c(7.4, 3.6, -6.3, 6.6), tolerance = 1e-07) expect_equal(comp4[3, ], c(0.5, 12.9, -5, 26), tolerance = 1e-07) comp5 <- round(y$expectedNumberOfSubjects - x$expectedNumberOfSubjects, 1) ## Comparison of the results of numeric object 'comp5' with expected results expect_equal(comp5, c(6.1, 19.9, -2, -3.9), tolerance = 1e-07) comp6 <- x$earlyStop - y$earlyStop ## Comparison of the results of matrixarray object 'comp6' with expected results expect_equal(comp6[1, ], c(-0.38, -0.17, -0.41, 0.14), tolerance = 1e-07) expect_equal(comp6[2, ], c(-0.29, -0.61, -0.52, -0.78), tolerance = 1e-07) }) rpact/tests/testthat/test-f_analysis_multiarm_survival.R0000644000176200001440000021764214370207346023504 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File name: test-f_analysis_multiarm_survival.R ## | Creation date: 06 February 2023, 
12:11:07 ## | File version: $Revision: 6801 $ ## | Last changed: $Date: 2023-02-06 15:29:57 +0100 (Mon, 06 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | test_plan_section("Testing the Analysis Survival Functionality for Three or More Treatments") test_that("'getAnalysisResultsMultiArm' with survival data and different options", { design1 <- getDesignInverseNormal( kMax = 3, alpha = 0.025, futilityBounds = c(-0.5, 0), bindingFutility = FALSE, typeOfDesign = "asKD", gammaA = 1.2, informationRates = c(0.4, 0.7, 1) ) design2 <- getDesignFisher( kMax = 3, alpha = 0.025, alpha0Vec = c(0.7, 0.5), method = "equalAlpha", bindingFutility = TRUE, informationRates = c(0.4, 0.7, 1) ) design3 <- getDesignConditionalDunnett(alpha = 0.025, informationAtInterim = 0.4, secondStageConditioning = TRUE) # directionUpper = TRUE dataExample1 <- getDataset( events1 = c(25, 32), events2 = c(18, NA), logRanks1 = c(2.2, 1.8), logRanks2 = c(1.99, NA) ) # directionUpper = FALSE dataExample2 <- getDataset( events1 = c(25, 32), events2 = c(18, NA), logRanks1 = -c(2.2, 1.8), logRanks2 = -c(1.99, NA) ) # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # 
@refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results1 <- getAnalysisResults(design = design1, dataInput = dataExample1, intersectionTest = "Dunnett", nPlanned = c(20), directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results1' with expected results expect_equal(results1$thetaH1[1, ], 2.1027372, tolerance = 1e-05) expect_equal(results1$thetaH1[2, ], NA_real_) expect_equal(results1$conditionalRejectionProbabilities[1, ], c(0.16551988, 0.53357187, NA_real_), tolerance = 1e-05) expect_equal(results1$conditionalRejectionProbabilities[2, ], c(0.16551988, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$conditionalPower[1, ], c(NA_real_, NA_real_, 0.95961075), tolerance = 1e-05) expect_equal(results1$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results1$repeatedConfidenceIntervalLowerBounds[1, ], c(0.84462483, 1.0978923, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalLowerBounds[2, ], c(0.74230032, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalUpperBounds[1, ], c(6.8816796, 4.1951386, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedConfidenceIntervalUpperBounds[2, ], c(8.7950723, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedPValues[1, ], c(0.077362906, 0.0096216473, NA_real_), tolerance = 1e-05) expect_equal(results1$repeatedPValues[2, ], c(0.077362906, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results1), NA))) expect_output(print(results1)$show()) invisible(capture.output(expect_error(summary(results1), NA))) expect_output(summary(results1)$show()) results1CodeBased <- eval(parse(text = getObjectRCode(results1, stringWrapParagraphWidth = 
NULL))) expect_equal(results1CodeBased$thetaH1, results1$thetaH1, tolerance = 1e-05) expect_equal(results1CodeBased$conditionalRejectionProbabilities, results1$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results1CodeBased$conditionalPower, results1$conditionalPower, tolerance = 1e-05) expect_equal(results1CodeBased$repeatedConfidenceIntervalLowerBounds, results1$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results1CodeBased$repeatedConfidenceIntervalUpperBounds, results1$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results1CodeBased$repeatedPValues, results1$repeatedPValues, tolerance = 1e-05) expect_type(names(results1), "character") df <- as.data.frame(results1) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results1) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } .skipTestIfDisabled() # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # 
@refFS[Formula]{fs:testStatisticMultiArmSurvival} results2 <- getAnalysisResults(design = design1, dataInput = dataExample1, intersectionTest = "Simes", nPlanned = c(20), directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results2' with expected results expect_equal(results2$thetaH1[1, ], 2.1027372, tolerance = 1e-05) expect_equal(results2$thetaH1[2, ], NA_real_) expect_equal(results2$conditionalRejectionProbabilities[1, ], c(0.17669226, 0.55323067, NA_real_), tolerance = 1e-05) expect_equal(results2$conditionalRejectionProbabilities[2, ], c(0.17669226, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$conditionalPower[1, ], c(NA_real_, NA_real_, 0.96373388), tolerance = 1e-05) expect_equal(results2$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results2$repeatedConfidenceIntervalLowerBounds[1, ], c(0.83909619, 1.0883368, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalLowerBounds[2, ], c(0.73657742, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalUpperBounds[1, ], c(6.9270216, 4.2761956, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedConfidenceIntervalUpperBounds[2, ], c(8.863406, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedPValues[1, ], c(0.069951918, 0.0087766935, NA_real_), tolerance = 1e-05) expect_equal(results2$repeatedPValues[2, ], c(0.069951918, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results2), NA))) expect_output(print(results2)$show()) invisible(capture.output(expect_error(summary(results2), NA))) expect_output(summary(results2)$show()) results2CodeBased <- eval(parse(text = getObjectRCode(results2, stringWrapParagraphWidth = NULL))) expect_equal(results2CodeBased$thetaH1, results2$thetaH1, tolerance = 1e-05) expect_equal(results2CodeBased$conditionalRejectionProbabilities, 
results2$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results2CodeBased$conditionalPower, results2$conditionalPower, tolerance = 1e-05) expect_equal(results2CodeBased$repeatedConfidenceIntervalLowerBounds, results2$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results2CodeBased$repeatedConfidenceIntervalUpperBounds, results2$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results2CodeBased$repeatedPValues, results2$repeatedPValues, tolerance = 1e-05) expect_type(names(results2), "character") df <- as.data.frame(results2) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results2) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results3 <- getAnalysisResults(design = design1, dataInput = dataExample1, intersectionTest = "Sidak", nPlanned = c(20), directionUpper = TRUE) ## Comparison of the 
results of AnalysisResultsMultiArmInverseNormal object 'results3' with expected results expect_equal(results3$thetaH1[1, ], 2.1027372, tolerance = 1e-05) expect_equal(results3$thetaH1[2, ], NA_real_) expect_equal(results3$conditionalRejectionProbabilities[1, ], c(0.15801679, 0.51979239, NA_real_), tolerance = 1e-05) expect_equal(results3$conditionalRejectionProbabilities[2, ], c(0.15801679, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$conditionalPower[1, ], c(NA_real_, NA_real_, 0.9565118), tolerance = 1e-05) expect_equal(results3$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results3$repeatedConfidenceIntervalLowerBounds[1, ], c(0.83933393, 1.0895056, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedConfidenceIntervalLowerBounds[2, ], c(0.73682316, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedConfidenceIntervalUpperBounds[1, ], c(6.9250602, 4.2563039, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedConfidenceIntervalUpperBounds[2, ], c(8.8604482, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedPValues[1, ], c(0.082919001, 0.010252978, NA_real_), tolerance = 1e-05) expect_equal(results3$repeatedPValues[2, ], c(0.082919001, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results3), NA))) expect_output(print(results3)$show()) invisible(capture.output(expect_error(summary(results3), NA))) expect_output(summary(results3)$show()) results3CodeBased <- eval(parse(text = getObjectRCode(results3, stringWrapParagraphWidth = NULL))) expect_equal(results3CodeBased$thetaH1, results3$thetaH1, tolerance = 1e-05) expect_equal(results3CodeBased$conditionalRejectionProbabilities, results3$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results3CodeBased$conditionalPower, results3$conditionalPower, tolerance = 1e-05) 
expect_equal(results3CodeBased$repeatedConfidenceIntervalLowerBounds, results3$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results3CodeBased$repeatedConfidenceIntervalUpperBounds, results3$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results3CodeBased$repeatedPValues, results3$repeatedPValues, tolerance = 1e-05) expect_type(names(results3), "character") df <- as.data.frame(results3) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results3) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results4 <- getAnalysisResults(design = design1, dataInput = dataExample1, intersectionTest = "Bonferroni", nPlanned = c(20), directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results4' with expected results expect_equal(results4$thetaH1[1, ], 2.1027372, tolerance = 
1e-05) expect_equal(results4$thetaH1[2, ], NA_real_) expect_equal(results4$conditionalRejectionProbabilities[1, ], c(0.15727093, 0.51839597, NA_real_), tolerance = 1e-05) expect_equal(results4$conditionalRejectionProbabilities[2, ], c(0.15727093, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$conditionalPower[1, ], c(NA_real_, NA_real_, 0.95618769), tolerance = 1e-05) expect_equal(results4$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results4$repeatedConfidenceIntervalLowerBounds[1, ], c(0.83909619, 1.0883368, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalLowerBounds[2, ], c(0.73657742, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalUpperBounds[1, ], c(6.9270216, 4.2761956, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedConfidenceIntervalUpperBounds[2, ], c(8.863406, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedPValues[1, ], c(0.083499788, 0.010318782, NA_real_), tolerance = 1e-05) expect_equal(results4$repeatedPValues[2, ], c(0.083499788, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results4), NA))) expect_output(print(results4)$show()) invisible(capture.output(expect_error(summary(results4), NA))) expect_output(summary(results4)$show()) results4CodeBased <- eval(parse(text = getObjectRCode(results4, stringWrapParagraphWidth = NULL))) expect_equal(results4CodeBased$thetaH1, results4$thetaH1, tolerance = 1e-05) expect_equal(results4CodeBased$conditionalRejectionProbabilities, results4$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results4CodeBased$conditionalPower, results4$conditionalPower, tolerance = 1e-05) expect_equal(results4CodeBased$repeatedConfidenceIntervalLowerBounds, results4$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results4CodeBased$repeatedConfidenceIntervalUpperBounds, 
results4$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results4CodeBased$repeatedPValues, results4$repeatedPValues, tolerance = 1e-05) expect_type(names(results4), "character") df <- as.data.frame(results4) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results4) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results5 <- getAnalysisResults(design = design2, dataInput = dataExample1, intersectionTest = "Dunnett", nPlanned = c(20), seed = 1234, iterations = 1000, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results5' with expected results expect_equal(results5$thetaH1[1, ], 2.1027372, tolerance = 1e-05) expect_equal(results5$thetaH1[2, ], NA_real_) expect_equal(results5$conditionalRejectionProbabilities[1, ], c(0.10966368, 1, NA_real_), tolerance = 1e-05) 
expect_equal(results5$conditionalRejectionProbabilities[2, ], c(0.10966368, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$conditionalPower[1, ], c(NA_real_, NA_real_, 0.93227664), tolerance = 1e-05) expect_equal(results5$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results5$repeatedConfidenceIntervalLowerBounds[1, ], c(0.91202463, 1.0654055, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalLowerBounds[2, ], c(0.81259534, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalUpperBounds[1, ], c(6.3731146, 4.2132456, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedConfidenceIntervalUpperBounds[2, ], c(8.0342369, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedPValues[1, ], c(0.04389568, 0.013378163, NA_real_), tolerance = 1e-05) expect_equal(results5$repeatedPValues[2, ], c(0.04389568, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results5), NA))) expect_output(print(results5)$show()) invisible(capture.output(expect_error(summary(results5), NA))) expect_output(summary(results5)$show()) results5CodeBased <- eval(parse(text = getObjectRCode(results5, stringWrapParagraphWidth = NULL))) expect_equal(results5CodeBased$thetaH1, results5$thetaH1, tolerance = 1e-05) expect_equal(results5CodeBased$conditionalRejectionProbabilities, results5$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results5CodeBased$conditionalPower, results5$conditionalPower, tolerance = 1e-05) expect_equal(results5CodeBased$repeatedConfidenceIntervalLowerBounds, results5$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results5CodeBased$repeatedConfidenceIntervalUpperBounds, results5$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results5CodeBased$repeatedPValues, results5$repeatedPValues, tolerance = 1e-05) 
expect_type(names(results5), "character") df <- as.data.frame(results5) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results5) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results6 <- getAnalysisResults(design = design2, dataInput = dataExample1, intersectionTest = "Simes", nPlanned = c(20), seed = 1234, iterations = 1000, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results6' with expected results expect_equal(results6$thetaH1[1, ], 2.1027372, tolerance = 1e-05) expect_equal(results6$thetaH1[2, ], NA_real_) expect_equal(results6$conditionalRejectionProbabilities[1, ], c(0.1211541, 1, NA_real_), tolerance = 1e-05) expect_equal(results6$conditionalRejectionProbabilities[2, ], c(0.1211541, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$conditionalPower[1, ], c(NA_real_, NA_real_, 
0.94819096), tolerance = 1e-05) expect_equal(results6$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results6$repeatedConfidenceIntervalLowerBounds[1, ], c(0.90417824, 1.0568242, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedConfidenceIntervalLowerBounds[2, ], c(0.80436275, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedConfidenceIntervalUpperBounds[1, ], c(6.4284199, 4.2747728, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedConfidenceIntervalUpperBounds[2, ], c(8.1164667, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedPValues[1, ], c(0.039924588, 0.01222708, NA_real_), tolerance = 1e-05) expect_equal(results6$repeatedPValues[2, ], c(0.039924588, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results6), NA))) expect_output(print(results6)$show()) invisible(capture.output(expect_error(summary(results6), NA))) expect_output(summary(results6)$show()) results6CodeBased <- eval(parse(text = getObjectRCode(results6, stringWrapParagraphWidth = NULL))) expect_equal(results6CodeBased$thetaH1, results6$thetaH1, tolerance = 1e-05) expect_equal(results6CodeBased$conditionalRejectionProbabilities, results6$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results6CodeBased$conditionalPower, results6$conditionalPower, tolerance = 1e-05) expect_equal(results6CodeBased$repeatedConfidenceIntervalLowerBounds, results6$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results6CodeBased$repeatedConfidenceIntervalUpperBounds, results6$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results6CodeBased$repeatedPValues, results6$repeatedPValues, tolerance = 1e-05) expect_type(names(results6), "character") df <- as.data.frame(results6) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results6) 
expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results7 <- getAnalysisResults(design = design2, dataInput = dataExample1, intersectionTest = "Sidak", nPlanned = c(20), seed = 1234, iterations = 1000, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results7' with expected results expect_equal(results7$thetaH1[1, ], 2.1027372, tolerance = 1e-05) expect_equal(results7$thetaH1[2, ], NA_real_) expect_equal(results7$conditionalRejectionProbabilities[1, ], c(0.1023739, 1, NA_real_), tolerance = 1e-05) expect_equal(results7$conditionalRejectionProbabilities[2, ], c(0.1023739, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$conditionalPower[1, ], c(NA_real_, NA_real_, 0.92036569), tolerance = 1e-05) expect_equal(results7$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results7$repeatedConfidenceIntervalLowerBounds[1, ], 
c(0.90464342, 1.0577667, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedConfidenceIntervalLowerBounds[2, ], c(0.80485046, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedConfidenceIntervalUpperBounds[1, ], c(6.4251144, 4.2597035, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedConfidenceIntervalUpperBounds[2, ], c(8.1115484, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedPValues[1, ], c(0.046853018, 0.014230746, NA_real_), tolerance = 1e-05) expect_equal(results7$repeatedPValues[2, ], c(0.046853018, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results7), NA))) expect_output(print(results7)$show()) invisible(capture.output(expect_error(summary(results7), NA))) expect_output(summary(results7)$show()) results7CodeBased <- eval(parse(text = getObjectRCode(results7, stringWrapParagraphWidth = NULL))) expect_equal(results7CodeBased$thetaH1, results7$thetaH1, tolerance = 1e-05) expect_equal(results7CodeBased$conditionalRejectionProbabilities, results7$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results7CodeBased$conditionalPower, results7$conditionalPower, tolerance = 1e-05) expect_equal(results7CodeBased$repeatedConfidenceIntervalLowerBounds, results7$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results7CodeBased$repeatedConfidenceIntervalUpperBounds, results7$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results7CodeBased$repeatedPValues, results7$repeatedPValues, tolerance = 1e-05) expect_type(names(results7), "character") df <- as.data.frame(results7) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results7) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # 
@refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results8 <- getAnalysisResults(design = design2, dataInput = dataExample1, intersectionTest = "Bonferroni", nPlanned = c(20), seed = 1234, iterations = 1000, directionUpper = TRUE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results8' with expected results expect_equal(results8$thetaH1[1, ], 2.1027372, tolerance = 1e-05) expect_equal(results8$thetaH1[2, ], NA_real_) expect_equal(results8$conditionalRejectionProbabilities[1, ], c(0.10166729, 1, NA_real_), tolerance = 1e-05) expect_equal(results8$conditionalRejectionProbabilities[2, ], c(0.10166729, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$conditionalPower[1, ], c(NA_real_, NA_real_, 0.91912747), tolerance = 1e-05) expect_equal(results8$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results8$repeatedConfidenceIntervalLowerBounds[1, ], c(0.90417824, 1.0568242, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedConfidenceIntervalLowerBounds[2, ], c(0.80436275, NA_real_, NA_real_), tolerance 
= 1e-05) expect_equal(results8$repeatedConfidenceIntervalUpperBounds[1, ], c(6.4284199, 4.2747728, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedConfidenceIntervalUpperBounds[2, ], c(8.1164667, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedPValues[1, ], c(0.047161054, 0.014319438, NA_real_), tolerance = 1e-05) expect_equal(results8$repeatedPValues[2, ], c(0.047161054, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results8), NA))) expect_output(print(results8)$show()) invisible(capture.output(expect_error(summary(results8), NA))) expect_output(summary(results8)$show()) results8CodeBased <- eval(parse(text = getObjectRCode(results8, stringWrapParagraphWidth = NULL))) expect_equal(results8CodeBased$thetaH1, results8$thetaH1, tolerance = 1e-05) expect_equal(results8CodeBased$conditionalRejectionProbabilities, results8$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results8CodeBased$conditionalPower, results8$conditionalPower, tolerance = 1e-05) expect_equal(results8CodeBased$repeatedConfidenceIntervalLowerBounds, results8$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results8CodeBased$repeatedConfidenceIntervalUpperBounds, results8$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results8CodeBased$repeatedPValues, results8$repeatedPValues, tolerance = 1e-05) expect_type(names(results8), "character") df <- as.data.frame(results8) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results8) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # 
@refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results9 <- getAnalysisResults(design = design3, dataInput = dataExample1, intersectionTest = "Dunnett", directionUpper = TRUE) ## Comparison of the results of AnalysisResultsConditionalDunnett object 'results9' with expected results expect_equal(results9$thetaH1[1, ], 2.1027372, tolerance = 1e-05) expect_equal(results9$thetaH1[2, ], NA_real_) expect_equal(results9$conditionalRejectionProbabilities[1, ], c(NA_real_, 0.20921255), tolerance = 1e-05) expect_equal(results9$conditionalRejectionProbabilities[2, ], c(NA_real_, 0.18260705), tolerance = 1e-05) expect_equal(results9$conditionalPower[1, ], c(NA_real_, NA_real_)) expect_equal(results9$conditionalPower[2, ], c(NA_real_, NA_real_)) expect_equal(results9$repeatedConfidenceIntervalLowerBounds[1, ], c(NA_real_, 1.2250509), tolerance = 1e-05) expect_equal(results9$repeatedConfidenceIntervalLowerBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results9$repeatedConfidenceIntervalUpperBounds[1, ], c(NA_real_, 3.6401262), tolerance = 1e-05) expect_equal(results9$repeatedConfidenceIntervalUpperBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results9$repeatedPValues[1, ], c(NA_real_, 0.0032883088), tolerance = 1e-05) 
expect_equal(results9$repeatedPValues[2, ], c(NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results9), NA))) expect_output(print(results9)$show()) invisible(capture.output(expect_error(summary(results9), NA))) expect_output(summary(results9)$show()) results9CodeBased <- eval(parse(text = getObjectRCode(results9, stringWrapParagraphWidth = NULL))) expect_equal(results9CodeBased$thetaH1, results9$thetaH1, tolerance = 1e-05) expect_equal(results9CodeBased$conditionalRejectionProbabilities, results9$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results9CodeBased$conditionalPower, results9$conditionalPower, tolerance = 1e-05) expect_equal(results9CodeBased$repeatedConfidenceIntervalLowerBounds, results9$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results9CodeBased$repeatedConfidenceIntervalUpperBounds, results9$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results9CodeBased$repeatedPValues, results9$repeatedPValues, tolerance = 1e-05) expect_type(names(results9), "character") df <- as.data.frame(results9) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results9) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # 
@refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results10 <- getAnalysisResults(design = design1, dataInput = dataExample2, intersectionTest = "Dunnett", nPlanned = c(20), directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results10' with expected results expect_equal(results10$thetaH1[1, ], 0.47557061, tolerance = 1e-05) expect_equal(results10$thetaH1[2, ], NA_real_) expect_equal(results10$conditionalRejectionProbabilities[1, ], c(0.16551988, 0.53357187, NA_real_), tolerance = 1e-05) expect_equal(results10$conditionalRejectionProbabilities[2, ], c(0.16551988, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results10$conditionalPower[1, ], c(NA_real_, NA_real_, 0.95961075), tolerance = 1e-05) expect_equal(results10$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results10$repeatedConfidenceIntervalLowerBounds[1, ], c(0.14531336, 0.23837116, NA_real_), tolerance = 1e-05) expect_equal(results10$repeatedConfidenceIntervalLowerBounds[2, ], c(0.11370003, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results10$repeatedConfidenceIntervalUpperBounds[1, ], c(1.1839576, 0.91083607, NA_real_), tolerance = 1e-05) expect_equal(results10$repeatedConfidenceIntervalUpperBounds[2, ], c(1.3471639, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results10$repeatedPValues[1, ], c(0.077362906, 0.0096216473, NA_real_), tolerance = 1e-05) expect_equal(results10$repeatedPValues[2, ], c(0.077362906, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { 
invisible(capture.output(expect_error(print(results10), NA))) expect_output(print(results10)$show()) invisible(capture.output(expect_error(summary(results10), NA))) expect_output(summary(results10)$show()) results10CodeBased <- eval(parse(text = getObjectRCode(results10, stringWrapParagraphWidth = NULL))) expect_equal(results10CodeBased$thetaH1, results10$thetaH1, tolerance = 1e-05) expect_equal(results10CodeBased$conditionalRejectionProbabilities, results10$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results10CodeBased$conditionalPower, results10$conditionalPower, tolerance = 1e-05) expect_equal(results10CodeBased$repeatedConfidenceIntervalLowerBounds, results10$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results10CodeBased$repeatedConfidenceIntervalUpperBounds, results10$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results10CodeBased$repeatedPValues, results10$repeatedPValues, tolerance = 1e-05) expect_type(names(results10), "character") df <- as.data.frame(results10) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results10) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # 
@refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results11 <- getAnalysisResults(design = design1, dataInput = dataExample2, intersectionTest = "Simes", nPlanned = c(20), directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results11' with expected results expect_equal(results11$thetaH1[1, ], 0.47557061, tolerance = 1e-05) expect_equal(results11$thetaH1[2, ], NA_real_) expect_equal(results11$conditionalRejectionProbabilities[1, ], c(0.17669226, 0.55323067, NA_real_), tolerance = 1e-05) expect_equal(results11$conditionalRejectionProbabilities[2, ], c(0.17669226, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results11$conditionalPower[1, ], c(NA_real_, NA_real_, 0.96373388), tolerance = 1e-05) expect_equal(results11$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results11$repeatedConfidenceIntervalLowerBounds[1, ], c(0.14436219, 0.23385274, NA_real_), tolerance = 1e-05) expect_equal(results11$repeatedConfidenceIntervalLowerBounds[2, ], c(0.11282345, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results11$repeatedConfidenceIntervalUpperBounds[1, ], c(1.1917585, 0.91883308, NA_real_), tolerance = 1e-05) expect_equal(results11$repeatedConfidenceIntervalUpperBounds[2, ], c(1.3576306, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results11$repeatedPValues[1, ], c(0.069951918, 0.0087766935, NA_real_), tolerance = 1e-05) expect_equal(results11$repeatedPValues[2, ], c(0.069951918, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results11), NA))) expect_output(print(results11)$show()) invisible(capture.output(expect_error(summary(results11), NA))) 
expect_output(summary(results11)$show()) results11CodeBased <- eval(parse(text = getObjectRCode(results11, stringWrapParagraphWidth = NULL))) expect_equal(results11CodeBased$thetaH1, results11$thetaH1, tolerance = 1e-05) expect_equal(results11CodeBased$conditionalRejectionProbabilities, results11$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results11CodeBased$conditionalPower, results11$conditionalPower, tolerance = 1e-05) expect_equal(results11CodeBased$repeatedConfidenceIntervalLowerBounds, results11$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results11CodeBased$repeatedConfidenceIntervalUpperBounds, results11$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results11CodeBased$repeatedPValues, results11$repeatedPValues, tolerance = 1e-05) expect_type(names(results11), "character") df <- as.data.frame(results11) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results11) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # 
@refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results12 <- getAnalysisResults(design = design1, dataInput = dataExample2, intersectionTest = "Sidak", nPlanned = c(20), directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results12' with expected results expect_equal(results12$thetaH1[1, ], 0.47557061, tolerance = 1e-05) expect_equal(results12$thetaH1[2, ], NA_real_) expect_equal(results12$conditionalRejectionProbabilities[1, ], c(0.15801679, 0.51979239, NA_real_), tolerance = 1e-05) expect_equal(results12$conditionalRejectionProbabilities[2, ], c(0.15801679, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results12$conditionalPower[1, ], c(NA_real_, NA_real_, 0.9565118), tolerance = 1e-05) expect_equal(results12$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results12$repeatedConfidenceIntervalLowerBounds[1, ], c(0.14440308, 0.23494562, NA_real_), tolerance = 1e-05) expect_equal(results12$repeatedConfidenceIntervalLowerBounds[2, ], c(0.11286087, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results12$repeatedConfidenceIntervalUpperBounds[1, ], c(1.1914212, 0.91784736, NA_real_), tolerance = 1e-05) expect_equal(results12$repeatedConfidenceIntervalUpperBounds[2, ], c(1.3571775, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results12$repeatedPValues[1, ], c(0.082919001, 0.010252978, NA_real_), tolerance = 1e-05) expect_equal(results12$repeatedPValues[2, ], c(0.082919001, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results12), NA))) expect_output(print(results12)$show()) invisible(capture.output(expect_error(summary(results12), NA))) expect_output(summary(results12)$show()) results12CodeBased <- eval(parse(text = getObjectRCode(results12, stringWrapParagraphWidth = NULL))) 
expect_equal(results12CodeBased$thetaH1, results12$thetaH1, tolerance = 1e-05) expect_equal(results12CodeBased$conditionalRejectionProbabilities, results12$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results12CodeBased$conditionalPower, results12$conditionalPower, tolerance = 1e-05) expect_equal(results12CodeBased$repeatedConfidenceIntervalLowerBounds, results12$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results12CodeBased$repeatedConfidenceIntervalUpperBounds, results12$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results12CodeBased$repeatedPValues, results12$repeatedPValues, tolerance = 1e-05) expect_type(names(results12), "character") df <- as.data.frame(results12) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results12) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} 
results13 <- getAnalysisResults(design = design1, dataInput = dataExample2, intersectionTest = "Bonferroni", nPlanned = c(20), directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmInverseNormal object 'results13' with expected results expect_equal(results13$thetaH1[1, ], 0.47557061, tolerance = 1e-05) expect_equal(results13$thetaH1[2, ], NA_real_) expect_equal(results13$conditionalRejectionProbabilities[1, ], c(0.15727093, 0.51839597, NA_real_), tolerance = 1e-05) expect_equal(results13$conditionalRejectionProbabilities[2, ], c(0.15727093, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results13$conditionalPower[1, ], c(NA_real_, NA_real_, 0.95618769), tolerance = 1e-05) expect_equal(results13$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results13$repeatedConfidenceIntervalLowerBounds[1, ], c(0.14436219, 0.23385274, NA_real_), tolerance = 1e-05) expect_equal(results13$repeatedConfidenceIntervalLowerBounds[2, ], c(0.11282345, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results13$repeatedConfidenceIntervalUpperBounds[1, ], c(1.1917585, 0.91883308, NA_real_), tolerance = 1e-05) expect_equal(results13$repeatedConfidenceIntervalUpperBounds[2, ], c(1.3576306, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results13$repeatedPValues[1, ], c(0.083499788, 0.010318782, NA_real_), tolerance = 1e-05) expect_equal(results13$repeatedPValues[2, ], c(0.083499788, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results13), NA))) expect_output(print(results13)$show()) invisible(capture.output(expect_error(summary(results13), NA))) expect_output(summary(results13)$show()) results13CodeBased <- eval(parse(text = getObjectRCode(results13, stringWrapParagraphWidth = NULL))) expect_equal(results13CodeBased$thetaH1, results13$thetaH1, tolerance = 1e-05) expect_equal(results13CodeBased$conditionalRejectionProbabilities, 
results13$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results13CodeBased$conditionalPower, results13$conditionalPower, tolerance = 1e-05) expect_equal(results13CodeBased$repeatedConfidenceIntervalLowerBounds, results13$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results13CodeBased$repeatedConfidenceIntervalUpperBounds, results13$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results13CodeBased$repeatedPValues, results13$repeatedPValues, tolerance = 1e-05) expect_type(names(results13), "character") df <- as.data.frame(results13) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results13) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results14 <- getAnalysisResults(design = design2, dataInput = dataExample2, intersectionTest = "Dunnett", nPlanned = c(20), seed = 1234, iterations = 
1000, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results14' with expected results expect_equal(results14$thetaH1[1, ], 0.47557061, tolerance = 1e-05) expect_equal(results14$thetaH1[2, ], NA_real_) expect_equal(results14$conditionalRejectionProbabilities[1, ], c(0.10966368, 1, NA_real_), tolerance = 1e-05) expect_equal(results14$conditionalRejectionProbabilities[2, ], c(0.10966368, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results14$conditionalPower[1, ], c(NA_real_, NA_real_, 0.93227664), tolerance = 1e-05) expect_equal(results14$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results14$repeatedConfidenceIntervalLowerBounds[1, ], c(0.15690919, 0.23734662, NA_real_), tolerance = 1e-05) expect_equal(results14$repeatedConfidenceIntervalLowerBounds[2, ], c(0.12446713, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results14$repeatedConfidenceIntervalUpperBounds[1, ], c(1.0964616, 0.93860979, NA_real_), tolerance = 1e-05) expect_equal(results14$repeatedConfidenceIntervalUpperBounds[2, ], c(1.2306248, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results14$repeatedPValues[1, ], c(0.04389568, 0.013378163, NA_real_), tolerance = 1e-05) expect_equal(results14$repeatedPValues[2, ], c(0.04389568, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results14), NA))) expect_output(print(results14)$show()) invisible(capture.output(expect_error(summary(results14), NA))) expect_output(summary(results14)$show()) results14CodeBased <- eval(parse(text = getObjectRCode(results14, stringWrapParagraphWidth = NULL))) expect_equal(results14CodeBased$thetaH1, results14$thetaH1, tolerance = 1e-05) expect_equal(results14CodeBased$conditionalRejectionProbabilities, results14$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results14CodeBased$conditionalPower, results14$conditionalPower, tolerance = 
1e-05) expect_equal(results14CodeBased$repeatedConfidenceIntervalLowerBounds, results14$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results14CodeBased$repeatedConfidenceIntervalUpperBounds, results14$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results14CodeBased$repeatedPValues, results14$repeatedPValues, tolerance = 1e-05) expect_type(names(results14), "character") df <- as.data.frame(results14) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results14) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results15 <- getAnalysisResults(design = design2, dataInput = dataExample2, intersectionTest = "Simes", nPlanned = c(20), seed = 1234, iterations = 1000, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results15' with expected results 
expect_equal(results15$thetaH1[1, ], 0.47557061, tolerance = 1e-05) expect_equal(results15$thetaH1[2, ], NA_real_) expect_equal(results15$conditionalRejectionProbabilities[1, ], c(0.1211541, 1, NA_real_), tolerance = 1e-05) expect_equal(results15$conditionalRejectionProbabilities[2, ], c(0.1211541, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results15$conditionalPower[1, ], c(NA_real_, NA_real_, 0.94819096), tolerance = 1e-05) expect_equal(results15$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results15$repeatedConfidenceIntervalLowerBounds[1, ], c(0.15555937, 0.23393056, NA_real_), tolerance = 1e-05) expect_equal(results15$repeatedConfidenceIntervalLowerBounds[2, ], c(0.12320632, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results15$repeatedConfidenceIntervalUpperBounds[1, ], c(1.1059766, 0.94623115, NA_real_), tolerance = 1e-05) expect_equal(results15$repeatedConfidenceIntervalUpperBounds[2, ], c(1.2432202, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results15$repeatedPValues[1, ], c(0.039924588, 0.01222708, NA_real_), tolerance = 1e-05) expect_equal(results15$repeatedPValues[2, ], c(0.039924588, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results15), NA))) expect_output(print(results15)$show()) invisible(capture.output(expect_error(summary(results15), NA))) expect_output(summary(results15)$show()) results15CodeBased <- eval(parse(text = getObjectRCode(results15, stringWrapParagraphWidth = NULL))) expect_equal(results15CodeBased$thetaH1, results15$thetaH1, tolerance = 1e-05) expect_equal(results15CodeBased$conditionalRejectionProbabilities, results15$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results15CodeBased$conditionalPower, results15$conditionalPower, tolerance = 1e-05) expect_equal(results15CodeBased$repeatedConfidenceIntervalLowerBounds, results15$repeatedConfidenceIntervalLowerBounds, tolerance = 
1e-05) expect_equal(results15CodeBased$repeatedConfidenceIntervalUpperBounds, results15$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results15CodeBased$repeatedPValues, results15$repeatedPValues, tolerance = 1e-05) expect_type(names(results15), "character") df <- as.data.frame(results15) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results15) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results16 <- getAnalysisResults(design = design2, dataInput = dataExample2, intersectionTest = "Sidak", nPlanned = c(20), seed = 1234, iterations = 1000, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results16' with expected results expect_equal(results16$thetaH1[1, ], 0.47557061, tolerance = 1e-05) expect_equal(results16$thetaH1[2, ], NA_real_) 
expect_equal(results16$conditionalRejectionProbabilities[1, ], c(0.1023739, 1, NA_real_), tolerance = 1e-05) expect_equal(results16$conditionalRejectionProbabilities[2, ], c(0.1023739, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results16$conditionalPower[1, ], c(NA_real_, NA_real_, 0.92036569), tolerance = 1e-05) expect_equal(results16$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results16$repeatedConfidenceIntervalLowerBounds[1, ], c(0.15563938, 0.23475813, NA_real_), tolerance = 1e-05) expect_equal(results16$repeatedConfidenceIntervalLowerBounds[2, ], c(0.1232811, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results16$repeatedConfidenceIntervalUpperBounds[1, ], c(1.1054079, 0.94538806, NA_real_), tolerance = 1e-05) expect_equal(results16$repeatedConfidenceIntervalUpperBounds[2, ], c(1.2424668, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results16$repeatedPValues[1, ], c(0.046853018, 0.014230746, NA_real_), tolerance = 1e-05) expect_equal(results16$repeatedPValues[2, ], c(0.046853018, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results16), NA))) expect_output(print(results16)$show()) invisible(capture.output(expect_error(summary(results16), NA))) expect_output(summary(results16)$show()) results16CodeBased <- eval(parse(text = getObjectRCode(results16, stringWrapParagraphWidth = NULL))) expect_equal(results16CodeBased$thetaH1, results16$thetaH1, tolerance = 1e-05) expect_equal(results16CodeBased$conditionalRejectionProbabilities, results16$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results16CodeBased$conditionalPower, results16$conditionalPower, tolerance = 1e-05) expect_equal(results16CodeBased$repeatedConfidenceIntervalLowerBounds, results16$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results16CodeBased$repeatedConfidenceIntervalUpperBounds, 
results16$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results16CodeBased$repeatedPValues, results16$repeatedPValues, tolerance = 1e-05) expect_type(names(results16), "character") df <- as.data.frame(results16) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results16) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results17 <- getAnalysisResults(design = design2, dataInput = dataExample2, intersectionTest = "Bonferroni", nPlanned = c(20), seed = 1234, iterations = 1000, directionUpper = FALSE) ## Comparison of the results of AnalysisResultsMultiArmFisher object 'results17' with expected results expect_equal(results17$thetaH1[1, ], 0.47557061, tolerance = 1e-05) expect_equal(results17$thetaH1[2, ], NA_real_) expect_equal(results17$conditionalRejectionProbabilities[1, ], c(0.10166729, 1, NA_real_), tolerance = 1e-05) 
expect_equal(results17$conditionalRejectionProbabilities[2, ], c(0.10166729, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results17$conditionalPower[1, ], c(NA_real_, NA_real_, 0.91912747), tolerance = 1e-05) expect_equal(results17$conditionalPower[2, ], c(NA_real_, NA_real_, NA_real_)) expect_equal(results17$repeatedConfidenceIntervalLowerBounds[1, ], c(0.15555937, 0.23393056, NA_real_), tolerance = 1e-05) expect_equal(results17$repeatedConfidenceIntervalLowerBounds[2, ], c(0.12320632, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results17$repeatedConfidenceIntervalUpperBounds[1, ], c(1.1059766, 0.94623115, NA_real_), tolerance = 1e-05) expect_equal(results17$repeatedConfidenceIntervalUpperBounds[2, ], c(1.2432202, NA_real_, NA_real_), tolerance = 1e-05) expect_equal(results17$repeatedPValues[1, ], c(0.047161054, 0.014319438, NA_real_), tolerance = 1e-05) expect_equal(results17$repeatedPValues[2, ], c(0.047161054, NA_real_, NA_real_), tolerance = 1e-05) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results17), NA))) expect_output(print(results17)$show()) invisible(capture.output(expect_error(summary(results17), NA))) expect_output(summary(results17)$show()) results17CodeBased <- eval(parse(text = getObjectRCode(results17, stringWrapParagraphWidth = NULL))) expect_equal(results17CodeBased$thetaH1, results17$thetaH1, tolerance = 1e-05) expect_equal(results17CodeBased$conditionalRejectionProbabilities, results17$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results17CodeBased$conditionalPower, results17$conditionalPower, tolerance = 1e-05) expect_equal(results17CodeBased$repeatedConfidenceIntervalLowerBounds, results17$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results17CodeBased$repeatedConfidenceIntervalUpperBounds, results17$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results17CodeBased$repeatedPValues, results17$repeatedPValues, 
tolerance = 1e-05) expect_type(names(results17), "character") df <- as.data.frame(results17) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results17) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } # @refFS[Formula]{fs:multiarmRejectionRule} # @refFS[Formula]{fs:adjustedPValueDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetDunnett} # @refFS[Formula]{fs:adjustedPValueSubsetBonferroni} # @refFS[Formula]{fs:adjustedPValueSubsetSidak} # @refFS[Formula]{fs:adjustedPValueSubsetSimes} # @refFS[Formula]{fs:adjustedPValueSubsetHierarchical} # @refFS[Formula]{fs:conditionalRejectionProbabilityDunnett} # @refFS[Formula]{fs:pValueConditionalSecondStageDunnett} # @refFS[Formula]{fs:pValueUnconditionalSecondStageDunnett} # @refFS[Formula]{fs:conditionalPowerMultiArm} # @refFS[Formula]{fs:conditionalRejectionProbability} # @refFS[Formula]{fs:conditionalRejectionProbabilityShiftedBoundaries} # @refFS[Formula]{fs:conditionalRejectionProbabilityMultiArm} # @refFS[Formula]{fs:calculationRepeatedpValueMultiArm} # @refFS[Formula]{fs:adjustedPValueForRCIDunnett} # @refFS[Formula]{fs:adjustedPValueForRCIBonferroniSimes} # @refFS[Formula]{fs:adjustedPValueForRCISidak} # @refFS[Formula]{fs:computeRCIsMultiArm} # @refFS[Formula]{fs:testStatisticMultiArmSurvival} results18 <- getAnalysisResults(design = design3, dataInput = dataExample2, intersectionTest = "Dunnett", directionUpper = FALSE) ## Comparison of the results of AnalysisResultsConditionalDunnett object 'results18' with expected results expect_equal(results18$thetaH1[1, ], 0.47557061, tolerance = 1e-05) expect_equal(results18$thetaH1[2, ], NA_real_) expect_equal(results18$conditionalRejectionProbabilities[1, ], c(NA_real_, 0.20921255), tolerance = 1e-05) expect_equal(results18$conditionalRejectionProbabilities[2, ], c(NA_real_, 0.18260705), tolerance = 1e-05) expect_equal(results18$conditionalPower[1, ], c(NA_real_, NA_real_)) 
expect_equal(results18$conditionalPower[2, ], c(NA_real_, NA_real_)) expect_equal(results18$repeatedConfidenceIntervalLowerBounds[1, ], c(NA_real_, 0.27471638), tolerance = 1e-05) expect_equal(results18$repeatedConfidenceIntervalLowerBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results18$repeatedConfidenceIntervalUpperBounds[1, ], c(NA_real_, 0.81629276), tolerance = 1e-05) expect_equal(results18$repeatedConfidenceIntervalUpperBounds[2, ], c(NA_real_, NA_real_)) expect_equal(results18$repeatedPValues[1, ], c(NA_real_, 0.0032883088), tolerance = 1e-05) expect_equal(results18$repeatedPValues[2, ], c(NA_real_, NA_real_)) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(results18), NA))) expect_output(print(results18)$show()) invisible(capture.output(expect_error(summary(results18), NA))) expect_output(summary(results18)$show()) results18CodeBased <- eval(parse(text = getObjectRCode(results18, stringWrapParagraphWidth = NULL))) expect_equal(results18CodeBased$thetaH1, results18$thetaH1, tolerance = 1e-05) expect_equal(results18CodeBased$conditionalRejectionProbabilities, results18$conditionalRejectionProbabilities, tolerance = 1e-05) expect_equal(results18CodeBased$conditionalPower, results18$conditionalPower, tolerance = 1e-05) expect_equal(results18CodeBased$repeatedConfidenceIntervalLowerBounds, results18$repeatedConfidenceIntervalLowerBounds, tolerance = 1e-05) expect_equal(results18CodeBased$repeatedConfidenceIntervalUpperBounds, results18$repeatedConfidenceIntervalUpperBounds, tolerance = 1e-05) expect_equal(results18CodeBased$repeatedPValues, results18$repeatedPValues, tolerance = 1e-05) expect_type(names(results18), "character") df <- as.data.frame(results18) expect_s3_class(df, "data.frame") expect_true(nrow(df) > 0 && ncol(df) > 0) mtx <- as.matrix(results18) expect_true(is.matrix(mtx)) expect_true(nrow(mtx) > 0 && ncol(mtx) > 0) } }) rpact/tests/testthat.R0000644000176200001440000000007414275377542014531 0ustar 
liggesusers library(testthat) library(rpact) test_check("rpact") rpact/src/0000755000176200001440000000000014450551404012154 5ustar liggesusersrpact/src/f_assertions.cpp0000644000176200001440000000337714435554745015407 0ustar liggesusers /** * * -- Group sequential design -- * * This file is part of the R package rpact: * Confirmatory Adaptive Clinical Trial Design and Analysis * * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD * Licensed under "GNU Lesser General Public License" version 3 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3 * * RPACT company website: https://www.rpact.com * rpact package website: https://www.rpact.org * * Contact us for information about our services: info@rpact.com * * File version: $Revision: 7019 $ * Last changed: $Date: 2023-05-31 07:23:47 +0200 (Mi, 31 Mai 2023) $ * Last changed by: $Author: pahlke $ * */ #include // [[Rcpp::plugins(cpp11)]] using namespace Rcpp; void assertIsInInterval(double x, Rcpp::CharacterVector xName, double lower, double upper, bool lowerInclusive, bool upperInclusive) { std::string fmt = Rcpp::as(xName); if (!(x > lower && x < upper) && !lowerInclusive && !upperInclusive) { stop("Argument out of bounds: '%d' (%s) is out of bounds (%d; %d)", fmt, x, lower, upper); } else if (!(x >= lower && x < upper) && lowerInclusive && !upperInclusive) { stop("Argument out of bounds: '%d' (%s) is out of bounds [%d; %d)", fmt, x, lower, upper); } else if (!(x > lower && x <= upper) && !lowerInclusive && upperInclusive) { stop("Argument out of bounds: '%d' (%s) is out of bounds (%d; %d]", fmt, x, lower, upper); } else if (!(x >= lower && x <= upper) && lowerInclusive && upperInclusive) { stop("Argument out of bounds: '%d' (%s) is out of bounds [%d; %d]", fmt, x, lower, upper); } } void assertIsInInterval(double x, Rcpp::CharacterVector xName, double lower, double upper) { assertIsInInterval(x, xName, lower, upper, true, true); } 
rpact/src/f_design_fisher_combination_test.cpp0000644000176200001440000006555014373116327021437 0ustar liggesusers/** * * -- Fisher combination test -- * * This file is part of the R package rpact: * Confirmatory Adaptive Clinical Trial Design and Analysis * * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD * Licensed under "GNU Lesser General Public License" version 3 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3 * * RPACT company website: https://www.rpact.com * rpact package website: https://www.rpact.org * * Contact us for information about our services: info@rpact.com * * File version: $Revision: 6812 $ * Last changed: $Date: 2023-02-15 09:50:31 +0100 (Mi, 15 Feb 2023) $ * Last changed by: $Author: pahlke $ * */ #include // [[Rcpp::plugins(cpp11)]] #include "f_utilities.h" using namespace Rcpp; int C_KMAX_UPPER_BOUND_FISHER = 6; String C_FISHER_METHOD_USER_DEFINED_ALPHA = "userDefinedAlpha"; String C_FISHER_METHOD_EQUAL_ALPHA = "equalAlpha"; String C_FISHER_METHOD_FULL_ALPHA = "fullAlpha"; String C_FISHER_METHOD_NO_INTERACTION = "noInteraction"; bool isEqualCpp(double x, double y) { return std::abs(x - y) < 1e-10; } int getFisherCombinationCaseKmax2Cpp(NumericVector tVec) { return isEqualCpp((double) tVec[0], 1.0) ? 
1 : 2; } double getFisherCombinationSizeKmax2Cpp( NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue, int caseKmax) { double a1 = alpha0Vec[0]; double c1 = criticalValues[0]; double c2 = criticalValues[1]; double t2 = tVec[0]; if (caseKmax == 1) { return piValue + c2 * (log(a1) - log(c1)); } else { return piValue + pow(c2, (1 / t2)) * t2 / (t2 - 1) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2))); } } double getFisherCombinationSizeKmax2Cpp( NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue) { return getFisherCombinationSizeKmax2Cpp( alpha0Vec, criticalValues, tVec, piValue, getFisherCombinationCaseKmax2Cpp(tVec)); } double getFisherCombinationCaseKmax3Cpp(NumericVector tVec) { double t2 = tVec[0]; double t3 = tVec[1]; if (isEqualCpp(t2, 1) && isEqualCpp(t3, 1)) { return 1; } else if (!isEqualCpp(t2, t3) && !isEqualCpp(t2, 1) && !isEqualCpp(t3, 1)) { return 2; } else if (isEqualCpp(t2, t3) && !isEqualCpp(t2, 1)) { return 3; } else if (isEqualCpp(t2, 1) && !isEqualCpp(t3, 1)) { return 4; } else if (!isEqualCpp(t2, 1) && isEqualCpp(t3, 1)) { return 5; } else return -1; } double getFisherCombinationSizeKmax3Cpp( NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue, int caseKmax) { double a1 = alpha0Vec[0]; double a2 = alpha0Vec[1]; double c1 = criticalValues[0]; double c2 = criticalValues[1]; double c3 = criticalValues[2]; double t2 = tVec[0]; double t3 = tVec[1]; if (caseKmax == 1) { // Wassmer 1999, recursive formula return piValue + c3 * (log(a2) * log(a1) - log(a2) * log(c1) + 0.5 * pow((log(a1 / c2)), 2) - 0.5 * pow((log(c1 / c2)), 2)); } else if (caseKmax == 2) { return piValue + pow(c3, (1 / t3)) * t3 / (t3 - t2) * ( pow(a2, (1 - t2 / t3)) * t3 / (t3 - 1) * (pow(a1, (1 - 1 / t3)) - pow(c1, (1 - 1 / t3))) - pow(c2, (1 / t2 - 1 / t3)) * t2 / (t2 - 1) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2)))); } else if (caseKmax == 3) { return piValue + 
pow(c3, (1 / t3)) * t3 / (t3 - 1) * ( pow(a1, (1 - 1 / t3)) * (log(a2) - 1 / t2 * (log(c2) - log(a1) + t3 / (t3 - 1))) - pow(c1, (1 - 1 / t3)) * (log(a2) - 1 / t2 * (log(c2) - log(c1) + t3 / (t3 - 1)))); } else if (caseKmax == 4) { return piValue + pow(c3, (1 / t3)) * t3 / (t3 - 1) * (pow(a2, (1 - 1 / t3)) * t3 / (t3 - 1) * (pow(a1, (1 - 1 / t3)) - pow(c1, (1 - 1 / t3))) - pow(c2, (1 - 1 / t3)) * (log(a1) - log(c1))); } else if (caseKmax == 5) { return piValue + c3 / (1 - t2) * (pow(a2, (1 - t2)) * (log(a1) - log(c1)) - pow(c2, (1 / t2 - 1)) * t2 / (t2 - 1) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2)))); } else return -1; } double getFisherCombinationSizeKmax3Cpp( NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue) { return getFisherCombinationSizeKmax3Cpp( alpha0Vec, criticalValues, tVec, piValue, getFisherCombinationCaseKmax2Cpp(tVec)); } double getFisherCombinationCaseKmax4Cpp(NumericVector tVec) { double t2 = tVec[0]; double t3 = tVec[1]; double t4 = tVec[2]; return isEqualCpp(t2, 1) && isEqualCpp(t3, 1) && isEqualCpp(t4, 1) ? 
1L : 2L; } double getFisherCombinationSizeApproximatelyKmax4Cpp( NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue, int caseKmax) { double a1 = alpha0Vec[0]; double a2 = alpha0Vec[1]; double a3 = alpha0Vec[2]; double c1 = criticalValues[0]; double c2 = criticalValues[1]; double c3 = criticalValues[2]; double c4 = criticalValues[3]; double t2 = tVec[0]; double t3 = tVec[1]; double t4 = tVec[2]; // Wassmer 1999, recursive formula if (caseKmax == 1) { return (piValue + c4 * (1.0 / 6.0 * pow(log(a1 * a2 / c3), 3) - 1.0 / 6.0 * pow(log(c1 * a2 / c3), 3) + 0.5 * pow(log(c2 / c3), 2) * log(c1) - 0.5 * pow(log(c2 / c3), 2) * log(a1) + 0.5 * pow(log(a1 / c2), 2) * log(a3) - 0.5 * pow(log(c1 / c2), 2) * log(a3) + log(a3) * log(a2) * log(a1) - log(c1) * log(a2) * log(a3))); } else { //general case for K = 4 double eps = 1e-05; if (isEqualCpp(t2, 1)) t2 += eps; if (isEqualCpp(t3, 1)) t3 += eps; if (isEqualCpp(t4, 1)) t4 += eps; if (isEqualCpp(t2, t3)) t3 += eps; if (isEqualCpp(t2, t4)) t4 += eps; if (isEqualCpp(t3, t4)) t4 += eps; return piValue + pow(c4, (1.0 / t4)) * t4 / (t4 - t3) * ( t4 / (t4 - t2) * t4 / (t4 - 1.0) * pow(a3, (1.0 - t3 / t4)) * pow(a2, (1.0 - t2 / t4)) * (pow(a1, (1.0 - 1.0 / t4)) - pow(c1, (1.0 - 1.0 / t4))) - t4 / (t4 - t2) * t2 / (t2 - 1.0) * pow(a3, (1.0 - t3 / t4)) * pow(c2, (1.0 / t2 - 1.0 / t4)) * (pow(a1, (1.0 - 1.0 / t2)) - pow(c1, (1.0 - 1.0 / t2))) - t3 / (t3 - t2) * t3 / (t3 - 1.0) * pow(c3, (1.0 / t3 - 1.0 / t4)) * pow(a2, (1.0 - t2 / t3)) * (pow(a1, (1.0 - 1.0 / t3)) - pow(c1, (1.0 - 1.0 / t3))) + t3 / (t3 - t2) * t2 / (t2 - 1.0) * pow(c3, (1.0 / t3 - 1.0 / t4)) * pow(c2, (1.0 / t2 - 1.0 / t3)) * (pow(a1, (1 - 1.0 / t2)) - pow(c1, (1.0 - 1.0 / t2)))); } } double getFisherCombinationSizeApproximatelyKmax4Cpp( NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue) { return getFisherCombinationSizeApproximatelyKmax4Cpp( alpha0Vec, criticalValues, tVec, piValue, 
getFisherCombinationCaseKmax4Cpp(tVec)); } double getFisherCombinationCaseKmax5Cpp(NumericVector tVec) { double t2 = tVec[0]; double t3 = tVec[1]; double t4 = tVec[2]; double t5 = tVec[3]; return isEqualCpp(t2, 1) && isEqualCpp(t3, 1) && isEqualCpp(t4, 1) && isEqualCpp(t5, 1) ? 1 : 2; } double getFisherCombinationSizeApproximatelyKmax5Cpp( NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue, int caseKmax) { double a1 = alpha0Vec[0]; double a2 = alpha0Vec[1]; double a3 = alpha0Vec[2]; double a4 = alpha0Vec[3]; double c1 = criticalValues[0]; double c2 = criticalValues[1]; double c3 = criticalValues[2]; double c4 = criticalValues[3]; double c5 = criticalValues[4]; double t2 = tVec[0]; double t3 = tVec[1]; double t4 = tVec[2]; double t5 = tVec[3]; // Wassmer 1999, recursive formula if (caseKmax == 1) { return piValue + c5 * (1.0 / 24.0 * pow(log(a1 * a2 * a3 / c4), 4) - 1.0 / 24.0 * pow(log(c1 * a2 * a3 / c4), 4) + 1.0 / 6.0 * pow(log(c2 * a3 / c4), 3) * log(c1) - 1.0 / 6.0 * pow(log(c2 * a3 / c4), 3) * log(a1) + 1.0 / 4.0 * pow(log(c3 / c4), 2) * pow(log(c1 / c2), 2) - 1.0 / 4.0 * pow(log(c3 / c4), 2) * pow(log(a1 / c2), 2) + 0.5 * pow(log(c3 / c4), 2) * log(a2) * log(c1) - 0.5 * pow(log(c3 / c4), 2) * log(a2) * log(a1) + 1.0 / 6.0 * pow(log(a1 * a2 / c3), 3) * log(a4) - 1.0 / 6.0 * pow(log(c1 * a2 / c3), 3) * log(a4) + 0.5 * pow(log(c2 / c3), 2) * log(a4) * log(c1) - 0.5 * pow(log(c2 / c3), 2) * log(a4) * log(a1) + 0.5 * pow(log(a1 / c2), 2) * log(a3) * log(a4) - 0.5 * pow(log(c1 / c2), 2) * log(a3) * log(a4) + log(a4) * log(a3) * log(a2) * log(a1) - log(c1) * log(a2) * log(a3) * log(a4)); } else { //general case for K = 5 double eps = 1e-05; if (isEqualCpp(t2, 1)) t2 = t2 + eps; if (isEqualCpp(t3, 1)) t3 = t3 + eps; if (isEqualCpp(t4, 1)) t4 = t4 + eps; if (isEqualCpp(t5, 1)) t5 = t5 + eps; if (isEqualCpp(t2, t3)) t3 = t2 + eps; if (isEqualCpp(t2, t4)) t4 = t2 + eps; if (isEqualCpp(t2, t5)) t5 = t2 + eps; if (isEqualCpp(t3, t4)) 
t4 = t3 + eps; if (isEqualCpp(t3, t5)) t5 = t3 + eps; if (isEqualCpp(t4, t5)) t5 = t4 + eps; return piValue + pow(c5, (1.0 / t5)) * t5 / (t5 - t4) * ( t5 / (t5 - t3) * t5 / (t5 - t2) * t5 / (t5 - 1.0) * pow(a4, (1.0 - t4 / t5)) * pow(a3, (1.0 - t3 / t5)) * pow(a2, (1.0 - t2 / t5)) * (pow(a1, (1.0 - 1.0 / t5)) - pow(c1, (1.0 - 1.0 / t5))) - t5 / (t5 - t3) * t5 / (t5 - t2) * t2 / (t2 - 1.0) * pow(a4, (1.0 - t4 / t5)) * pow(a3, (1.0 - t3 / t5)) * pow(c2, (1.0 / t2 - 1 / t5)) * (pow(a1, (1.0 - 1.0 / t2)) - pow(c1, (1.0 - 1.0 / t2))) - t5 / (t5 - t3) * t3 / (t3 - t2) * t3 / (t3 - 1.0) * pow(a4, (1.0 - t4 / t5)) * pow(c3, (1.0 / t3 - 1 / t5)) * pow(a2, (1.0 - t2 / t3)) * (pow(a1, (1.0 - 1.0 / t3)) - pow(c1, (1.0 - 1.0 / t3))) + t5 / (t5 - t3) * t3 / (t3 - t2) * t2 / (t2 - 1.0) * pow(a4, (1.0 - t4 / t5)) * pow(c3, (1.0 / t3 - 1 / t5)) * pow(c2, (1.0 / t2 - 1 / t3)) * (pow(a1, (1.0 - 1.0 / t2)) - pow(c1, (1.0 - 1.0 / t2))) - t4 / (t4 - t3) * t4 / (t4 - t2) * t4 / (t4 - 1.0) * pow(c4, (1.0 / t4 - 1.0 / t5)) * pow(a3, (1.0 - t3 / t4)) * pow(a2, (1.0 - t2 / t4)) * (pow(a1, (1.0 - 1.0 / t4)) - pow(c1, (1.0 - 1.0 / t4))) + t4 / (t4 - t3) * t4 / (t4 - t2) * t2 / (t2 - 1.0) * pow(c4, (1.0 / t4 - 1.0 / t5)) * pow(a3, (1.0 - t3 / t4)) * pow(c2, (1.0 / t2 - 1 / t4)) * (pow(a1, (1.0 - 1.0 / t2)) - pow(c1, (1.0 - 1.0 / t2))) + t4 / (t4 - t3) * t3 / (t3 - t2) * t3 / (t3 - 1.0) * pow(c4, (1.0 / t4 - 1.0 / t5)) * pow(c3, (1.0 / t3 - 1 / t4)) * pow(a2, (1.0 - t2 / t3)) * (pow(a1, (1.0 - 1.0 / t3)) - pow(c1, (1.0 - 1.0 / t3))) - t4 / (t4 - t3) * t3 / (t3 - t2) * t2 / (t2 - 1.0) * pow(c4, (1.0 / t4 - 1.0 / t5)) * pow(c3, (1.0 / t3 - 1 / t4)) * pow(c2, (1.0 / t2 - 1 / t3)) * (pow(a1, (1.0 - 1.0 / t2)) - pow(c1, (1.0 - 1.0 / t2)))); } } double getFisherCombinationSizeApproximatelyKmax5Cpp( NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue) { return getFisherCombinationSizeApproximatelyKmax5Cpp( alpha0Vec, criticalValues, tVec, piValue, 
getFisherCombinationCaseKmax5Cpp(tVec)); } double getFisherCombinationCaseKmax6Cpp(NumericVector tVec) { double t2 = tVec[0]; double t3 = tVec[1]; double t4 = tVec[2]; double t5 = tVec[3]; double t6 = tVec[4]; return isEqualCpp(t2, 1) && isEqualCpp(t3, 1) && isEqualCpp(t4, 1) && isEqualCpp(t5, 1) && isEqualCpp(t6, 1) ? 1 : 2; } double getFisherCombinationSizeApproximatelyKmax6Cpp( NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue, int caseKmax) { double a1 = alpha0Vec[0]; double a2 = alpha0Vec[1]; double a3 = alpha0Vec[2]; double a4 = alpha0Vec[3]; double a5 = alpha0Vec[4]; double c1 = criticalValues[0]; double c2 = criticalValues[1]; double c3 = criticalValues[2]; double c4 = criticalValues[3]; double c5 = criticalValues[4]; double c6 = criticalValues[5]; double t2 = tVec[0]; double t3 = tVec[1]; double t4 = tVec[2]; double t5 = tVec[3]; double t6 = tVec[4]; // Wassmer 1999, recursive formula if (caseKmax == 1) { return piValue + c6 * ( log(a1) * log(a2) * log(a3) * log(a4) * log(a5) + 1.0 / 24.0 * pow(log(a1 * a2 * a3 / c4), 4) * log(a5) + 1.0 / 120.0 * pow(log(a1 * a2 * a3 * a4 / c5), 5) - 0.5 * pow(log(c4 / c5), 2) * log(a3) * log(a2) * log(a1) + 1.0 / 6.0 * pow(log(a1 * a2 / c3), 3) * log(a4) * log(a5) - 0.5 * pow(log(c3 / c4), 2) * log(a5) * log(a2) * log(a1) - 1.0 / 6.0 * pow(log(c3 * a4 / c5), 3) * log(a2) * log(a1) - 1.0 / 12.0 * pow(log(a1 * a2 / c3), 3) * pow(log(c4 / c5), 2) + 0.5 * pow(log(a1 / c2), 2) * log(a3) * log(a4) * log(a5) - 1.0 / 6.0 * pow(log(c2 * a3 / c4), 3) * log(a5) * log(a1) - 1.0 / 24.0 * pow(log(c2 * a3 * a4 / c5), 4) * log(a1) - 1.0 / 4.0 * pow(log(c4 / c5), 2) * log(a3) * pow(log(a1 / c2), 2) - 0.5 * pow(log(c2 / c3), 2) * log(a4) * log(a5) * log(a1) - 1.0 / 4.0 * pow(log(c3 / c4), 2) * log(a5) * pow(log(a1 / c2), 2) - 1.0 / 12.0 * pow(log(c3 * a4 / c5), 3) * pow(log(a1 / c2), 2) + 1.0 / 4.0 * pow(log(c2 / c3), 2) * pow(log(c4 / c5), 2) * log(a1) - log(c1) * log(a2) * log(a3) * log(a4) * 
log(a5) - 1.0 / 24.0 * pow(log(c1 * a2 * a3 / c4), 4) * log(a5) - 1.0 / 120.0 * pow(log(c1 * a2 * a3 * a4 / c5), 5) + 0.5 * pow(log(c4 / c5), 2) * log(a3) * log(a2) * log(c1) - 1.0 / 6.0 * pow(log(c1 * a2 / c3), 3) * log(a4) * log(a5) + 0.5 * pow(log(c3 / c4), 2) * log(a5) * log(a2) * log(c1) + 1.0 / 6.0 * pow(log(c3 * a4 / c5), 3) * log(a2) * log(c1) + 1.0 / 12.0 * pow(log(c1 * a2 / c3), 3) * pow(log(c4 / c5), 2) - 0.5 * pow(log(c1 / c2), 2) * log(a3) * log(a4) * log(a5) + 1.0 / 6.0 * pow(log(c2 * a3 / c4), 3) * log(a5) * log(c1) + 1.0 / 24.0 * pow(log(c2 * a3 * a4 / c5), 4) * log(c1) + 1.0 / 4.0 * pow(log(c4 / c5), 2) * log(a3) * pow(log(c1 / c2), 2) + 0.5 * pow(log(c2 / c3), 2) * log(a4) * log(a5) * log(c1) + 1.0 / 4.0 * pow(log(c3 / c4), 2) * log(a5) * pow(log(c1 / c2), 2) + 1.0 / 12.0 * pow(log(c3 * a4 / c5), 3) * pow(log(c1 / c2), 2) - 1.0 / 4.0 * pow(log(c2 / c3), 2) * pow(log(c4 / c5), 2) * log(c1)); } else { //general case for K = 6 double eps = 1e-04; if (isEqualCpp(t2, 1)) t2 = t2 + eps; if (isEqualCpp(t3, 1)) t3 = t3 + eps; if (isEqualCpp(t4, 1)) t4 = t4 + eps; if (isEqualCpp(t5, 1)) t5 = t5 + eps; if (isEqualCpp(t6, 1)) t6 = t6 + eps; if (isEqualCpp(t2, t3)) t3 = t2 + eps; if (isEqualCpp(t2, t4)) t4 = t2 + eps; if (isEqualCpp(t2, t5)) t5 = t2 + eps; if (isEqualCpp(t2, t6)) t6 = t2 + eps; if (isEqualCpp(t3, t4)) t4 = t3 + eps; if (isEqualCpp(t3, t5)) t5 = t3 + eps; if (isEqualCpp(t3, t6)) t6 = t3 + eps; if (isEqualCpp(t4, t5)) t5 = t4 + eps; if (isEqualCpp(t4, t6)) t6 = t4 + eps; if (isEqualCpp(t5, t6)) t6 = t5 + eps; return piValue + pow(c6, (1 / t6)) * t6 / (t6 - t5) * ( t6 / (t6 - t4) * t6 / (t6 - t3) * t6 / (t6 - t2) * t6 / (t6 - 1) * pow(a5, (1 - t5 / t6)) * pow(a4, (1 - t4 / t6)) * pow(a3, (1 - t3 / t6)) * pow(a2, (1 - t2 / t6)) * (pow(a1, (1 - 1 / t6)) - pow(c1, (1 - 1 / t6))) - t6 / (t6 - t4) * t6 / (t6 - t3) * t6 / (t6 - t2) * t2 / (t2 - 1) * pow(a5, (1 - t5 / t6)) * pow(a4, (1 - t4 / t6)) * pow(a3, (1 - t3 / t6)) * pow(c2, (1 / t2 - 1 / t6)) * 
(pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2))) - t6 / (t6 - t4) * t6 / (t6 - t3) * t3 / (t3 - t2) * t3 / (t3 - 1) * pow(a5, (1 - t5 / t6)) * pow(a4, (1 - t4 / t6)) * pow(c3, (1 / t3 - 1 / t6)) * pow(a2, (1 - t2 / t3)) * (pow(a1, (1 - 1 / t3)) - pow(c1, (1 - 1 / t3))) + t6 / (t6 - t4) * t6 / (t6 - t3) * t3 / (t3 - t2) * t2 / (t2 - 1) * pow(a5, (1 - t5 / t6)) * pow(a4, (1 - t4 / t6)) * pow(c3, (1 / t3 - 1 / t6)) * pow(c2, (1 / t2 - 1 / t3)) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2))) - t6 / (t6 - t4) * t4 / (t4 - t3) * t4 / (t4 - t2) * t4 / (t4 - 1) * pow(a5, (1 - t5 / t6)) * pow(c4, (1 / t4 - 1 / t6)) * pow(a3, (1 - t3 / t4)) * pow(a2, (1 - t2 / t4)) * (pow(a1, (1 - 1 / t4)) - pow(c1, (1 - 1 / t4))) + t6 / (t6 - t4) * t4 / (t4 - t3) * t4 / (t4 - t2) * t2 / (t2 - 1) * pow(a5, (1 - t5 / t6)) * pow(c4, (1 / t4 - 1 / t6)) * pow(a3, (1 - t3 / t4)) * pow(c2, (1 / t2 - 1 / t4)) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2))) + t6 / (t6 - t4) * t4 / (t4 - t3) * t3 / (t3 - t2) * t3 / (t3 - 1) * pow(a5, (1 - t5 / t6)) * pow(c4, (1 / t4 - 1 / t6)) * pow(c3, (1 / t3 - 1 / t4)) * pow(a2, (1 - t2 / t3)) * (pow(a1, (1 - 1 / t3)) - pow(c1, (1 - 1 / t3))) - t6 / (t6 - t4) * t4 / (t4 - t3) * t3 / (t3 - t2) * t2 / (t2 - 1) * pow(a5, (1 - t5 / t6)) * pow(c4, (1 / t4 - 1 / t6)) * pow(c3, (1 / t3 - 1 / t4)) * pow(c2, (1 / t2 - 1 / t3)) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2))) - t5 / (t5 - t4) * t5 / (t5 - t3) * t5 / (t5 - t2) * t5 / (t5 - 1) * pow(c5, (1 / t5 - 1 / t6)) * pow(a4, (1 - t4 / t5)) * pow(a3, (1 - t3 / t5)) * pow(a2, (1 - t2 / t5)) * (pow(a1, (1 - 1 / t5)) - pow(c1, (1 - 1 / t5))) + t5 / (t5 - t4) * t5 / (t5 - t3) * t5 / (t5 - t2) * t2 / (t2 - 1) * pow(c5, (1 / t5 - 1 / t6)) * pow(a4, (1 - t4 / t5)) * pow(a3, (1 - t3 / t5)) * pow(c2, (1 / t2 - 1 / t5)) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2))) + t5 / (t5 - t4) * t5 / (t5 - t3) * t3 / (t3 - t2) * t3 / (t3 - 1) * pow(c5, (1 / t5 - 1 / t6)) * pow(a4, (1 - t4 / t5)) * pow(c3, (1 / t3 - 1 / t5)) * pow(a2, 
(1 - t2 / t3)) * (pow(a1, (1 - 1 / t3)) - pow(c1, (1 - 1 / t3))) - t5 / (t5 - t4) * t5 / (t5 - t3) * t3 / (t3 - t2) * t2 / (t2 - 1) * pow(c5, (1 / t5 - 1 / t6)) * pow(a4, (1 - t4 / t5)) * pow(c3, (1 / t3 - 1 / t5)) * pow(c2, (1 / t2 - 1 / t3)) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2))) + t5 / (t5 - t4) * t4 / (t4 - t3) * t4 / (t4 - t2) * t4 / (t4 - 1) * pow(c5, (1 / t5 - 1 / t6)) * pow(c4, (1 / t4 - 1 / t5)) * pow(a3, (1 - t3 / t4)) * pow(a2, (1 - t2 / t4)) * (pow(a1, (1 - 1 / t4)) - pow(c1, (1 - 1 / t4))) - t5 / (t5 - t4) * t4 / (t4 - t3) * t4 / (t4 - t2) * t2 / (t2 - 1) * pow(c5, (1 / t5 - 1 / t6)) * pow(c4, (1 / t4 - 1 / t5)) * pow(a3, (1 - t3 / t4)) * pow(c2, (1 / t2 - 1 / t4)) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2))) - t5 / (t5 - t4) * t4 / (t4 - t3) * t3 / (t3 - t2) * t3 / (t3 - 1) * pow(c5, (1 / t5 - 1 / t6)) * pow(c4, (1 / t4 - 1 / t5)) * pow(c3, (1 / t3 - 1 / t4)) * pow(a2, (1 - t2 / t3)) * (pow(a1, (1 - 1 / t3)) - pow(c1, (1 - 1 / t3))) + t5 / (t5 - t4) * t4 / (t4 - t3) * t3 / (t3 - t2) * t2 / (t2 - 1) * pow(c5, (1 / t5 - 1 / t6)) * pow(c4, (1 / t4 - 1 / t5)) * pow(c3, (1 / t3 - 1 / t4)) * pow(c2, (1 / t2 - 1 / t3)) * (pow(a1, (1 - 1 / t2)) - pow(c1, (1 - 1 / t2)))); } } double getFisherCombinationSizeApproximatelyKmax6Cpp(NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, double piValue) { return getFisherCombinationSizeApproximatelyKmax6Cpp( alpha0Vec, criticalValues, tVec, piValue, getFisherCombinationCaseKmax6Cpp(tVec)); } // [[Rcpp::export]] double getFisherCombinationSizeCpp(double kMax, NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, NumericVector cases) { if (criticalValues.length() < 1 || criticalValues.length() > C_KMAX_UPPER_BOUND_FISHER) { stop("length of 'criticalValues' (%d) is out of bounds [1; %d]", criticalValues.length(), C_KMAX_UPPER_BOUND_FISHER); } double piValue = criticalValues[0]; if (kMax > 1) { piValue = getFisherCombinationSizeKmax2Cpp(alpha0Vec, criticalValues, 
tVec, piValue, (int) cases[0]);
	}
	// Each stage k > 1 adds its stage-wise rejection probability on top of 'piValue'.
	if (kMax > 2) {
		piValue = getFisherCombinationSizeKmax3Cpp(alpha0Vec, criticalValues, tVec, piValue, (int) cases[1]);
	}
	if (kMax > 3) {
		piValue = getFisherCombinationSizeApproximatelyKmax4Cpp(alpha0Vec, criticalValues, tVec, piValue, (int) cases[2]);
	}
	if (kMax > 4) {
		piValue = getFisherCombinationSizeApproximatelyKmax5Cpp(alpha0Vec, criticalValues, tVec, piValue, (int) cases[3]);
	}
	if (kMax > 5) {
		piValue = getFisherCombinationSizeApproximatelyKmax6Cpp(alpha0Vec, criticalValues, tVec, piValue, (int) cases[4]);
	}
	return piValue;
}

// Decides the outcome of a single simulated trial at the given stage:
// returns 0 if the trial stops for futility at a non-final stage
// (stage-wise p-value reaches alpha0), 1 if it rejects (weighted
// Fisher product falls below the stage's critical value), and -1 if
// the trial continues to the next stage.
int getRejectValueForOneTrialCpp(int kMax, NumericVector alpha0, NumericVector criticalValues,
		NumericVector weightsFisher, int stage, NumericVector pValues) {

	if (stage < kMax && pValues[stage - 1] >= alpha0[stage - 1]) {
		return 0;
	}

	// Fisher combination statistic: product of stage-wise p-values,
	// each raised to its Fisher weight.
	double p = 1;
	for (int i = 0; i < stage; i++) {
		p *= pow((double) pValues[i], (double) weightsFisher[i]);
	}
	return p < criticalValues[stage - 1] ? 1 : -1;
}

// Monte-Carlo estimate of the type I error rate of a Fisher combination
// design: simulates 'iterations' trials with uniform p-values under H0 and
// returns the fraction of trials that reject at any stage.
// [[Rcpp::export]]
double getSimulatedAlphaCpp(int kMax, NumericVector alpha0,
		NumericVector criticalValues, NumericVector tVec, int iterations) {

	// Fisher weights: the first stage always has weight 1, the scales 'tVec'
	// apply to stages 2..kMax ('clone' avoids mutating the caller's vector).
	NumericVector weightsFisher = clone(tVec);
	weightsFisher.push_front(1);
	double var = 0; // count of rejecting trials
	for (int i = 0; i < iterations; i++) {
		NumericVector pValues = runif(kMax);
		int rejectValue = 0;
		// Walk through the stages until the trial stops (futility or rejection).
		for (int stage = 1; stage <= kMax; stage++) {
			rejectValue = getRejectValueForOneTrialCpp(
				kMax, alpha0, criticalValues, weightsFisher, stage, pValues);
			if (rejectValue >= 0) {
				break;
			}
		}
		if (rejectValue > 0) {
			var += rejectValue;
		}
	}
	return var / iterations;
}

// Determines the integration case per stage (used to pick the matching
// closed-form size formula); returns an empty vector for a single-stage design.
// [[Rcpp::export]]
NumericVector getFisherCombinationCasesCpp(int kMax, NumericVector tVec) {
	if (kMax == 1) {
		return NumericVector(0);
	}
	NumericVector cases = {};
	if (kMax > 1) {
		cases.push_back(getFisherCombinationCaseKmax2Cpp(tVec));
	}
	if (kMax > 2) {
		cases.push_back(getFisherCombinationCaseKmax3Cpp(tVec));
	}
	if (kMax > 3) {
		cases.push_back(getFisherCombinationCaseKmax4Cpp(tVec));
	}
	if (kMax > 4) {
cases.push_back(getFisherCombinationCaseKmax5Cpp(tVec)); } if (kMax > 5) { cases.push_back(getFisherCombinationCaseKmax6Cpp(tVec)); } return cases; } double getFisherCombinationSizeCpp(double kMax, NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec) { return getFisherCombinationSizeCpp(kMax, alpha0Vec, criticalValues, tVec, getFisherCombinationCasesCpp(kMax, tVec)); } // [[Rcpp::export]] List getDesignFisherTryCpp(int kMax, double alpha, double tolerance, NumericVector criticalValues, NumericVector scale, NumericVector alpha0Vec, NumericVector userAlphaSpending, String method) { NumericVector cases = getFisherCombinationCasesCpp(kMax, scale); NumericVector alphaSpent(kMax); NumericVector stageLevels(kMax); bool nonStochasticCurtailment; double size = 0; if (method == C_FISHER_METHOD_USER_DEFINED_ALPHA) { criticalValues[0] = userAlphaSpending[0]; alphaSpent = clone(criticalValues); if (kMax > 1) { for (int k = 2; k <= kMax; k++) { double cLower = 0; double cUpper = alpha; double prec = 1; while (prec > tolerance) { double alpha1 = (cLower + cUpper) * 0.5; criticalValues[k - 1] = alpha1; size = getFisherCombinationSizeCpp( k, rangeVector(alpha0Vec, 0, k - 2), criticalValues, scale, cases); if (size < userAlphaSpending[k - 1]) { cLower = alpha1; } else { cUpper = alpha1; } prec = cUpper - cLower; } } } } else { double prec = 1; double cLower = 0; double cUpper = alpha; double maxIter = 100; while (prec > tolerance && maxIter >= 0) { double alpha1 = (cLower + cUpper) * 0.5; if (method == C_FISHER_METHOD_EQUAL_ALPHA) { for (int k = 1; k <= kMax; k++) { criticalValues[k - 1] = zeroin([&](double c) { return getFisherCombinationSizeCpp( k, rep(1.0, k - 1), rep(c, k), scale, cases) - alpha1; }, tolerance, alpha, tolerance, 1000); } } else if (method == C_FISHER_METHOD_FULL_ALPHA) { for (int k = 0; k < kMax - 1; k++) { double prec2 = 1; double cLower2 = 0; double cUpper2 = alpha; double c = 0, y; while (prec2 > tolerance) { c = (cLower2 + cUpper2) * 
0.5; y = getFisherCombinationSizeCpp(k + 1, rep(1.0, k), rep(c, k + 1), scale, cases); if (y < alpha1) { cLower2 = c; } else { cUpper2 = c; } prec2 = cUpper2 - cLower2; } criticalValues[k] = c; } criticalValues[kMax - 1] = zeroin([&](double c) { return getFisherCombinationSizeCpp(kMax, rep(1.0, kMax - 1), rep(c, kMax), scale, cases) - alpha; }, tolerance, alpha, tolerance, 1000); } else if (method == C_FISHER_METHOD_NO_INTERACTION) { criticalValues[kMax - 1] = zeroin([&](double c) { return getFisherCombinationSizeCpp(kMax, rep(1.0, kMax - 1), rep(c, kMax), scale, cases) - alpha; }, tolerance, alpha, tolerance, 1000); criticalValues[0] = alpha1; if (kMax < 2) Rcout << "error: kMax < 2"; for (int k = kMax - 1; k >= 2; k--) { criticalValues[k - 1] = criticalValues[k] / pow((double) alpha0Vec[k - 1], 1 / scale[k - 1]); } } else { throw std::invalid_argument("method in use is unkown. Use a valid method instead."); } size = getFisherCombinationSizeCpp(kMax, alpha0Vec, criticalValues, scale, cases); if (size < alpha) { cLower = alpha1; } else { cUpper = alpha1; } prec = cUpper - cLower; maxIter--; } } for (int k = 1; k <= kMax; k++) { stageLevels[k - 1] = getFisherCombinationSizeCpp( k, rep(1.0, k - 1), rep((double) criticalValues[k - 1], k), scale, cases); alphaSpent[k - 1] = getFisherCombinationSizeCpp( k, rangeVector(alpha0Vec, 0, k - 2), rangeVector(criticalValues, 0, k - 1), scale, cases); } nonStochasticCurtailment = stageLevels[0] < 1e-10; if (nonStochasticCurtailment) { for (int k = 1; k <= kMax; k++) { stageLevels[k - 1] = getFisherCombinationSizeCpp( k, rep(1.0, k - 1), rep((double) criticalValues[k - 1], k), scale, cases); alphaSpent[k - 1] = getFisherCombinationSizeCpp( k, rangeVector(alpha0Vec, 0, k - 2), rangeVector(criticalValues, 0, k - 1), scale, cases); } } return List::create( _["criticalValues"] = criticalValues, _["alphaSpent"] = alphaSpent, _["stageLevels"] = stageLevels, _["nonStochasticCurtailment"] = nonStochasticCurtailment, _["size"] = size); } 
rpact/src/f_design_group_sequential.cpp0000644000176200001440000012071214407257032020111 0ustar liggesusers/** * * -- Group sequential design -- * * This file is part of the R package rpact: * Confirmatory Adaptive Clinical Trial Design and Analysis * * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD * Licensed under "GNU Lesser General Public License" version 3 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3 * * RPACT company website: https://www.rpact.com * rpact package website: https://www.rpact.org * * Contact us for information about our services: info@rpact.com * * File version: $Revision: 6890 $ * Last changed: $Date: 2023-03-24 09:23:55 +0100 (Fri, 24 Mar 2023) $ * Last changed by: $Author: pahlke $ * */ #include // [[Rcpp::plugins(cpp11)]] #include "f_utilities.h" #include "f_simulation_survival_utilities.h" using namespace Rcpp; const int C_MAX_NUMBER_OF_ITERATIONS = 100; const int C_UPPER_BOUNDS_DEFAULT = 8; const int C_CONST_NEWTON_COTES_2 = 15; const int C_CONST_NEWTON_COTES_4 = 8; const int C_NEWTON_COTES_MULTIPLIER = 6; const int C_NUMBER_OF_GRID_POINTS_ONE_SIDED = C_CONST_NEWTON_COTES_2 * C_NEWTON_COTES_MULTIPLIER + 1; const int C_NUMBER_OF_GRID_POINTS_TWO_SIDED = C_CONST_NEWTON_COTES_4 * C_NEWTON_COTES_MULTIPLIER + 1; const NumericVector C_NEWTON_COTES_VEC_4 = NumericVector::create(14, 32, 12, 32); const NumericVector C_NEWTON_COTES_VEC_5 = NumericVector::create(38, 75, 50, 50, 75); const NumericVector C_NEWTON_COTES_VEC_6 = NumericVector::create(82, 216, 27, 272, 27, 216); const double C_FUTILITY_BOUNDS_DEFAULT = -6; const String C_TYPE_OF_DESIGN_AS_USER = "asUser"; const String C_TYPE_OF_DESIGN_BS_USER = "bsUser"; const String C_TYPE_OF_DESIGN_AS_P = "asP"; const String C_TYPE_OF_DESIGN_BS_P = "bsP"; const String C_TYPE_OF_DESIGN_AS_OF = "asOF"; const String C_TYPE_OF_DESIGN_BS_OF = "bsOF"; const String C_TYPE_OF_DESIGN_AS_KD = "asKD"; const String C_TYPE_OF_DESIGN_BS_KD = "bsKD"; const String 
C_TYPE_OF_DESIGN_AS_HSD = "asHSD";
const String C_TYPE_OF_DESIGN_BS_HSD = "bsHSD";
const String C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY = "noEarlyEfficacy";

// Normal density evaluated directly in C++ (no round-trip to R).
// inv_sqrt_2pi = 1 / sqrt(2 * pi).
// NOTE(review): the float literal 0.5f is promoted to double before the
// multiplication, so no precision is lost here; kept verbatim.
double dnorm2(const double x, const double mean, const double stDev) {
    static const double inv_sqrt_2pi = 0.3989422804014327;
    double a = (x - mean) / stDev;
    return inv_sqrt_2pi / stDev * exp(-0.5f * a * a);
}

// One point of the stage-k density, obtained by propagating the previous
// stage's quadrature (grid points x2 with weighted density values dn2)
// through the normal transition kernel between information times.
//
// x               quantile at which the stage-k density is evaluated
// k               current stage (decremented immediately; the body then reads
//                 informationRates[k - 1] and [k - 2], i.e. the original
//                 stages k - 1 and k - 2)
// informationRates cumulative information fractions per stage
// epsilonVec      increments of informationRates (set up by the callers below)
// x2, dn2         grid points and weighted density values of the previous stage;
//                 assumed to hold at least n entries — callers pass the full grid
// n               number of grid points summed over
double getDensityValue(double x, int k, NumericVector informationRates,
        NumericVector epsilonVec, NumericVector x2, NumericVector dn2, int n) {
    try {
        k--;
        // scaling factor sqrt(I_{k-1} / (I_{k-1} - I_{k-2})) of the transition kernel
        double part1 = sqrt((double) informationRates[k - 1] / (double) epsilonVec[k - 1]);
        double sqrtInfRates1 = sqrt((double) informationRates[k - 1]);
        double sqrtInfRates2 = sqrt((double) informationRates[k - 2]);
        const double mean = 0;
        const double stDev = 1;
        // hoisted loop invariants
        double prod1 = x * sqrtInfRates1;
        double divisor = sqrt((double) epsilonVec[k - 1]);
        double resultValue = 0;
        for (int i = 0; i < n; i++) {
            double dnormValue = dnorm2((prod1 - (x2[i] * sqrtInfRates2)) / divisor, mean, stDev);
            double prod = part1 * dnormValue * dn2[i];
            resultValue += prod;
        }
        return resultValue;
    } catch (const std::exception& e) {
        throw Exception("Failed to get density value (x = %f, k = %i, n = %i): %s",
            x, k, n, e.what());
    }
}

// Vectorised wrapper around getDensityValue(): evaluates the stage-k density at
// every grid point in x. At stage 2 the density is simply the standard normal;
// from stage 3 on it is built recursively from the previous stage's grid
// (x2, dn2). Returns a vector of x.size() density values.
NumericVector getDensityValues(NumericVector x, int k,
        NumericVector informationRates, NumericVector epsilonVec,
        NumericVector x2, NumericVector dn2) {
    try {
        int n = x.size();
        NumericVector results = NumericVector(n, NA_REAL);
        for (int i = 0; i < n; i++) {
            if (k == 2) {
                // first recursion step: stage-1 statistic is standard normal
                results[i] = dnorm2((double) x[i], 0.0, 1.0);
            } else {
                results[i] = getDensityValue((double) x[i], k,
                    informationRates, epsilonVec, x2, dn2, n);
            }
        }
        return results;
    } catch (const std::exception& e) {
        throw Exception("Failed to get density values (k = %i): %s", k, e.what());
    }
}

// Composite Newton-Cotes quadrature weights for a uniform grid with spacing dx.
// The branch is selected by the compile-time constant C_NEWTON_COTES_MULTIPLIER
// (6 in this file, so the C_NEWTON_COTES_VEC_6 branch is the live one); the
// other branches are kept for alternative configurations. The returned vector
// has constNewtonCotes * C_NEWTON_COTES_MULTIPLIER + 1 entries: the boundary
// weight x at both ends, and the repeated interior pattern in between.
// Starting the copy loop at i = 1 is intentional: vec[0] is the interior
// junction weight, which is replaced by the endpoint weight x at position 0.
NumericVector getW(double dx, int constNewtonCotes) {
    try {
        NumericVector vec;
        double x;
        if (C_NEWTON_COTES_MULTIPLIER == 4) {
            vec = vectorMultiply(C_NEWTON_COTES_VEC_4, dx / 90.0);
            vec = 4 * rep(vec, constNewtonCotes);
            x = 28.0 * dx / 90.0;
        } else if (C_NEWTON_COTES_MULTIPLIER == 5) {
            vec = vectorMultiply(C_NEWTON_COTES_VEC_5, dx / 288.0);
            vec = 5 * rep(vec, constNewtonCotes);
            x = 95.0 * dx / 288.0;
        } else if (C_NEWTON_COTES_MULTIPLIER == 6) {
            vec = vectorMultiply(C_NEWTON_COTES_VEC_6, dx / 840.0);
            vec = 6 * rep(vec, constNewtonCotes);
            x = 246.0 * dx / 840.0;
        }
        NumericVector result = NumericVector(vec.size() + 1, NA_REAL);
        result[0] = x;
        for (int i = 1; i < vec.size(); i++) {
            result[i] = vec[i];
        }
        result[result.size() - 1] = x;
        return result;
    } catch (const std::exception& e) {
        throw Exception("Failed to get W (dx = %f, constNewtonCotes = %i): %s",
            dx, constNewtonCotes, e.what());
    }
}

// Probability that the stage-k statistic falls below the decision bound in row
// paramIndex of decisionMatrix, integrated against the previous stage's
// weighted density dn on grid x: transforms the bound into the conditional
// standard-normal scale, applies pnorm, and takes the quadrature inner product.
double getSeqValue(int paramIndex, int k,
        NumericVector dn, NumericVector x,
        NumericMatrix decisionMatrix, NumericVector informationRates,
        NumericVector epsilonVec) {
    try {
        int kIndex = k - 1;
        NumericVector vec = NumericVector(x.size(), NA_REAL);
        for (int i = 0; i < x.size(); i++) {
            vec[i] = (decisionMatrix(paramIndex, kIndex) * sqrt((double) informationRates[kIndex]) -
                x[i] * sqrt((double) informationRates[kIndex - 1])) /
                sqrt((double) epsilonVec[kIndex]);
        }
        // NOTE(review): 'as(vec)' appears with its template argument stripped by
        // text extraction (likely as<NumericVector>(vec)); kept verbatim.
        vec = pnorm(as(vec));
        return vectorProduct(vec, dn);
    } catch (const std::exception& e) {
        throw Exception("Failed to get sequence values (paramIndex = %i, k = %i): %s",
            paramIndex, k, e.what());
    }
}

// Grid spacing for stage k: the continuation region between the bounds in rows
// rowIndex and rowIndex + 1 at the PREVIOUS stage (column k - 2), divided into
// numberOfGridPoints - 1 equal steps.
double getDxValue(NumericMatrix decisionMatrix, int k, int numberOfGridPoints, int rowIndex) {
    try {
        return (decisionMatrix(rowIndex + 1, k - 2) -
            decisionMatrix(rowIndex, k - 2)) / (numberOfGridPoints - 1);
    } catch (const std::exception& e) {
        throw Exception("Failed to get dx value (k = %d, numberOfGridPoints = %d, rowIndex = %d): %s",
            k, numberOfGridPoints, rowIndex, e.what());
    }
}

// Uniform grid of numberOfGridPoints points over the previous stage's
// continuation region: starts at the lower bound decisionMatrix(rowIndex, k - 2)
// and advances in steps of getDxValue(...).
NumericVector getXValues(NumericMatrix decisionMatrix, int k, int numberOfGridPoints, int rowIndex) {
    try {
        NumericVector x = rep(decisionMatrix(rowIndex, k - 2), numberOfGridPoints);
        double dx = getDxValue(decisionMatrix, k, numberOfGridPoints, rowIndex);
        for (int i = 0; i < x.size(); i++) {
            x[i] = x[i] + i * dx;
        }
        return x;
    } catch (const std::exception& e) {
        throw Exception("Failed to get x values (k = %d, numberOfGridPoints = %d, rowIndex = %d): %s",
            k, numberOfGridPoints, rowIndex, e.what());
    }
}

// Reduced variant of getGroupSequentialProbabilitiesCpp() that only tracks the
// lower-bound crossing probability per stage (row 0 of decisionMatrix), used
// where the full exit-probability matrix is not needed. Bounds are clamped to
// [C_FUTILITY_BOUNDS_DEFAULT, C_UPPER_BOUNDS_DEFAULT] before integration.
// Returns one probability per stage.
NumericVector getGroupSequentialProbabilitiesFast(
        NumericMatrix decisionMatrix, NumericVector informationRates) {

    // maximum number of stages
    int kMax = informationRates.size();

    // probability matrix output
    NumericVector probs(kMax);

    double decValue = decisionMatrix(0, 0);
    if (decValue > C_UPPER_BOUNDS_DEFAULT) {
        decValue = C_UPPER_BOUNDS_DEFAULT;
    }
    probs[0] = getNormalDistribution(decValue);
    if (kMax == 1) {
        return probs;
    }

    // information increments between consecutive stages
    NumericVector epsilonVec = NumericVector(informationRates.size(), NA_REAL);
    epsilonVec[0] = informationRates[0];
    for (int i = 1; i < epsilonVec.size(); i++) {
        epsilonVec[i] = informationRates[i] - informationRates[i - 1];
    }

    // clone before clamping so the caller's matrix is not modified
    NumericMatrix decMatrix(Rcpp::clone(decisionMatrix));
    for (int i = 0; i < decMatrix.nrow(); i++) {
        for (int j = 0; j < decMatrix.ncol(); j++) {
            if (decMatrix(i, j) < C_FUTILITY_BOUNDS_DEFAULT) {
                decMatrix(i, j) = C_FUTILITY_BOUNDS_DEFAULT;
            }
        }
    }

    // density values in recursion
    NumericVector dn2 = NumericVector(C_NUMBER_OF_GRID_POINTS_ONE_SIDED, NA_REAL);
    // grid points in recursion
    NumericVector x2 = NumericVector(C_NUMBER_OF_GRID_POINTS_ONE_SIDED, NA_REAL);

    for (int k = 2; k <= kMax; k++) {
        double dx = getDxValue(decMatrix, k, C_NUMBER_OF_GRID_POINTS_ONE_SIDED, 0);
        NumericVector x = getXValues(decMatrix, k, C_NUMBER_OF_GRID_POINTS_ONE_SIDED, 0);
        NumericVector w = getW(dx, C_CONST_NEWTON_COTES_2);
        NumericVector densityValues = getDensityValues(x, k, informationRates, epsilonVec, x2, dn2);
        NumericVector dn = vectorMultiply(w, densityValues);

        double seq1 = getSeqValue(0, k, dn, x, decMatrix, informationRates, epsilonVec);
        // carry grid and weighted density forward to the next stage
        x2 = x;
        dn2 = dn;
        probs[k - 1] = seq1;
    }
    return probs;
}

// [[Rcpp::export]]
NumericMatrix getGroupSequentialProbabilitiesCpp(
        NumericMatrix decisionMatrix, NumericVector
informationRates) { try { NumericMatrix decMatrix(Rcpp::clone(decisionMatrix)); for (int i = 0; i < decMatrix.nrow(); i++) { for (int j = 0; j < decMatrix.ncol(); j++) { if (decMatrix(i, j) >= C_UPPER_BOUNDS_DEFAULT) { decMatrix(i, j) = C_UPPER_BOUNDS_DEFAULT; } } } // maximum number of stages int kMax = informationRates.size(); // probability matrix output NumericMatrix probs(decMatrix.nrow() + 1, kMax); NumericVector pnormValues = pnorm(decMatrix(_, 0)); for (int i = 0; i < pnormValues.size(); i++) { probs(i, 0) = pnormValues[i]; } probs(probs.nrow() - 1, 0) = 1; if (kMax <= 1) { return probs; } NumericVector epsilonVec = NumericVector(informationRates.size(), NA_REAL); epsilonVec[0] = informationRates[0]; for (int i = 1; i < epsilonVec.size(); i++) { epsilonVec[i] = informationRates[i] - informationRates[i - 1]; } if (decMatrix.nrow() == 2) { for (int i = 0; i < decMatrix.nrow(); i++) { for (int j = 0; j < decMatrix.ncol(); j++) { if (decMatrix(i, j) <= C_FUTILITY_BOUNDS_DEFAULT) { decMatrix(i, j) = C_FUTILITY_BOUNDS_DEFAULT; } } } // density values in recursion NumericVector dn2 = NumericVector(C_NUMBER_OF_GRID_POINTS_ONE_SIDED, NA_REAL); // grid points in recursion NumericVector x2 = NumericVector(C_NUMBER_OF_GRID_POINTS_ONE_SIDED, NA_REAL); for (int k = 2; k <= kMax; k++) { double dx = getDxValue(decMatrix, k, C_NUMBER_OF_GRID_POINTS_ONE_SIDED, 0); NumericVector x = getXValues(decMatrix, k, C_NUMBER_OF_GRID_POINTS_ONE_SIDED, 0); NumericVector w = getW(dx, C_CONST_NEWTON_COTES_2); NumericVector densityValues = getDensityValues(x, k, informationRates, epsilonVec, x2, dn2); NumericVector dn = vectorMultiply(w, densityValues); double seq1 = getSeqValue(0, k, dn, x, decMatrix, informationRates, epsilonVec); double seq2 = getSeqValue(1, k, dn, x, decMatrix, informationRates, epsilonVec); x2 = x; dn2 = dn; probs(0, k - 1) = seq1; probs(1, k - 1) = seq2; probs(2, k - 1) = probs(1, k - 2) - probs(0, k - 2); } } else if (decMatrix.nrow() == 4) { for (int i = 0; i < 
decMatrix.nrow(); i++) { for (int j = 0; j < decMatrix.ncol(); j++) { if (decMatrix(i, j) <= -C_UPPER_BOUNDS_DEFAULT) { decMatrix(i, j) = -C_UPPER_BOUNDS_DEFAULT; } } } // density values in recursion NumericVector dn2 = NumericVector(2 * C_NUMBER_OF_GRID_POINTS_TWO_SIDED, NA_REAL); // grid points in recursion NumericVector x2 = NumericVector(2 * C_NUMBER_OF_GRID_POINTS_TWO_SIDED, NA_REAL); for (int k = 2; k <= kMax; k++) { double dx0 = getDxValue(decMatrix, k, C_NUMBER_OF_GRID_POINTS_TWO_SIDED, 0); double dx1 = getDxValue(decMatrix, k, C_NUMBER_OF_GRID_POINTS_TWO_SIDED, 2); NumericVector x0 = getXValues(decMatrix, k, C_NUMBER_OF_GRID_POINTS_TWO_SIDED, 0); NumericVector x1 = getXValues(decMatrix, k, C_NUMBER_OF_GRID_POINTS_TWO_SIDED, 2); NumericVector x = concat(x0, x1); NumericVector w0 = getW(dx0, C_CONST_NEWTON_COTES_4); NumericVector w1 = getW(dx1, C_CONST_NEWTON_COTES_4); NumericVector w = concat(w0, w1); NumericVector densityValues = getDensityValues(x, k, informationRates, epsilonVec, x2, dn2); NumericVector dn = vectorMultiply(w, densityValues); double seq1 = getSeqValue(0, k, dn, x, decMatrix, informationRates, epsilonVec); double seq2 = getSeqValue(1, k, dn, x, decMatrix, informationRates, epsilonVec); double seq3 = getSeqValue(2, k, dn, x, decMatrix, informationRates, epsilonVec); double seq4 = getSeqValue(3, k, dn, x, decMatrix, informationRates, epsilonVec); x2 = x; dn2 = dn; probs(0, k - 1) = seq1; probs(1, k - 1) = seq2; probs(2, k - 1) = seq3; probs(3, k - 1) = seq4; probs(4, k - 1) = probs(3, k - 2) - probs(2, k - 2) + probs(1, k - 2) - probs(0, k - 2); } } return probs; } catch (const std::exception& e) { throw Exception("Failed to get group sequential probabilities: %s", e.what()); } } // [[Rcpp::export]] List getDesignGroupSequentialPampallonaTsiatisCpp( double tolerance, double beta, double alpha, double kMax, double deltaPT0, double deltaPT1, NumericVector informationRates, int sided, bool bindingFutility) { NumericVector futilityBounds(kMax); 
NumericVector rejectionBounds(kMax); NumericMatrix probs(5, kMax); int rows = sided == 1 ? 2 : 4; double size; double delst; double power; NumericMatrix helper(rows, kMax); NumericVector sqrtInformationRates = sqrt(informationRates); NumericVector deltaPT0KMaxInformationRates = pow(informationRates * kMax, deltaPT0 - 0.5); NumericVector deltaPT1KMaxInformationRates = pow(informationRates * kMax, deltaPT1 - 0.5); double pow1 = pow(kMax, deltaPT0 - 0.5); double pow2 = pow(kMax, deltaPT1 - 0.5); if (bindingFutility) { NumericMatrix decisionMatrix(rows, kMax); bizero([&](double c2m) { bizero([&](double c1m) { delst = c2m * pow1 + c1m * pow2; futilityBounds = sqrtInformationRates * delst - deltaPT0KMaxInformationRates * c2m; rejectionBounds = deltaPT1KMaxInformationRates * c1m; for (int i = 0; i < futilityBounds.length(); i++) { if (futilityBounds[i] > rejectionBounds[i]) { futilityBounds[i] = rejectionBounds[i]; } if (sided == 2 && futilityBounds[i] < 0) { futilityBounds[i] = 0; } } if (sided == 1) { decisionMatrix.row(0) = futilityBounds; decisionMatrix.row(1) = rejectionBounds; } else { decisionMatrix.row(0) = -rejectionBounds; decisionMatrix.row(1) = -futilityBounds; decisionMatrix.row(2) = futilityBounds; decisionMatrix.row(3) = rejectionBounds; } probs = getGroupSequentialProbabilitiesCpp(decisionMatrix, informationRates); if (sided == 1) { size = sum(probs.row(2) - probs.row(1)); } else { size = sum(probs.row(4) - probs.row(3) + probs.row(0)); } return size - alpha; }, 0, 10, tolerance, C_MAX_NUMBER_OF_ITERATIONS); for (int i = 0; i < rows; i++) { helper.row(i) = sqrtInformationRates * delst; } NumericMatrix decisionMatrixH1 = matrixSub(decisionMatrix, helper); probs = getGroupSequentialProbabilitiesCpp(decisionMatrixH1, informationRates); if (sided == 1) { power = sum(probs.row(2) - probs.row(1)); } else { power = sum(probs.row(4) - probs.row(3) + probs.row(0)); } return 1.0 - beta - power; }, 0, 10, tolerance, C_MAX_NUMBER_OF_ITERATIONS); } else { // 
non-binding double c1m = 0; bizero([&](double x) { c1m = x; rejectionBounds = deltaPT1KMaxInformationRates * c1m; NumericMatrix decisionMatrix(2, kMax); if (sided == 1) { decisionMatrix.row(0) = rep(-6, kMax); } else { decisionMatrix.row(0) = -rejectionBounds; } decisionMatrix.row(1) = rejectionBounds; probs = getGroupSequentialProbabilitiesCpp(decisionMatrix, informationRates); size = sum(probs.row(2) - probs.row(1)); if (sided != 1) { size += sum(probs.row(0)); } return size - alpha; }, 0, 10, tolerance, C_MAX_NUMBER_OF_ITERATIONS); rejectionBounds = deltaPT1KMaxInformationRates * c1m; bizero([&](double c2m) { delst = c2m * pow1 + c1m * pow2; futilityBounds = sqrtInformationRates * delst - deltaPT0KMaxInformationRates * c2m; for (int i = 0; i < futilityBounds.length(); i++) { if (futilityBounds[i] > rejectionBounds[i]) { futilityBounds[i] = rejectionBounds[i]; } } NumericMatrix decisionMatrix(rows,kMax); if (sided == 1) { decisionMatrix.row(0) = futilityBounds; decisionMatrix.row(1) = rejectionBounds; } else { for (int i = 0; i < futilityBounds.length(); i++) { if (futilityBounds[i] < 0) { futilityBounds[i] = 0; } } decisionMatrix.row(0) = -rejectionBounds; decisionMatrix.row(1) = -futilityBounds; decisionMatrix.row(2) = futilityBounds; decisionMatrix.row(3) = rejectionBounds; } for (int i = 0; i < helper.nrow();i++) { helper.row(i) = sqrtInformationRates * delst; } NumericMatrix decisionMatrixH1 = matrixSub(decisionMatrix, helper); probs = getGroupSequentialProbabilitiesCpp(decisionMatrixH1, informationRates); if (sided == 1) { power = sum(probs.row(2) - probs.row(1)); } else { power = sum(probs.row(4) + probs.row(0) - probs.row(3)); } return 1.0 - beta - power; }, 0, 10, tolerance, C_MAX_NUMBER_OF_ITERATIONS); } return List::create( _["futilityBounds"] = futilityBounds, _["criticalValues"] = rejectionBounds, _["probs"] = probs ); } NumericMatrix getDecisionMatrixOneSided( NumericVector criticalValues, NumericVector futilityBounds, bool bindingFutility) { int 
kMax = criticalValues.length();
    NumericMatrix decisionMatrix(2, kMax);
    if (bindingFutility) {
        // add C_FUTILITY_BOUNDS_DEFAULT at the end of the vector, after its
        // current last element (futilityBounds has kMax - 1 entries: there is
        // no futility stop at the final stage)
        NumericVector futilityBoundsTemp = Rcpp::clone(futilityBounds);
        if (futilityBoundsTemp.length() < kMax) {
            futilityBoundsTemp.push_back(C_FUTILITY_BOUNDS_DEFAULT);
        }
        decisionMatrix(0, _) = futilityBoundsTemp;
        decisionMatrix(1, _) = criticalValues;
    } else {
        // non-binding futility: integrate as if there were no futility stops
        decisionMatrix(0, _) = rep(C_FUTILITY_BOUNDS_DEFAULT, kMax);
        decisionMatrix(1, _) = criticalValues;
    }
    return decisionMatrix;
}

// 2 x kMax decision matrix for a two-sided design: symmetric rejection bounds
// -criticalValues (row 0) and +criticalValues (row 1).
NumericMatrix getDecisionMatrixTwoSided(NumericVector criticalValues) {
    NumericMatrix decisionMatrix(2, criticalValues.length());
    decisionMatrix(0, _) = -criticalValues;
    decisionMatrix(1, _) = criticalValues;
    return decisionMatrix;
}

// Copy of the first k columns (stages) of decisionMatrix.
NumericMatrix getDecisionMatrixSubset(NumericMatrix decisionMatrix, int k) {
    NumericMatrix decisionMatrixSubset(decisionMatrix.nrow(), k);
    for (int i = 0; i < k; i++) {
        decisionMatrixSubset(_, i) = decisionMatrix(_, i);
    }
    return decisionMatrixSubset;
}

// Dispatches to the one- or two-sided decision matrix builder.
// If k >= 0, only the first k stages are returned; the default k = -1 keeps
// all stages.
NumericMatrix getDecisionMatrix(
        NumericVector criticalValues,
        NumericVector futilityBounds,
        bool bindingFutility, int sided, int k = -1) {

    NumericMatrix decisionMatrix;
    if (sided == 1) {
        decisionMatrix = getDecisionMatrixOneSided(criticalValues, futilityBounds, bindingFutility);
    } else {
        decisionMatrix = getDecisionMatrixTwoSided(criticalValues);
    }
    if (k < 0) {
        return decisionMatrix;
    }
    return getDecisionMatrixSubset(decisionMatrix, k);
}

// Cumulative rejection probability minus alpha; the root of this function in
// the critical-value scale yields a design with exact level alpha.
// Row layout of probs follows getGroupSequentialProbabilitiesCpp():
// one-sided uses rows 2 - 1; two-sided additionally adds the lower-tail row 0.
double getZeroApproximation(NumericMatrix probs, double alpha, int sided) {
    if (sided == 1) {
        return sum(probs(2, _) - probs(1, _)) - alpha;
    }
    return sum(probs(2, _) - probs(1, _) + probs(0, _)) - alpha;
}

// Cumulative error spent at information fraction x for the given spending
// function family (the type constants name the families: P = Pocock-type,
// OF = O'Brien-Fleming-type, KD = Kim-DeMets power family with exponent gamma,
// HSD = Hwang-Shih-DeCani with parameter gamma). alpha is the total error to
// spend; sided enters only the asOF formula. Returns NA_REAL for an
// unrecognised type.
double getSpendingValueCpp(double alpha, double x, double sided,
        String typeOfDesign, double gamma) {

    if (typeOfDesign == C_TYPE_OF_DESIGN_AS_P || typeOfDesign == C_TYPE_OF_DESIGN_BS_P) {
        return alpha * log(1 + (exp(1) - 1) * x);
    }

    if (typeOfDesign == C_TYPE_OF_DESIGN_AS_OF) {
        return 2 * sided * (getOneMinusPNorm(getOneMinusQNorm(alpha / (2 * sided)) / sqrt(x)));
    }

    if (typeOfDesign == C_TYPE_OF_DESIGN_BS_OF) {
        return 2 * (getOneMinusPNorm(getOneMinusQNorm(alpha / 2) / sqrt(x)));
    }

    if (typeOfDesign == C_TYPE_OF_DESIGN_AS_KD || typeOfDesign == C_TYPE_OF_DESIGN_BS_KD) {
        return alpha * pow(x, gamma);
    }

    if (typeOfDesign == C_TYPE_OF_DESIGN_AS_HSD || typeOfDesign == C_TYPE_OF_DESIGN_BS_HSD) {
        if (gamma == 0) {
            // gamma = 0 degenerates to linear spending
            return alpha * x;
        }
        return alpha * (1 - exp(-gamma * x)) / (1 - exp(-gamma));
    }

    return NA_REAL;
}

// Critical value of stage k for an alpha-spending design: the bound at which
// the cumulative rejection probability over stages 1..k equals the alpha spent
// up to information time k. Stages 1..k-1 of criticalValues must already be
// filled in by the caller; the stage-k entry is solved for by bisection over
// [0, 8]. For user-defined spending (and noEarlyEfficacy) the spent alpha is
// read from userAlphaSpending instead of a spending function. Stage 1 has a
// closed form: the (1 - spent/sided) normal quantile.
double getCriticalValue(
        int k,
        NumericVector criticalValues,
        NumericVector userAlphaSpending,
        double alpha,
        double gammaA,
        String typeOfDesign,
        double sided,
        NumericVector informationRates,
        bool bindingFutility,
        NumericVector futilityBounds,
        double tolerance) {

    double alphaSpendingValue;
    if (typeOfDesign == C_TYPE_OF_DESIGN_AS_USER || typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY) {
        alphaSpendingValue = userAlphaSpending[k - 1];
    } else {
        alphaSpendingValue = getSpendingValueCpp(alpha,
            (double) informationRates[k - 1], sided, typeOfDesign, gammaA);
    }

    if (k == 1) {
        return(getOneMinusQNorm(alphaSpendingValue / sided));
    }

    double criticalValue = NA_REAL;
    // clone so that the caller's vector is only updated by the caller itself
    NumericVector criticalValuesTemp = Rcpp::clone(criticalValues);
    bisection2([&](double scale) {
        // the last accepted bisection point is captured as the result
        criticalValue = scale;
        criticalValuesTemp[k - 1] = criticalValue;
        NumericMatrix decisionMatrix = getDecisionMatrix(
            criticalValuesTemp, futilityBounds, bindingFutility, sided, k);
        NumericMatrix probs = getGroupSequentialProbabilitiesCpp(
            decisionMatrix, rangeVector(informationRates, 0, k - 1));
        return getZeroApproximation(probs, alphaSpendingValue, sided);
    }, 0.0, 8.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS);

    return criticalValue;
}

// Critical values for all kMax stages of an alpha-spending design, solved
// stage by stage via getCriticalValue().
NumericVector getDesignGroupSequentialAlphaSpending(
        int kMax,
        NumericVector userAlphaSpending,
        double alpha,
        double gammaA,
        String typeOfDesign,
        double sided,
        NumericVector informationRates,
        bool bindingFutility,
        NumericVector futilityBounds,
        double tolerance) {

    NumericVector
criticalValues = NumericVector(kMax, NA_REAL); for (int k = 1; k <= kMax; k++) { criticalValues[k - 1] = getCriticalValue( k, criticalValues, userAlphaSpending, alpha, gammaA, typeOfDesign, sided, informationRates, bindingFutility, futilityBounds, tolerance); } return criticalValues; } // [[Rcpp::export]] NumericVector getDesignGroupSequentialUserDefinedAlphaSpendingCpp( int kMax, NumericVector userAlphaSpending, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance) { return getDesignGroupSequentialAlphaSpending( kMax, userAlphaSpending, NA_REAL, NA_REAL, C_TYPE_OF_DESIGN_AS_USER, sided, informationRates, bindingFutility, futilityBounds, tolerance); } // [[Rcpp::export]] NumericVector getDesignGroupSequentialAlphaSpendingCpp( int kMax, double alpha, double gammaA, String typeOfDesign, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance) { return getDesignGroupSequentialAlphaSpending( kMax, NumericVector(0), alpha, gammaA, typeOfDesign, sided, informationRates, bindingFutility, futilityBounds, tolerance); } // [[Rcpp::export]] NumericVector getDesignGroupSequentialDeltaWTCpp( int kMax, double alpha, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance, double deltaWT) { NumericVector criticalValues(kMax); double scale = bizero([&](double scale) { for (int k = 0; k < kMax; k++) { criticalValues[k] = scale * pow((double) informationRates[k], deltaWT - 0.5); } NumericMatrix decisionMatrix = getDecisionMatrix( criticalValues, futilityBounds, bindingFutility, sided); NumericMatrix probs = getGroupSequentialProbabilitiesCpp( decisionMatrix, informationRates); return getZeroApproximation(probs, alpha, sided); }, 0.0, 8.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS); for (int k = 0; k < kMax; k++) { criticalValues[k] = scale * pow((double) informationRates[k], deltaWT - 0.5); } return 
criticalValues; } // [[Rcpp::export]] NumericVector getDesignGroupSequentialPocockCpp( int kMax, double alpha, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance) { return getDesignGroupSequentialDeltaWTCpp( kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance, 0.5); } // [[Rcpp::export]] NumericVector getDesignGroupSequentialOBrienAndFlemingCpp( int kMax, double alpha, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance) { return getDesignGroupSequentialDeltaWTCpp( kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance, 0); } NumericMatrix getDecisionMatrixForFutilityBounds( NumericVector informationRates, NumericVector criticalValues, NumericVector futilityBoundsTemp, double shift, double sided) { int kMax = criticalValues.length(); if (futilityBoundsTemp.length() < kMax) { futilityBoundsTemp.push_back(C_FUTILITY_BOUNDS_DEFAULT); } if (sided == 1) { NumericMatrix decisionMatrix(2, kMax); decisionMatrix(0, _) = futilityBoundsTemp - sqrt(informationRates) * shift; decisionMatrix(1, _) = criticalValues - sqrt(informationRates) * shift; return decisionMatrix; } NumericMatrix decisionMatrix(4, kMax); decisionMatrix(0, _) = -criticalValues - sqrt(informationRates) * shift; decisionMatrix(1, _) = -futilityBoundsTemp - sqrt(informationRates) * shift; decisionMatrix(2, _) = futilityBoundsTemp - sqrt(informationRates) * shift; decisionMatrix(3, _) = criticalValues - sqrt(informationRates) * shift; return decisionMatrix; } double getFutilityBoundOneSided(int k, NumericVector betaSpendingValues, NumericVector informationRates, NumericVector futilityBounds, NumericVector criticalValues, double shift, double tolerance) { if (k == 1) { return getQNorm((double) betaSpendingValues[0]) + sqrt((double) informationRates[0]) * shift; } double futilityBound = NA_REAL; NumericVector futilityBoundsTemp = 
Rcpp::clone(futilityBounds); NumericVector probs; NumericMatrix decisionMatrix; bisection2([&](double scale) { futilityBound = scale; futilityBoundsTemp[k - 1] = futilityBound; decisionMatrix = getDecisionMatrixForFutilityBounds( informationRates, criticalValues, futilityBoundsTemp, shift, 1); probs = getGroupSequentialProbabilitiesFast( getDecisionMatrixSubset(decisionMatrix, k), rangeVector(informationRates, 0, k - 1)); return (double) betaSpendingValues[k - 1] - sum(probs); }, -6.0, 5.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS); return futilityBound; } NumericVector getFutilityBoundsOneSided(int kMax, NumericVector betaSpendingValues, NumericVector informationRates, NumericVector criticalValues, double shift, double tolerance) { NumericVector futilityBounds = NumericVector(kMax, NA_REAL); for (int k = 1; k <= kMax; k++) { futilityBounds[k - 1] = getFutilityBoundOneSided(k, betaSpendingValues, informationRates, futilityBounds, criticalValues, shift, tolerance); } return futilityBounds; } NumericMatrix getProbabilitiesForFutilityBounds( NumericVector informationRates, NumericVector criticalValues, NumericVector futilityBounds, double shift, int k, double sided) { NumericMatrix decisionMatrix = getDecisionMatrixForFutilityBounds( informationRates, criticalValues, futilityBounds, shift, sided); return getGroupSequentialProbabilitiesCpp( getDecisionMatrixSubset(decisionMatrix, k), rangeVector(informationRates, 0, k - 1)); } List getDesignGroupSequentialBetaSpendingOneSidedCpp( NumericVector criticalValues, int kMax, NumericVector userAlphaSpending, NumericVector userBetaSpending, NumericVector informationRates, bool bindingFutility, double tolerance, String typeOfDesign, String typeBetaSpending, double gammaA, double gammaB, double alpha, double beta ) { double sided = 1.0; criticalValues = Rcpp::clone(criticalValues); if (typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY) { for (int k = 0; k < kMax - 1; k++) { userAlphaSpending[k] = 0; criticalValues[k] = 
getQNormThreshold(); } userAlphaSpending[kMax - 1] = alpha; criticalValues[kMax - 1] = getOneMinusQNorm(alpha / sided); } NumericVector betaSpendingValues; if (typeBetaSpending == C_TYPE_OF_DESIGN_BS_USER) { betaSpendingValues = userBetaSpending; } else { betaSpendingValues = NumericVector(kMax, NA_REAL); for (int k = 0; k < kMax; k++) { betaSpendingValues[k] = getSpendingValueCpp(beta, (double) informationRates[k], sided, typeBetaSpending, gammaB); } } NumericVector futilityBounds; double shiftResult; if (!bindingFutility) { shiftResult = bizero([&](double shift) { futilityBounds = getFutilityBoundsOneSided(kMax, betaSpendingValues, informationRates, criticalValues, shift, tolerance); return (double) futilityBounds[kMax - 1] - (double) criticalValues[kMax - 1]; }, -4.0, 10.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS); } else { futilityBounds = NumericVector(kMax, NA_REAL); shiftResult = bisection2([&](double shift) { for (int k = 1; k <= kMax; k++) { if (typeOfDesign != C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY) { criticalValues[k - 1] = getCriticalValue( k, criticalValues, userAlphaSpending, alpha, gammaA, typeOfDesign, sided, informationRates, bindingFutility, futilityBounds, tolerance); } futilityBounds[k - 1] = getFutilityBoundOneSided(k, betaSpendingValues, informationRates, futilityBounds, criticalValues, shift, tolerance); } return (double) criticalValues[kMax - 1] - (double) futilityBounds[kMax - 1]; }, -4.0, 10.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS); } NumericMatrix probs = getProbabilitiesForFutilityBounds(informationRates, criticalValues, futilityBounds, shiftResult, kMax, sided); NumericVector betaSpent = cumsum(probs(0, _)); NumericVector power = cumsum(probs(2, _) - probs(1, _)); futilityBounds = rangeVector(futilityBounds, 0, kMax - 2); return List::create( _["futilityBounds"] = futilityBounds, _["criticalValues"] = criticalValues, _["betaSpent"] = betaSpent, _["power"] = power, _["shift"] = shiftResult ); } int getFirstIndexOfValuLargerZero(NumericVector 
vec) { for (int i = 0; i < vec.size(); i++) { if (!R_IsNA((double) vec[i]) && vec[i] > 0) { return i; } } return -1; } // Add additional option betaAdjustment for group sequential design (default = FALSE) NumericVector getAdjustedBetaSpendingValues( int kMax, int kMin, NumericVector betaSpendingValues, bool betaAdjustment) { if (kMin <= 0) { return betaSpendingValues; } NumericVector betaSpendingValuesAdjusted = Rcpp::clone(betaSpendingValues); for (int k = 0; k < kMin; k++) { betaSpendingValuesAdjusted[k] = 0; } if (betaAdjustment) { for (int k = kMin - 1; k < kMax; k++) { betaSpendingValuesAdjusted[k] = (betaSpendingValues[k] - betaSpendingValues[kMin - 1]) / (betaSpendingValues[kMax - 1] - betaSpendingValues[kMin - 1]) * betaSpendingValues[kMax - 1]; } } return betaSpendingValuesAdjusted; } double getFutilityBoundTwoSided( int k, NumericVector betaSpendingValues, NumericVector informationRates, NumericVector futilityBounds, NumericVector futilityBoundsOneSided, NumericVector criticalValues, double shift, double tolerance) { if (k == 1) { double futilityBound = bizero([&](double u) { return getNormalDistribution(u - sqrt((double) informationRates[0]) * shift) - getNormalDistribution(-u - sqrt((double) informationRates[0]) * shift) - betaSpendingValues[0]; }, -8.0, 8.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS); if (futilityBound > criticalValues[0]) { futilityBound = criticalValues[0]; } if (futilityBoundsOneSided[0] < 0) { futilityBound = 0; } return futilityBound; } double futilityBound = NA_REAL; double futilityBoundOneSided = 1; if (k <= futilityBoundsOneSided.length()) { futilityBoundOneSided = futilityBoundsOneSided[k - 1]; } NumericVector futilityBoundsTemp = Rcpp::clone(futilityBounds); NumericMatrix decisionMatrix; bizero([&](double scale) { futilityBound = scale; if (futilityBound > criticalValues[k - 1]) { futilityBound = criticalValues[k - 1]; } if (futilityBoundOneSided < 0) { futilityBound = 0; } futilityBoundsTemp[k - 1] = futilityBound; 
decisionMatrix = getDecisionMatrixForFutilityBounds( informationRates, criticalValues, futilityBoundsTemp, shift, 2); NumericMatrix probs = getGroupSequentialProbabilitiesCpp( decisionMatrix(_, Range(0, k - 1)), informationRates[Range(0, k - 1)]); double probsSum = sum(probs.row(2) - probs.row(1)); return (double) betaSpendingValues[k - 1] - probsSum; }, -6.0, 5.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS); return futilityBound; } NumericVector getFutilityBoundsTwoSided( int kMax, NumericVector betaSpendingValues, NumericVector informationRates, NumericVector futilityBoundsOneSided, NumericVector criticalValues, double shift, double tolerance) { NumericVector futilityBounds = NumericVector(kMax, NA_REAL); for (int k = 1; k <= kMax; k++) { futilityBounds[k - 1] = getFutilityBoundTwoSided( k, betaSpendingValues, informationRates, futilityBounds, futilityBoundsOneSided, criticalValues, shift, tolerance); } return futilityBounds; } double getCriticalValueTwoSided( int kMax, int k, NumericVector criticalValues, NumericVector userAlphaSpending, double alpha, double gammaA, String typeOfDesign, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance) { double sided = 2.0; double alphaSpendingValue; if (typeOfDesign == C_TYPE_OF_DESIGN_AS_USER || typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY) { alphaSpendingValue = userAlphaSpending[k - 1]; } else { alphaSpendingValue = getSpendingValueCpp(alpha, (double) informationRates[k - 1], sided, typeOfDesign, gammaA); } if (k == 1) { return(getOneMinusQNorm(alphaSpendingValue / sided)); } double criticalValue = NA_REAL; NumericVector criticalValuesTemp = Rcpp::clone(criticalValues); bisection2([&](double scale) { criticalValue = scale; criticalValuesTemp[k - 1] = criticalValue; NumericMatrix decisionMatrix(4, futilityBounds.length()); decisionMatrix(0, _) = -criticalValuesTemp; decisionMatrix(1, _) = -futilityBounds; decisionMatrix(2, _) = futilityBounds; decisionMatrix(3, _) = 
criticalValuesTemp; NumericMatrix probs = getGroupSequentialProbabilitiesCpp( decisionMatrix(_, Range(0, k - 1)), informationRates[Range(0, k - 1)]); return sum(probs(4, _) - probs(3, _) + probs(0, _)) - alphaSpendingValue; }, 0.0, 8.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS); return criticalValue; } List getDesignGroupSequentialBetaSpendingTwoSidedCpp( NumericVector criticalValues, int kMax, NumericVector userAlphaSpending, NumericVector userBetaSpending, NumericVector informationRates, bool bindingFutility, double tolerance, String typeOfDesign, String typeBetaSpending, double gammaA, double gammaB, double alpha, double beta, bool betaAdjustment, bool twoSidedPower ) { double sided = 2; criticalValues = Rcpp::clone(criticalValues); if (typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY) { for (int k = 0; k < kMax - 1; k++) { userAlphaSpending[k] = 0; criticalValues[k] = getQNormThreshold(); } userAlphaSpending[kMax - 1] = alpha; criticalValues[kMax - 1] = getOneMinusQNorm(alpha / sided); } // Check which of the futilityBounds are negative for the corresponding one-sided case. // For these stages, no two-sided futlityBounds are calculated. 
NumericVector futilityBoundsOneSided = getDesignGroupSequentialBetaSpendingOneSidedCpp( criticalValues, kMax, userAlphaSpending / 2.0, userBetaSpending, informationRates, bindingFutility, tolerance, typeOfDesign, typeBetaSpending, gammaA, gammaB, alpha / 2.0, beta )["futilityBounds"]; NumericVector betaSpendingValues; if (typeBetaSpending == C_TYPE_OF_DESIGN_BS_USER) { betaSpendingValues = userBetaSpending; } else { betaSpendingValues = NumericVector(kMax, NA_REAL); for (int k = 0; k < kMax; k++) { betaSpendingValues[k] = getSpendingValueCpp(beta, (double) informationRates[k], sided, typeBetaSpending, gammaB); } } double kMin = getFirstIndexOfValuLargerZero(futilityBoundsOneSided); betaSpendingValues = getAdjustedBetaSpendingValues( kMax, kMin, betaSpendingValues, betaAdjustment); NumericVector futilityBounds; double shiftResult; if (!bindingFutility) { shiftResult = bisection2([&](double shift) { futilityBounds = getFutilityBoundsTwoSided( kMax, betaSpendingValues, informationRates, futilityBoundsOneSided, criticalValues, shift, tolerance); return (double) criticalValues[kMax - 1] - (double) futilityBounds[kMax - 1]; }, -4.0, 10.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS); } else { futilityBounds = NumericVector(kMax, NA_REAL); shiftResult = bisection2([&](double shift) { for (int k = 1; k <= kMax; k++) { criticalValues[k - 1] = getCriticalValueTwoSided( kMax, k, criticalValues, userAlphaSpending, alpha, gammaA, typeOfDesign, informationRates, bindingFutility, futilityBounds, tolerance); futilityBounds[k - 1] = getFutilityBoundTwoSided(k, betaSpendingValues, informationRates, futilityBounds, futilityBoundsOneSided, criticalValues, shift, tolerance); } return (double) criticalValues[kMax - 1] - (double) futilityBounds[kMax - 1]; }, -4.0, 10.0, tolerance, C_MAX_NUMBER_OF_ITERATIONS); } NumericMatrix probs = getProbabilitiesForFutilityBounds(informationRates, criticalValues, futilityBounds, shiftResult, kMax, sided); NumericVector betaSpent = cumsum(probs(2, _) - 
probs(1, _)); NumericVector power(kMax); if (twoSidedPower) { power = (NumericVector) cumsum(probs(4, _) - probs(3, _) + probs(0, _)); } else { power = (NumericVector) cumsum(probs(4, _) - probs(3, _)); } futilityBounds = rangeVector(futilityBounds, 0, kMax - 2); futilityBounds[futilityBounds <= 1e-05] = NA_REAL; return List::create( _["futilityBounds"] = futilityBounds, _["criticalValues"] = criticalValues, _["betaSpent"] = betaSpent, _["power"] = power, _["shift"] = shiftResult ); } // [[Rcpp::export]] List getDesignGroupSequentialBetaSpendingCpp( NumericVector criticalValues, int kMax, NumericVector userAlphaSpending, NumericVector userBetaSpending, NumericVector informationRates, bool bindingFutility, double tolerance, String typeOfDesign, String typeBetaSpending, double gammaA, double gammaB, double alpha, double beta, double sided, bool betaAdjustment, bool twoSidedPower ) { if (sided == 1) { return getDesignGroupSequentialBetaSpendingOneSidedCpp( criticalValues, kMax, userAlphaSpending, userBetaSpending, informationRates, bindingFutility, tolerance, typeOfDesign, typeBetaSpending, gammaA, gammaB, alpha, beta ); } return getDesignGroupSequentialBetaSpendingTwoSidedCpp( criticalValues, kMax, userAlphaSpending, userBetaSpending, informationRates, bindingFutility, tolerance, typeOfDesign, typeBetaSpending, gammaA, gammaB, alpha, beta, betaAdjustment, twoSidedPower ); } // [[Rcpp::export]] List getDesignGroupSequentialUserDefinedBetaSpendingCpp( NumericVector criticalValues, int kMax, NumericVector userAlphaSpending, NumericVector userBetaSpending, double sided, NumericVector informationRates, bool bindingFutility, double tolerance, String typeOfDesign, double gammaA, double alpha, bool betaAdjustment, bool twoSidedPower ) { String typeBetaSpending = C_TYPE_OF_DESIGN_BS_USER; double gammaB = NA_REAL; double beta = NA_REAL; return getDesignGroupSequentialBetaSpendingCpp( criticalValues, kMax, userAlphaSpending, userBetaSpending, informationRates, bindingFutility, 
tolerance, typeOfDesign, typeBetaSpending, gammaA, gammaB, alpha, beta, sided, betaAdjustment, twoSidedPower ); } rpact/src/rpact_types.h0000644000176200001440000000400214366155453014670 0ustar liggesusers/** * * -- Type definitions -- * * This file is part of the R package rpact: * Confirmatory Adaptive Clinical Trial Design and Analysis * * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD * Licensed under "GNU Lesser General Public License" version 3 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3 * * RPACT company website: https://www.rpact.com * rpact package website: https://www.rpact.org * * Contact us for information about our services: info@rpact.com * * File version: $Revision: 6784 $ * Last changed: $Date: 2023-01-31 10:11:06 +0100 (Di, 31 Jan 2023) $ * Last changed by: $Author: pahlke $ * */ #ifndef SRC_RPACT_TYPES_H_ #define SRC_RPACT_TYPES_H_ #include // [[Rcpp::plugins(cpp11)]] using namespace Rcpp; typedef double (*calcSubjectsFunctionMeansPtr)( int stage, bool meanRatio, double thetaH0, int groups, NumericVector plannedSubjects, NumericVector allocationRatioPlanned, NumericVector minNumberOfSubjectsPerStage, NumericVector maxNumberOfSubjectsPerStage, NumericVector sampleSizesPerStage, double thetaH1, double stDevH1, double conditionalPower, double conditionalCriticalValue); typedef double (*calcSubjectsFunctionRatesPtr)( int stage, bool riskRatio, double thetaH0, int groups, NumericVector plannedSubjects, bool directionUpper, NumericVector allocationRatioPlanned, NumericVector minNumberOfSubjectsPerStage, NumericVector maxNumberOfSubjectsPerStage, NumericVector sampleSizesPerStage, NumericVector conditionalPower, NumericVector overallRate, double conditionalCriticalValue, double farringtonManningValue1, double farringtonManningValue2); typedef double (*calcEventsFunctionSurvivalPtr)( int stage, double conditionalPower, double thetaH0, double estimatedTheta, NumericVector plannedEvents, NumericVector 
eventsOverStages, NumericVector minNumberOfEventsPerStage, NumericVector maxNumberOfEventsPerStage, double allocationRatioPlanned, double conditionalCriticalValue); #endif /* SRC_RPACT_TYPES_H_ */ rpact/src/f_simulation_survival_utilities.h0000644000176200001440000000305014435554745021060 0ustar liggesusers/** * * -- Simulation survival utilities -- * * This file is part of the R package rpact: * Confirmatory Adaptive Clinical Trial Design and Analysis * * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD * Licensed under "GNU Lesser General Public License" version 3 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3 * * RPACT company website: https://www.rpact.com * rpact package website: https://www.rpact.org * * Contact us for information about our services: info@rpact.com * * File version: $Revision: 7019 $ * Last changed: $Date: 2023-05-31 07:23:47 +0200 (Mi, 31 Mai 2023) $ * Last changed by: $Author: pahlke $ * */ #include // [[Rcpp::plugins(cpp11)]] using namespace Rcpp; #ifndef PKG_RPACT_H2 #define PKG_RPACT_H2 double findObservationTime( NumericVector accrualTime, NumericVector survivalTime, NumericVector dropoutTime, double requiredStageEvents); double getNormalDistribution(double p); double getNormalQuantile(double p); double getRandomExponentialDistribution(double rate); double getRandomSurvivalDistribution(double rate, double kappa); double getRandomPiecewiseExponentialDistribution( NumericVector cdfValues, NumericVector piecewiseLambda, NumericVector piecewiseSurvivalTime); bool isPiecewiseExponentialSurvivalEnabled(NumericVector lambdaVec2); double getLambdaByPi(double pi, double eventTime, double kappa); double getPiByLambda(double lambda, double eventTime, double kappa); double getHazardRatio(double pi1, double pi2, double eventTime, double kappa); #endif rpact/src/f_simulation_base_survival.cpp0000644000176200001440000010310014366155453020303 0ustar liggesusers/** * * -- Simulation of survival data with group 
sequential and combination test -- * * This file is part of the R package rpact: * Confirmatory Adaptive Clinical Trial Design and Analysis * * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD * Licensed under "GNU Lesser General Public License" version 3 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3 * * RPACT company website: https://www.rpact.com * rpact package website: https://www.rpact.org * * Contact us for information about our services: info@rpact.com * * File version: $Revision: 6784 $ * Last changed: $Date: 2023-01-31 10:11:06 +0100 (Di, 31 Jan 2023) $ * Last changed by: $Author: pahlke $ * */ #include // [[Rcpp::plugins(cpp11)]] #include "f_utilities.h" #include "f_simulation_survival_utilities.h" #include "rpact_types.h" using namespace Rcpp; // Log Rank Test // // This function calculates the logrank test statistic for the survival data set at given time, // i.e., it determines whether an event or a dropout // was observed, calculates the time under risk, and the logrank statistic. 
//
// @param accrualTime A double vector of per-subject accrual (entry) times
//
// Computes the logrank test statistic for the simulated survival data set at the
// given analysis `time`: classifies each subject as event / dropout / censored,
// derives the time under observation, and accumulates the thetaH0-weighted
// logrank numerator and denominator over the ordered observation times.
// Returns a List with "result" = (logRank, numberOfSubjects, events1, events2)
// and, when returnRawData is true, additionally the per-subject raw vectors.
// Side effect: subjects not yet accrued at `time` are flagged by negating
// treatmentGroup[i]; the sign is restored via abs() before the risk-set walk
// (treatmentGroup is shared by reference through Rcpp vector semantics).
List logRankTest(NumericVector accrualTime, NumericVector survivalTime,
        NumericVector dropoutTime, IntegerVector treatmentGroup, double time,
        bool directionUpper, double thetaH0, bool returnRawData) {

    int numberOfSubjects = accrualTime.size();
    int subjectsT1 = 0;
    int subjectsT2 = 0;
    NumericVector timeUnderObservation = NumericVector(numberOfSubjects, 0.0);
    LogicalVector event = LogicalVector(numberOfSubjects, NA_LOGICAL);
    LogicalVector dropoutEvent = LogicalVector(numberOfSubjects, NA_LOGICAL);
    for (int i = 0; i < numberOfSubjects; i++) {
        if (accrualTime[i] > time) {
            // subject not yet recruited at the analysis time:
            // mark by sign flip and exclude from event/dropout classification
            treatmentGroup[i] = -treatmentGroup[i];
            event[i] = false;
            dropoutEvent[i] = false;
        } else {
            if (treatmentGroup[i] == 1) {
                subjectsT1++;
            } else if (treatmentGroup[i] == 2) {
                subjectsT2++;
            }
            // event observed before analysis time and not preceded by a dropout
            if (treatmentGroup[i] > 0 && accrualTime[i] + survivalTime[i] < time
                    && (R_IsNA((double) dropoutTime[i]) || dropoutTime[i] > survivalTime[i])) {
                event[i] = true;
            } else {
                event[i] = false;
            }
            // dropout observed before analysis time and before the event
            if (treatmentGroup[i] > 0 && accrualTime[i] + dropoutTime[i] < time
                    && !R_IsNA((double) dropoutTime[i]) && dropoutTime[i] < survivalTime[i]) {
                dropoutEvent[i] = true;
            } else {
                dropoutEvent[i] = false;
            }
        }
        // time under observation: event time, dropout time, or administrative
        // censoring at the analysis time
        if (event[i]) {
            timeUnderObservation[i] = survivalTime[i];
        } else if (dropoutEvent[i]) {
            timeUnderObservation[i] = dropoutTime[i];
        } else {
            timeUnderObservation[i] = time - accrualTime[i];
        }
    }
    int numberOfSubjets = subjectsT1 + subjectsT2;
    // sort subjects by time under observation; match() yields 1-based indices
    NumericVector timeUnderObservationSorted = clone(timeUnderObservation).sort();
    IntegerVector sortedIndex = match(timeUnderObservationSorted, timeUnderObservation);
    sortedIndex = sortedIndex - 1;
    LogicalVector eventSorted = event[sortedIndex];
    IntegerVector treatmentGroupSorted = treatmentGroup[sortedIndex];
    // drop the not-yet-accrued subjects (negative group codes) and restore signs
    eventSorted = eventSorted[treatmentGroupSorted > 0];
    treatmentGroupSorted = treatmentGroupSorted[treatmentGroupSorted > 0];
    treatmentGroup = abs(treatmentGroup);
    double numerator = 0;
    double denominator = 0;
    int events1 = 0;
    int events2 = 0;
    // walk the ordered times; subjectsT1/subjectsT2 act as the current risk-set
    // sizes and are decremented for every subject leaving the risk set
    // (event or censored)
    for (int i = 0; i < eventSorted.size(); i++) {
        if (eventSorted[i]) {
            if (treatmentGroupSorted[i] == 1) {
                if (subjectsT1 + subjectsT2 > 0) {
                    numerator -= subjectsT2 / (thetaH0 * subjectsT1 + subjectsT2);
                }
                events1++;
            } else if (treatmentGroupSorted[i] == 2) {
                if (subjectsT1 + subjectsT2 > 0) {
                    numerator += 1 - subjectsT2 / (thetaH0 * subjectsT1 + subjectsT2);
                }
                events2++;
            }
            if (subjectsT1 + subjectsT2 > 0) {
                denominator += thetaH0 * subjectsT1 * subjectsT2 /
                    pow(thetaH0 * subjectsT1 + subjectsT2, 2);
            }
        }
        if (treatmentGroupSorted[i] == 1) {
            subjectsT1--;
        } else if (treatmentGroupSorted[i] == 2) {
            subjectsT2--;
        }
    }
    double logRank;
    if (denominator > 0) {
        logRank = -numerator / sqrt(denominator);
    } else {
        // no events observed: statistic degenerates to -Inf
        logRank = R_NegInf;
    }
    // align the sign of the statistic with the configured test direction
    if (!directionUpper) {
        logRank = -logRank;
    }
    NumericVector out(4);
    out[0] = logRank;
    out[1] = numberOfSubjets;
    out[2] = events1;
    out[3] = events2;
    if (returnRawData) {
        return List::create(
            _["result"] = out,
            _["timeUnderObservation"] = timeUnderObservation,
            _["event"] = event,
            _["dropoutEvent"] = dropoutEvent
        );
    }
    return List::create(
        _["result"] = out
    );
}

// Transforms the cumulative stage-wise logrank statistics into independent
// increments, weighting by the square roots of the cumulative event counts.
// @param stage              current stage (1-based); result has this length
// @param eventsOverStages   cumulative events per stage
// @param logRankOverStages  cumulative logrank statistic per stage
NumericVector getIndependentIncrements(int stage, NumericVector eventsOverStages,
        NumericVector logRankOverStages) {
    NumericVector independentIncrements = NumericVector(stage, NA_REAL);
    independentIncrements[0] = logRankOverStages[0];
    const IntegerVector indices1 = seq(0, stage - 2);
    const IntegerVector indices2 = seq(1, stage - 1);
    independentIncrements[indices2] = vectorDivide(
        vectorMultiply(vectorSqrt(eventsOverStages[indices2]), logRankOverStages[indices2]) -
            vectorMultiply(vectorSqrt(eventsOverStages[indices1]), logRankOverStages[indices1]),
        vectorSqrt(eventsOverStages[indices2] - eventsOverStages[indices1]));
    return independentIncrements;
}

// Get Test Statistics
// Returns a 2-element vector: (test statistic, separate stage p-value).
// For the group sequential design the p-value slot is NA_REAL.
// @param designNumber The design number:
//        1: Group sequential design
//        2: Inverse normal design
//        3: Fisher design
//
// NOTE(review): angle-bracketed template arguments (e.g. as<NumericVector>)
// appear stripped in this extracted view — verify against the upstream source.
NumericVector getTestStatistics(int stage, int designNumber, NumericVector informationRates,
        NumericVector eventsOverStages, NumericVector logRankOverStages) {
    // Group sequential design: the cumulative logrank statistic itself
    if (designNumber == 1) {
        return NumericVector::create(logRankOverStages[stage - 1], NA_REAL);
    }
    // Inverse normal design: combine independent increments with
    // sqrt(information) weights
    if (designNumber == 2) {
        if (stage == 1) {
            return NumericVector::create(logRankOverStages[0],
                1 - getNormalDistribution((double) logRankOverStages[0]));
        }
        NumericVector independentIncrements = getIndependentIncrements(
            stage, eventsOverStages, logRankOverStages);
        const IntegerVector indices1 = seq(0, stage - 2);
        const IntegerVector indices2 = seq(1, stage - 1);
        double value = (sqrt((double) informationRates[0]) * independentIncrements[0] +
            vectorProduct(vectorSqrt(informationRates[indices2] - informationRates[indices1]),
                independentIncrements[indices2])) / sqrt((double) informationRates[stage - 1]);
        double pValueSeparate = 1 - getNormalDistribution((double) independentIncrements[stage - 1]);
        return NumericVector::create(value, pValueSeparate);
    }
    // Fisher design: product of stage-wise p-values raised to sqrt-information
    // weights (weight of stage 1 is fixed at 1)
    NumericVector independentIncrements = NumericVector(stage, NA_REAL);
    independentIncrements[0] = logRankOverStages[0];
    NumericVector weightFisher = NumericVector(stage, NA_REAL);
    weightFisher[0] = 1;
    if (stage > 1) {
        independentIncrements = getIndependentIncrements(stage, eventsOverStages, logRankOverStages);
        const IntegerVector indices1 = seq(0, stage - 2);
        const IntegerVector indices2 = seq(1, stage - 1);
        weightFisher[indices2] = vectorDivide(
            vectorSqrt(informationRates[indices2] - informationRates[indices1]),
            sqrt((double) informationRates[0]));
    }
    const IntegerVector indices0 = seq(0, stage - 1);
    double value = vectorProduct(vectorPow(1 - pnorm(as(independentIncrements[indices0])),
        as(weightFisher[indices0])));
    double pValueSeparate = 1 - getNormalDistribution((double) independentIncrements[stage - 1]);
    return NumericVector::create(value, pValueSeparate);
}

/**
 * Conditional critical value to reject the null hypotheses at the last stage of the trial
 * @param designNumber The design number:
 *    1: Group
 *        sequential design
 *    2: Inverse normal design
 *    3: Fisher design
 *
 */
double getConditionalCriticalValue(int designNumber, int stage, NumericVector criticalValues,
        NumericVector informationRates, NumericVector testStatisticOverStages) {
    if (designNumber == 3) { // Fisher design
        // invert the Fisher combination bound relative to the previous-stage
        // combination statistic, then map back to the normal scale
        return getNormalQuantile(
            1 - pow((double) criticalValues[stage - 1] / testStatisticOverStages[stage - 2],
                1 / sqrt((double) (informationRates[stage - 1] - informationRates[stage - 2]) /
                    informationRates[0]
                )
            ));
    }
    // group sequential / inverse normal: decompose the cumulative bound into
    // the increment required at the current stage
    return (sqrt((double) informationRates[stage - 1]) * criticalValues[stage - 1] -
        testStatisticOverStages[stage - 2] * sqrt((double) informationRates[stage - 2])) /
        sqrt((double) informationRates[stage - 1] - informationRates[stage - 2]);
}

// used effect size is either estimated from test statistic or pre-fixed
// Returns thetaH1 (inverted when !directionUpper) if pre-fixed, otherwise the
// hazard-ratio estimate derived from the previous stage's cumulative logrank
// statistic, cumulative events, and the planned allocation ratio.
double getEstimatedTheta(
        int stage,
        double thetaH1,
        bool directionUpper,
        NumericVector eventsOverStages,
        NumericVector logRankOverStages,
        double allocationRatioPlanned) {

    if (!R_IsNA(thetaH1)) {
        return directionUpper ? thetaH1 : 1 / thetaH1;
    }
    return exp((double) logRankOverStages[stage - 2] *
        (1 + allocationRatioPlanned) /
        sqrt(allocationRatioPlanned * eventsOverStages[stage - 2]));
}

/**
 * Get recalculated event sizes (only for stage > 1)
 * Computes the cumulative number of events required to reach the target
 * conditional power given the conditional critical value, clamped between
 * minNumberOfEventsPerStage and maxNumberOfEventsPerStage for the stage,
 * then added on top of the events accumulated through the previous stage.
 */
double getSimulationSuvivalStageEventsCpp(
        int stage,
        double conditionalPower,
        double thetaH0,
        double estimatedTheta,
        NumericVector plannedEvents,
        NumericVector eventsOverStages,
        NumericVector minNumberOfEventsPerStage,
        NumericVector maxNumberOfEventsPerStage,
        double allocationRatioPlanned,
        double conditionalCriticalValue) {

    // guard against theta <= 1, which would make log(theta / thetaH0)
    // vanish or flip sign in the denominator
    double theta = max(NumericVector::create(1 + 1E-12, estimatedTheta));
    double requiredStageEvents =
        pow(max(NumericVector::create(0, conditionalCriticalValue + getNormalQuantile(conditionalPower))), 2) *
        pow(1 + allocationRatioPlanned, 2) / (allocationRatioPlanned) /
        pow(log(theta / thetaH0), 2);
    // clamp the stage increment, then convert to a cumulative event count
    requiredStageEvents = min(NumericVector::create(
        max(NumericVector::create(minNumberOfEventsPerStage[stage - 1], requiredStageEvents)),
        maxNumberOfEventsPerStage[stage - 1])) + eventsOverStages[stage - 2];
    return requiredStageEvents;
}

// Exposes the C++ event-recalculation function as an external pointer so it
// can be handed through R and back into C++.
// NOTE(review): the XPtr template argument appears stripped in this extracted
// view (likely Rcpp::XPtr<calcEventsFunctionSurvivalPtr>) — verify upstream.
Rcpp::XPtr getSimulationSuvivalStageEventsXPtrCpp() {
    return(Rcpp::XPtr(new calcEventsFunctionSurvivalPtr(&getSimulationSuvivalStageEventsCpp)));
}

// Simulates a single trial path over up to kMax stages: per stage it derives
// the required events (optionally recalculated via an R or C++ callback),
// finds the corresponding observation time, runs the logrank test, applies
// the design's rejection/futility rules, and stops the path on the first
// rejection or futility event (or when the required events are not reached).
//
// Column layout of the returned kMax x 18 matrix:
//   0 analysisTime, 1 subjects, 2 events group 1, 3 events group 2,
//   4 events (stage-wise increment), 5 rejections, 6 eventsNotAchieved,
//   7 futilityStops, 8 duration, 9 iterations (stage reached flag),
//  10 testStatistic, 11 logRank, 12 hazardRate1 (NA), 13 hazardRate2 (NA),
//  14 hazardRatioEstimate (NA), 15 observationTime,
//  16 conditionalPowerAchieved, 17 pValuesSeparate
//
// NOTE(review): template arguments (Nullable<Function>, Rcpp::XPtr<...>,
// as<Function>) appear stripped in this extracted view — verify upstream.
NumericMatrix getSimulationStepResultsSurvival(
        int designNumber,
        int kMax,
        int sided,
        NumericVector criticalValues,
        NumericVector informationRates,
        double conditionalPower,
        NumericVector plannedEvents,
        double thetaH1,
        NumericVector minNumberOfEventsPerStage,
        NumericVector maxNumberOfEventsPerStage,
        bool directionUpper,
        double allocationRatioPlanned,
        NumericVector accrualTime,
        NumericVector survivalTime,
        NumericVector dropoutTime,
        IntegerVector treatmentGroup,
        double thetaH0,
        NumericVector futilityBounds,
        NumericVector alpha0Vec,
        int calcEventsFunctionType,
        Nullable calcEventsFunctionR,
        Rcpp::XPtr calcEventsFunctionCpp) {

    NumericVector eventsOverStages = NumericVector(kMax, 0.0);
    NumericVector logRankOverStages = NumericVector(kMax, 0.0);
    NumericVector testStatisticOverStages = NumericVector(kMax, 0.0);
    NumericVector analysisTime = NumericVector(kMax, 0.0);
    NumericVector subjects = NumericVector(kMax, 0.0);
    NumericVector expectedNumberOfEvents1 = NumericVector(kMax, 0.0);
    NumericVector expectedNumberOfEvents2 = NumericVector(kMax, 0.0);
    NumericVector expectedNumberOfEvents = NumericVector(kMax, 0.0);
    NumericVector rejections = NumericVector(kMax, 0.0);
    NumericVector eventsNotAchieved = NumericVector(kMax, 0.0);
    NumericVector futilityStops = NumericVector(kMax, 0.0);
    NumericVector pValuesSeparate = NumericVector(kMax, NA_REAL);
    NumericVector duration = NumericVector(kMax, 0.0);
    NumericVector iterations = NumericVector(kMax, 0.0);
    NumericVector hazardRates1 = NumericVector(kMax, 0.0);
    NumericVector hazardRates2 = NumericVector(kMax, 0.0);
    NumericVector hazardRatiosEstimate = NumericVector(kMax, 0.0);
    NumericVector observationTimePerStage = NumericVector(kMax, NA_REAL);
    NumericVector conditionalPowerAchieved = NumericVector(kMax, 0.0);
    for (int k = 1; k <= kMax; k++) {
        double conditionalCriticalValue = getConditionalCriticalValue(designNumber,
            k, criticalValues, informationRates, testStatisticOverStages);
        double estimatedTheta = getEstimatedTheta(k, thetaH1, directionUpper,
            eventsOverStages, logRankOverStages, allocationRatioPlanned);
        // planned events unless sample size recalculation is active (stage > 1)
        double stageEvents = plannedEvents[k - 1];
        if (!R_IsNA(conditionalPower) && k > 1) {
            if (calcEventsFunctionType == 1 && calcEventsFunctionR.isNotNull()) {
                // user-supplied R callback for event recalculation
                stageEvents = Rf_asReal(
                    as(calcEventsFunctionR)(
                        _["stage"] = k,
                        _["conditionalPower"] = conditionalPower,
                        _["thetaH0"] = thetaH0,
                        _["estimatedTheta"] = estimatedTheta,
                        _["plannedEvents"] = plannedEvents,
                        _["eventsOverStages"] = eventsOverStages,
                        _["minNumberOfEventsPerStage"] = minNumberOfEventsPerStage,
                        _["maxNumberOfEventsPerStage"] = maxNumberOfEventsPerStage,
                        _["allocationRatioPlanned"] = allocationRatioPlanned,
                        _["conditionalCriticalValue"] = conditionalCriticalValue));
            } else {
                // default or user-supplied C++ callback via external pointer
                calcEventsFunctionSurvivalPtr fun = *calcEventsFunctionCpp;
                stageEvents = fun(k, conditionalPower, thetaH0, estimatedTheta,
                    plannedEvents, eventsOverStages,
                    minNumberOfEventsPerStage, maxNumberOfEventsPerStage,
                    allocationRatioPlanned, conditionalCriticalValue);
            }
        }
        double observationTime = findObservationTime(accrualTime, survivalTime,
            dropoutTime, stageEvents);
        // required events cannot be reached with this data set: record and stop
        if (R_IsNA(observationTime)) {
            eventsNotAchieved[k - 1]++;
            break;
        }
        if (k > 1) {
            conditionalPowerAchieved[k - 1] = 1 - getNormalDistribution(
                conditionalCriticalValue - log(estimatedTheta) *
                    sqrt(stageEvents - eventsOverStages[k - 2]) *
                    sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned)
            );
        } else {
            conditionalPowerAchieved[k - 1] = NA_REAL;
        }
        observationTimePerStage[k - 1] = observationTime;
        List result = logRankTest(
            accrualTime, survivalTime, dropoutTime,
            treatmentGroup, observationTime,
            directionUpper, thetaH0, false);
        NumericVector survivalResult = result["result"];
        double logRank = survivalResult[0];
        double numberOfSubjects = survivalResult[1];
        double events1 = survivalResult[2];
        double events2 = survivalResult[3];
        hazardRates1[k - 1] = NA_REAL;
        hazardRates2[k - 1] = NA_REAL;
        hazardRatiosEstimate[k - 1] = NA_REAL;
        // events1/events2 are cumulative counts at observationTime
        eventsOverStages[k - 1] = events1 + events2;
        logRankOverStages[k - 1] = logRank;
        NumericVector testStatistic = getTestStatistics(k, designNumber,
            informationRates, eventsOverStages, logRankOverStages);
        testStatisticOverStages[k - 1] = testStatistic[0];
        int trialStopEventCounter = 0;
        if (designNumber == 3) { // Fisher design
            pValuesSeparate[k - 1] = testStatistic[1];
            // Fisher combination: reject when the product statistic falls
            // BELOW the critical value
            if (testStatistic[0] <= criticalValues[k - 1]) {
                rejections[k - 1]++;
                trialStopEventCounter++;
            }
            if (k < kMax && (testStatistic[1] >= alpha0Vec[k - 1])) {
                futilityStops[k - 1]++;
                trialStopEventCounter++;
            }
        } else { // all other designs
            pValuesSeparate[k - 1] = testStatistic[1];
            if ((sided == 1 && testStatistic[0] >= criticalValues[k - 1]) ||
                    (sided == 2 && std::abs((double) testStatistic[0]) >= criticalValues[k - 1])) {
                rejections[k - 1]++;
                trialStopEventCounter++;
            }
            // futility bounds apply to one-sided interim analyses only
            if (sided == 1 && k < kMax && testStatistic[0] <= futilityBounds[k - 1]) {
                futilityStops[k - 1]++;
                trialStopEventCounter++;
            }
        }
        if (trialStopEventCounter > 0) {
            // duration/subjects are counted once per stop event (rejection
            // and futility can coincide)
            for (int i = 0; i < trialStopEventCounter; i++) {
                duration[k - 1] += observationTime;
                subjects[k - 1] += numberOfSubjects;
            }
        } else {
            subjects[k - 1] += numberOfSubjects;
            if (k == kMax) {
                duration[k - 1] += observationTime;
            }
        }
        expectedNumberOfEvents1[k - 1] += events1;
        expectedNumberOfEvents2[k - 1] += events2;
        // stage-wise event increment = cumulative minus previous cumulative
        double x = events1 + events2;
        if (k > 1) {
            x -= eventsOverStages[k - 2];
        }
        expectedNumberOfEvents[k - 1] += x;
        analysisTime[k - 1] += observationTime;
        iterations[k - 1]++;
        if (trialStopEventCounter > 0) {
            break;
        }
    }
    NumericMatrix result(kMax, 18);
    result(_, 0) = analysisTime;
    result(_, 1) = subjects;
    result(_, 2) = expectedNumberOfEvents1;
    result(_, 3) = expectedNumberOfEvents2;
    result(_, 4) = expectedNumberOfEvents;
    result(_, 5) = rejections;
    result(_, 6) = eventsNotAchieved;
    result(_, 7) = futilityStops;
    result(_, 8) = duration;
    result(_, 9) = iterations;
    result(_, 10) = testStatisticOverStages;
    result(_, 11) = logRankOverStages;
    result(_, 12) = hazardRates1;
    result(_, 13) = hazardRates2;
    result(_, 14) = hazardRatiosEstimate;
    result(_, 15) = observationTimePerStage;
    result(_, 16) = conditionalPowerAchieved;
    result(_, 17) = pValuesSeparate;
    return result;
}

// Draws survival and dropout times for all subjects from Weibull-type
// distributions (exponential when kappa = 1). Dropout times are drawn only
// when the corresponding dropout rate phi is positive, otherwise stay NA.
// Returns a maxNumberOfSubjects x 2 matrix: column 0 = survivalTime,
// column 1 = dropoutTime.
NumericMatrix getExtendedSurvivalDataSet(IntegerVector treatmentGroup, int maxNumberOfSubjects,
        double lambda1, double lambda2, double phi1, double phi2, double kappa) {
    NumericVector survivalTime = NumericVector(maxNumberOfSubjects, NA_REAL);
    NumericVector dropoutTime = NumericVector(maxNumberOfSubjects, NA_REAL);
    for (int i = 0; i < maxNumberOfSubjects; i++) {
        if (treatmentGroup[i] == 1) {
            survivalTime[i] = getRandomSurvivalDistribution(lambda1, kappa);
            if (phi1 > 0) {
                // dropout uses kappa = 1, i.e. an exponential distribution
                dropoutTime[i] = getRandomSurvivalDistribution(phi1, 1);
            }
        } else {
            survivalTime[i] = getRandomSurvivalDistribution(lambda2, kappa);
            if (phi2 > 0) {
                dropoutTime[i] = getRandomSurvivalDistribution(phi2, 1);
            }
        }
    }
    NumericMatrix result(maxNumberOfSubjects, 2);
    result(_, 0) = survivalTime;
    result(_, 1) = dropoutTime;
    return result;
}

// Piecewise-exponential overload: draws survival times from group-specific
// piecewise exponential distributions and dropout times from constant-rate
// (phi) piecewise exponentials over the same time grid.
// Returns a maxNumberOfSubjects x 2 matrix: column 0 = survivalTime,
// column 1 = dropoutTime.
NumericMatrix getExtendedSurvivalDataSet(IntegerVector treatmentGroup, int maxNumberOfSubjects,
        NumericVector piecewiseSurvivalTime, NumericVector cdfValues1, NumericVector cdfValues2,
        NumericVector lambdaVec1, NumericVector lambdaVec2, double phi1, double phi2) {
    NumericVector survivalTime = NumericVector(maxNumberOfSubjects, NA_REAL);
    NumericVector dropoutTime = NumericVector(maxNumberOfSubjects, NA_REAL);
    for (int i = 0; i < maxNumberOfSubjects; i++) {
        if (treatmentGroup[i] == 1) {
            survivalTime[i] = getRandomPiecewiseExponentialDistribution(cdfValues1,
                lambdaVec1, piecewiseSurvivalTime);
            if (phi1 > 0) {
                dropoutTime[i] = getRandomPiecewiseExponentialDistribution(
                    cdfValues1, rep(phi1, lambdaVec1.size()), piecewiseSurvivalTime);
            }
        } else {
            survivalTime[i] = getRandomPiecewiseExponentialDistribution(cdfValues2,
                lambdaVec2, piecewiseSurvivalTime);
            if (phi2 > 0) {
                dropoutTime[i] = getRandomPiecewiseExponentialDistribution(
                    cdfValues2, rep(phi2, lambdaVec2.size()), piecewiseSurvivalTime);
            }
        }
    }
    NumericMatrix result(maxNumberOfSubjects, 2);
    result(_, 0) = survivalTime;
    result(_, 1) = dropoutTime;
    return result;
}

/* Get Simulation Results
 *
 * This function calculates the simulation results for survival data.
 *
 * @param kappa The kappa value for the Weibull distribution;
 *        if kappa = 1, then the exponential distribution will be used for simulation.
*/ // [[Rcpp::export]] List getSimulationSurvivalCpp( int designNumber, int kMax, int sided, NumericVector criticalValues, NumericVector informationRates, double conditionalPower, NumericVector plannedEvents, double thetaH1, NumericVector minNumberOfEventsPerStage, NumericVector maxNumberOfEventsPerStage, bool directionUpper, double allocationRatioPlanned, NumericVector accrualTime, IntegerVector treatmentGroup, double thetaH0, NumericVector futilityBounds, NumericVector alpha0Vec, NumericVector pi1Vec, double pi2, double eventTime, NumericVector piecewiseSurvivalTime, NumericVector cdfValues1, NumericVector cdfValues2, NumericVector lambdaVec1, NumericVector lambdaVec2, NumericVector phi, int maxNumberOfSubjects, int maxNumberOfIterations, int maxNumberOfRawDatasetsPerStage, double kappa, int calcEventsFunctionType, Nullable calcEventsFunctionR, SEXP calcEventsFunctionCpp) { Rcpp::XPtr calcEventsFunctionCppXPtr = getSimulationSuvivalStageEventsXPtrCpp(); if (calcEventsFunctionType == 0) { calcEventsFunctionR = NULL; } else if (calcEventsFunctionType == 2) { calcEventsFunctionR = NULL; calcEventsFunctionCppXPtr = Rcpp::XPtr(calcEventsFunctionCpp); } bool pwExpEnabled = isPiecewiseExponentialSurvivalEnabled(lambdaVec2); int n = 1; if (!pwExpEnabled) { n = pi1Vec.size(); } if (n < 1) { throw Rcpp::exception(tfm::format( "'pi1Vec' must have minimum length %s (is %s)", 1, pi1Vec.size()).c_str()); } int sumVectorLength = kMax * n; IntegerVector stages = IntegerVector(sumVectorLength, NA_INTEGER); NumericVector pi1Column = NumericVector(sumVectorLength, 0.0); NumericVector hazardRatioColumn = NumericVector(sumVectorLength, 0.0); NumericVector analysisTimeSum = NumericVector(sumVectorLength, 0.0); NumericVector subjectsSum = NumericVector(sumVectorLength, 0.0); NumericVector eventsSum = NumericVector(sumVectorLength, 0.0); NumericVector rejectionsSum = NumericVector(sumVectorLength, 0.0); NumericVector eventsNotAchievedSum = NumericVector(sumVectorLength, 0.0); 
NumericVector futilityStopsSum = NumericVector(sumVectorLength, 0.0); NumericVector durationsSum = NumericVector(sumVectorLength, 0.0); NumericVector iterationsSum = NumericVector(sumVectorLength, 0.0); NumericVector conditionalPowerAchievedSum = NumericVector(sumVectorLength, 0.0); int simResultsVectorLength = sumVectorLength * maxNumberOfIterations; IntegerVector iterationNumbers = IntegerVector(simResultsVectorLength, NA_INTEGER); IntegerVector stageNumbers = IntegerVector(simResultsVectorLength, NA_INTEGER); NumericVector pi1Values = NumericVector(simResultsVectorLength, NA_REAL); NumericVector hazardRatios = NumericVector(simResultsVectorLength, NA_REAL); NumericVector analysisTime = NumericVector(simResultsVectorLength, NA_REAL); NumericVector subjects = NumericVector(simResultsVectorLength, NA_REAL); NumericVector events1 = NumericVector(simResultsVectorLength, NA_REAL); NumericVector events2 = NumericVector(simResultsVectorLength, NA_REAL); NumericVector events = NumericVector(simResultsVectorLength, NA_REAL); NumericVector rejections = NumericVector(simResultsVectorLength, NA_REAL); NumericVector eventsNotAchieved = NumericVector(simResultsVectorLength, NA_REAL); NumericVector futilityStops = NumericVector(simResultsVectorLength, NA_REAL); NumericVector pValuesSeparate = NumericVector(simResultsVectorLength, NA_REAL); NumericVector testStatistics = NumericVector(simResultsVectorLength, NA_REAL); NumericVector logRankStatistics = NumericVector(simResultsVectorLength, NA_REAL); NumericVector hazardRates1 = NumericVector(simResultsVectorLength, NA_REAL); NumericVector hazardRates2 = NumericVector(simResultsVectorLength, NA_REAL); NumericVector hazardRatiosEstimate = NumericVector(simResultsVectorLength, NA_REAL); NumericVector conditionalPowerAchieved = NumericVector(simResultsVectorLength, NA_REAL); // raw datasets per stage int rawDataVectorLength = maxNumberOfRawDatasetsPerStage * n * kMax * maxNumberOfSubjects; IntegerVector rawDataPerStage = 
IntegerVector(kMax, 0); NumericVector rawDataIterationNumbers = NumericVector(rawDataVectorLength, NA_REAL); IntegerVector rawDataStageNumbers = IntegerVector(rawDataVectorLength, NA_INTEGER); NumericVector rawDataPi1Values = NumericVector(rawDataVectorLength, NA_REAL); IntegerVector rawDataSubjectIds = IntegerVector(rawDataVectorLength, NA_INTEGER); NumericVector rawDataAccrualTime = NumericVector(rawDataVectorLength, NA_REAL); IntegerVector rawDataTreatmentGroups = IntegerVector(rawDataVectorLength, NA_INTEGER); NumericVector rawDataSurvivalTime = NumericVector(rawDataVectorLength, NA_REAL); NumericVector rawDataDropoutTime = NumericVector(rawDataVectorLength, NA_REAL); NumericVector rawDataObservationTime = NumericVector(rawDataVectorLength, NA_REAL); NumericVector rawDataTimeUnderObservation = NumericVector(rawDataVectorLength, NA_REAL); LogicalVector rawDataEvent = LogicalVector(rawDataVectorLength, NA_LOGICAL); LogicalVector rawDataDropoutEvent = LogicalVector(rawDataVectorLength, NA_LOGICAL); IntegerVector rawDataCensorIndicator = IntegerVector(rawDataVectorLength, NA_INTEGER); NumericMatrix survivalDataSet; int index = 0; for (int pi1Index = 0; pi1Index < n; pi1Index++) { double pi1 = NA_REAL; double hazardRatio = NA_REAL; double lambda1 = NA_REAL; double lambda2 = NA_REAL; if (!pwExpEnabled) { if (R_IsNA((double) pi1Vec[pi1Index])) { lambda1 = lambdaVec1[pi1Index]; lambda2 = lambdaVec2[0]; } else { pi1 = pi1Vec[pi1Index]; lambda1 = getLambdaByPi(pi1, eventTime, kappa); lambda2 = getLambdaByPi(pi2, eventTime, kappa); } hazardRatio = pow(lambda1 / lambda2, kappa); } for (int k = 0; k < kMax; k++) { stages[pi1Index * kMax + k] = k + 1; } vectorInitC(pi1Index, kMax, REAL(pi1Column), pi1); vectorInitC(pi1Index, kMax, REAL(hazardRatioColumn), hazardRatio); for (int iterationIndex = 0; iterationIndex < maxNumberOfIterations; iterationIndex++) { if (!pwExpEnabled) { survivalDataSet = getExtendedSurvivalDataSet( treatmentGroup, maxNumberOfSubjects, lambda1, 
lambda2, (double) phi[0], (double) phi[1], kappa); } else { survivalDataSet = getExtendedSurvivalDataSet(treatmentGroup, maxNumberOfSubjects, piecewiseSurvivalTime, cdfValues1, cdfValues2, lambdaVec1, lambdaVec2, (double) phi[0], (double) phi[1]); } NumericVector survivalTime = survivalDataSet(_, 0); NumericVector dropoutTime = survivalDataSet(_, 1); NumericMatrix stepResults = getSimulationStepResultsSurvival( designNumber, kMax, sided, criticalValues, informationRates, conditionalPower, plannedEvents, thetaH1, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocationRatioPlanned, accrualTime, survivalTime, dropoutTime, treatmentGroup, thetaH0, futilityBounds, alpha0Vec, calcEventsFunctionType, calcEventsFunctionR, calcEventsFunctionCppXPtr); vectorSumC(pi1Index, 0, kMax, REAL(analysisTimeSum), stepResults); vectorSumC(pi1Index, 1, kMax, REAL(subjectsSum), stepResults); vectorSumC(pi1Index, 4, kMax, REAL(eventsSum), stepResults); vectorSumC(pi1Index, 5, kMax, REAL(rejectionsSum), stepResults); vectorSumC(pi1Index, 6, kMax, REAL(eventsNotAchievedSum), stepResults); vectorSumC(pi1Index, 7, kMax, REAL(futilityStopsSum), stepResults); vectorSumC(pi1Index, 8, kMax, REAL(durationsSum), stepResults); vectorSumC(pi1Index, 9, kMax, REAL(iterationsSum), stepResults); vectorSumC(pi1Index, 16, kMax, REAL(conditionalPowerAchievedSum), stepResults); // conditionalPowerAchieved // get data for (int k = 0; k < kMax; k++) { if (stepResults(k, 9) > 0) { iterationNumbers[index] = iterationIndex + 1; stageNumbers[index] = k + 1; pi1Values[index] = pi1; hazardRatios[index] = hazardRatio; analysisTime[index] = stepResults(k, 0); subjects[index] = stepResults(k, 1); events1[index] = stepResults(k, 2); events2[index] = stepResults(k, 3); events[index] = stepResults(k, 4); rejections[index] = stepResults(k, 5); eventsNotAchieved[index] = stepResults(k, 6); futilityStops[index] = stepResults(k, 7); testStatistics[index] = stepResults(k, 10); logRankStatistics[index] 
= stepResults(k, 11); hazardRates1[index] = stepResults(k, 12); hazardRates2[index] = stepResults(k, 13); hazardRatiosEstimate[index] = stepResults(k, 14); conditionalPowerAchieved[index] = stepResults(k, 16); pValuesSeparate[index] = stepResults(k, 17); index++; } } // get raw datasets per stage if (maxNumberOfRawDatasetsPerStage > 0) { double lastObservationTime = stepResults(kMax - 1, 15); for (int k = kMax - 1; k >= 0; k--) { int numberOfIterations = stepResults(k, 9); int numberOfRejections = stepResults(k, 5); int numberOfFutilityStops = stepResults(k, 7); if (rawDataPerStage[k] < maxNumberOfRawDatasetsPerStage && numberOfIterations > 0) { int start = k * maxNumberOfSubjects + pi1Index * kMax * maxNumberOfSubjects + rawDataPerStage[k] * n * kMax * maxNumberOfSubjects; double observationTime = stepResults(k, 15); if (R_IsNA(observationTime)) { break; } List logRankResult = logRankTest( accrualTime, survivalTime, dropoutTime, treatmentGroup, observationTime, directionUpper, thetaH0, true); NumericVector timeUnderObservation = logRankResult["timeUnderObservation"]; LogicalVector event = logRankResult["event"]; LogicalVector dropoutEvent = logRankResult["dropoutEvent"]; for (int i = 0; i < maxNumberOfSubjects; i++) { rawDataPi1Values[start + i] = pi1; rawDataIterationNumbers[start + i] = iterationIndex + 1; if (numberOfRejections == 0 && numberOfFutilityStops == 0) { rawDataStageNumbers[start + i] = kMax; } else { rawDataStageNumbers[start + i] = k + 1; } rawDataSubjectIds[start + i] = i + 1; rawDataAccrualTime[start + i] = accrualTime[i]; rawDataTreatmentGroups[start + i] = treatmentGroup[i]; rawDataSurvivalTime[start + i] = survivalTime[i]; rawDataDropoutTime[start + i] = dropoutTime[i]; if (numberOfRejections == 0 && numberOfFutilityStops == 0) { rawDataObservationTime[start + i] = lastObservationTime; } else { rawDataObservationTime[start + i] = observationTime; } rawDataTimeUnderObservation[start + i] = timeUnderObservation[i]; rawDataEvent[start + i] = 
event[i]; rawDataDropoutEvent[start + i] = dropoutEvent[i]; if (survivalTime[i] >= dropoutTime[i]) { rawDataCensorIndicator[start + i] = 0; } else { rawDataCensorIndicator[start + i] = 1; } } if (numberOfRejections == 0 && numberOfFutilityStops == 0) { rawDataPerStage[kMax - 1]++; } else { rawDataPerStage[k]++; } break; } } } } } NumericVector overallRejections = NumericVector(n, 0.0); NumericVector overallFutilityStops = NumericVector(n, 0.0); NumericVector duration = NumericVector(n, 0.0); NumericVector rejectionsRelative = vectorDivide(rejectionsSum, maxNumberOfIterations); NumericVector futilityStopsRelative = vectorDivide(futilityStopsSum, maxNumberOfIterations); for (int i = 0; i < n; i++) { double s1 = 0; double s2 = 0; double s3 = 0; for (int j = 0; j < kMax; j++) { s1 += rejectionsRelative[i * kMax + j]; s2 += futilityStopsRelative[i * kMax + j]; s3 += durationsSum[i * kMax + j]; } overallRejections[i] = s1; overallFutilityStops[i] = s2; duration[i] = s3 / maxNumberOfIterations; } DataFrame overview = DataFrame::create( Named("stages") = stages, Named("pi2") = NumericVector(sumVectorLength, pi2), Named("pi1") = pi1Column, Named("hazardRatioEstimate1") = hazardRatioColumn, Named("iterations") = iterationsSum, Named("eventsPerStage") = vectorDivide(eventsSum, iterationsSum), Named("eventsNotAchieved") = vectorDivide(eventsNotAchievedSum, maxNumberOfIterations), Named("numberOfSubjects") = vectorDivide(subjectsSum, iterationsSum), Named("rejectPerStage") = rejectionsRelative, Named("overallReject") = vectorRepEachValue(overallRejections, kMax), Named("futilityPerStage") = futilityStopsRelative, Named("futilityStop") = vectorRepEachValue(overallFutilityStops, kMax), Named("analysisTime") = vectorDivide(analysisTimeSum, iterationsSum), Named("studyDuration") = vectorRepEachValue(duration, kMax), Named("conditionalPowerAchieved") = vectorDivide(conditionalPowerAchievedSum, iterationsSum) ); LogicalVector trialStop = LogicalVector(simResultsVectorLength, 
NA_LOGICAL); NumericVector hazardRatioEstimateLR = NumericVector(simResultsVectorLength, NA_REAL); double logRankStatisticSign = directionUpper ? 1 : -1; for (int i = 0; i < rejections.length(); i++) { trialStop[i] = (rejections[i] == 1 || futilityStops[i] == 1 || stageNumbers[i] == kMax); if (!R_IsNA((double) events[i])) { hazardRatioEstimateLR[i] = exp(logRankStatisticSign * logRankStatistics[i] * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * (events1[i] + events2[i]))); } } DataFrame data = DataFrame::create( Named("iterationNumber") = iterationNumbers, Named("stageNumber") = stageNumbers, Named("pi1") = pi1Values, Named("pi2") = NumericVector(simResultsVectorLength, pi2), Named("hazardRatio") = hazardRatios, Named("analysisTime") = analysisTime, Named("numberOfSubjects") = subjects, Named("overallEvents1") = events1, Named("overallEvents2") = events2, Named("eventsPerStage") = events, Named("rejectPerStage") = rejections, Named("eventsNotAchieved") = eventsNotAchieved, Named("futilityPerStage") = futilityStops, Named("testStatistic") = testStatistics, Named("logRankStatistic") = logRankStatistics, Named("conditionalPowerAchieved") = conditionalPowerAchieved, Named("pValuesSeparate") = pValuesSeparate, Named("trialStop") = trialStop, Named("hazardRatioEstimateLR") = hazardRatioEstimateLR ); if (maxNumberOfRawDatasetsPerStage > 0) { DataFrame rawData = DataFrame::create( Named("iterationNumber") = rawDataIterationNumbers, Named("stopStage") = rawDataStageNumbers, Named("pi1") = rawDataPi1Values, Named("pi2") = NumericVector(rawDataVectorLength, pi2), Named("subjectId") = rawDataSubjectIds, Named("accrualTime") = rawDataAccrualTime, Named("treatmentGroup") = rawDataTreatmentGroups, Named("survivalTime") = rawDataSurvivalTime, Named("dropoutTime") = rawDataDropoutTime, Named("lastObservationTime") = rawDataObservationTime, // deprecated: observationTime Named("timeUnderObservation") = rawDataTimeUnderObservation, Named("event") = rawDataEvent, 
Named("dropoutEvent") = rawDataDropoutEvent, Named("censorIndicator") = rawDataCensorIndicator ); return List::create( _["overview"] = overview, _["data"] = data, _["rawData"] = rawData ); } return List::create( _["overview"] = overview, _["data"] = data ); } rpact/src/f_utilities.cpp0000644000176200001440000004354614410770117015214 0ustar liggesusers/** * * -- Simulation of survival data with group sequential and combination test -- * * This file is part of the R package rpact: * Confirmatory Adaptive Clinical Trial Design and Analysis * * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD * Licensed under "GNU Lesser General Public License" version 3 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3 * * RPACT company website: https://www.rpact.com * rpact package website: https://www.rpact.org * * Contact us for information about our services: info@rpact.com * * File version: $Revision: 6902 $ * Last changed: $Date: 2023-03-29 10:01:19 +0200 (Mi, 29 Mrz 2023) $ * Last changed by: $Author: pahlke $ * */ #include // [[Rcpp::plugins(cpp11)]] using namespace Rcpp; const double C_QNORM_EPSILON = 1.0e-100; // a value between 1e-323 and 1e-16 const double C_QNORM_MAXIMUM = -R::qnorm(C_QNORM_EPSILON, 0, 1, 1, 0); const double C_QNORM_MINIMUM = -C_QNORM_MAXIMUM; const double C_QNORM_THRESHOLD = floor(C_QNORM_MAXIMUM); const double C_FUNCTION_ROOT_TOLERANCE_FACTOR = 100.0; struct Exception : std::exception { char text[1000]; Exception(char const* fmt, ...) 
__attribute__((format(printf,2,3))) {
		va_list ap;
		va_start(ap, fmt);
		vsnprintf(text, sizeof text, fmt, ap);
		va_end(ap);
	}

	// Returns the formatted message buffer.
	char const* what() const throw() {
		return text;
	}
};

// Accessor for the epsilon used to clamp probabilities away from 0 before calling qnorm.
double getQNormEpsilon() {
	return C_QNORM_EPSILON;
}

// Accessor for the absolute quantile threshold beyond which results are clamped.
double getQNormThreshold() {
	return C_QNORM_THRESHOLD;
}

/**
 * Clamped wrapper around R::qnorm.
 * p is forced into (0, 1] (p <= 0 becomes 'epsilon', p > 1 becomes 1) and the
 * resulting quantile is clamped to [C_QNORM_MINIMUM, C_QNORM_MAXIMUM] so that
 * callers never receive +/-Inf.
 */
double getQNorm(double p, double mean = 0, double sd = 1,
		double lowerTail = 1, double logP = 0, double epsilon = 1.0e-100) {
	if (p <= 0) {
		p = epsilon;
	}
	if (p > 1) {
		p = 1;
	}
	double result = R::qnorm(p, mean, sd, lowerTail, logP);
	if (result < -C_QNORM_THRESHOLD) {
		result = C_QNORM_MINIMUM;
	}
	if (result > C_QNORM_THRESHOLD) {
		result = C_QNORM_MAXIMUM;
	}
	return result;
}

/**
 * Numerically stable computation of qnorm(1 - p):
 * for p < 0.5 the identity qnorm(1 - p) = -qnorm(p) is used so that p values
 * very close to 1 do not collapse to 1 in floating point; the result is
 * clamped to [C_QNORM_MINIMUM, C_QNORM_MAXIMUM] as in getQNorm().
 */
// [[Rcpp::export]]
double getOneMinusQNorm(double p, double mean = 0, double sd = 1,
		double lowerTail = 1, double logP = 0, double epsilon = 1.0e-100) {
	if (p <= 0) {
		p = epsilon;
	}
	if (p > 1) {
		p = 1;
	}
	double result;
	if (p < 0.5) {
		result = -R::qnorm(p, mean, sd, lowerTail, logP);
	} else {
		// prevent values that are close to 1 from becoming Inf, see qnorm(1)
		// example: 1 - 1e-17 = 1 in R, i.e., qnorm(1 - 1e-17) = Inf
		// on the other hand: qnorm(1e-323) = -38.44939
		result = R::qnorm(1 - p, mean, sd, lowerTail, logP);
	}
	if (result < -C_QNORM_THRESHOLD) {
		result = C_QNORM_MINIMUM;
	}
	if (result > C_QNORM_THRESHOLD) {
		result = C_QNORM_MAXIMUM;
	}
	return result;
}

/**
 * Numerically stable computation of 1 - pnorm(q):
 * for q >= 5 the complement is computed as pnorm(-q) to avoid cancellation,
 * and a result of exactly 0 is replaced by 'epsilon'.
 */
double getOneMinusPNorm(double q, double mean = 0, double sd = 1,
		double lowerTail = 1, double logP = 0, double epsilon = 1.0e-100) {
	// return 1 - R::pnorm(q, mean, sd, lowerTail, logP);
	if (q == 0) {
		return 0.5;
	}
	double result;
	if (q < 5) {
		result = 1.0 - R::pnorm(q, mean, sd, lowerTail, logP);
	} else {
		result = R::pnorm(-q, mean, sd, lowerTail, logP);
	}
	if (result <= 0) {
		result = epsilon;
	}
	return result;
}

/**
 * Returns the 1-based ordering permutation of x via stable sort.
 * NOTE(review): the template parameter list appears stripped by text
 * extraction here (tokens kept exactly as found); presumably it was
 * template <int RTYPE> with Vector<RTYPE> — confirm against the
 * original repository source.
 */
template IntegerVector order_impl(const Vector &x, bool desc) {
	auto n = x.size();
	IntegerVector idx = no_init(n);
	std::iota(idx.begin(), idx.end(), static_cast(1));
	if (desc) {
		auto comparator = [&x](size_t a, size_t b) { return x[a - 1] > x[b - 1]; };
		std::stable_sort(idx.begin(), idx.end(), comparator);
	} else {
		auto comparator = [&x](size_t a, size_t b) { return x[a - 1] < x[b - 1]; };
		std::stable_sort(idx.begin(), idx.end(), comparator);
		// simulate na.last
		// NOTE(review): counts leading entries until the first non-NA and
		// rotates them to the end — verify this matches R's na.last = TRUE
		// for inputs with interior NAs.
		size_t nas = 0;
		for (int i = 0; i < n; ++i, ++nas)
			if (!Vector::is_na(x[idx[i] - 1])) break;
		std::rotate(idx.begin(), idx.begin() + nas, idx.end());
	}
	return idx;
}

// identical to the R function base::order()
IntegerVector getOrder(SEXP x, bool desc = false) {
	switch (TYPEOF(x)) {
	case INTSXP:
		return order_impl(x, desc);
	case REALSXP:
		return order_impl(x, desc);
	case STRSXP:
		return order_impl(x, desc);
	default:
		stop("Unsupported type.");
	}
	return IntegerVector::create();
}

/**
 * Element-wise sum x + y (lengths assumed equal; no recycling).
 */
NumericVector vectorSum(NumericVector x, NumericVector y) {
	int n = x.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = x[i] + y[i];
	}
	return result;
}

/**
 * Element-wise difference x - y (lengths assumed equal; no recycling).
 */
NumericVector vectorSub(NumericVector x, NumericVector y) {
	int n = x.size();
	NumericVector result = NumericVector(n, NA_REAL);
	for (int i = 0; i < n; i++) {
		result[i] = x[i] - y[i];
	}
	return result;
}

/**
 * Sum of all elements of x; returns 0 for an empty vector.
 */
double vectorSum(NumericVector x) {
	int n = x.size();
	if (n <= 1) {
		return n == 0 ?
0 : x[0]; } double s = x[0]; for (int i = 1; i < n; i++) { s += x[i]; } return s; } NumericVector vectorSqrt(NumericVector x) { int n = x.size(); NumericVector result = NumericVector(n, NA_REAL); for (int i = 0; i < n; i++) { result[i] = sqrt((double) x[i]); } return result; } NumericVector vectorDivide(NumericVector x, double value) { int n = x.size(); NumericVector result = NumericVector(n, NA_REAL); for (int i = 0; i < n; i++) { result[i] = x[i] / value; } return result; } NumericVector vectorDivide(NumericMatrix x, int rowIndex, double value) { int n = x.size(); NumericVector result = NumericVector(n, NA_REAL); for (int i = 0; i < n; i++) { result[i] = x(rowIndex, i) / value; } return result; } NumericVector vectorDivide(NumericVector x, NumericVector y) { int n = x.size(); NumericVector result = NumericVector(n, NA_REAL); for (int i = 0; i < n; i++) { if (y[i] != 0.0) { result[i] = x[i] / y[i]; } } return result; } NumericVector vectorMultiply(NumericVector x, double multiplier) { int n = x.size(); NumericVector result = NumericVector(n, NA_REAL); for (int i = 0; i < n; i++) { result[i] = x[i] * multiplier; } return result; } NumericVector vectorMultiply(NumericVector x, NumericVector y) { if (x.size() != y.size()) { throw Exception("Failed to multiply vectors: size is different (%i != %i)", (int) x.size(), (int) y.size()); } int n = x.size(); NumericVector result = NumericVector(n, NA_REAL); for (int i = 0; i < n; i++) { result[i] = x[i] * y[i]; } return result; } NumericVector vectorPow(NumericVector x, NumericVector y) { int n = x.size(); NumericVector result = NumericVector(n, NA_REAL); for (int i = 0; i < n; i++) { result[i] = pow((double) x[i], (double) y[i]); } return result; } NumericVector vectorPow(double x, NumericVector y) { int n = y.size(); NumericVector result = NumericVector(n, NA_REAL); for (int i = 0; i < n; i++) { result[i] = pow(x, (double) y[i]); } return result; } NumericVector vectorPow2(NumericVector y, double exp) { int n = y.size(); 
NumericVector result = NumericVector(n, NA_REAL); for (int i = 0; i < n; i++) { result[i] = pow((double) y[i], exp); } return result; } NumericVector vectorRepEachValue(NumericVector x, int kMax) { int n = x.size(); NumericVector result = NumericVector(n * kMax, NA_REAL); for (int i = 0; i < n; i++) { for (int j = 0; j < kMax; j++) { result[i * kMax + j] = x[i]; } } return result; } double vectorProduct(NumericVector x) { int n = x.size(); if (n == 0) { return 0; } if (n == 1) { return x[0]; } double s = x[0]; for (int i = 1; i < n; i++) { s *= x[i]; } return s; } double vectorProduct(NumericVector x, NumericVector y) { int n = x.size(); double s = 0; for (int i = 0; i < n; i++) { s += x[i] * y[i]; } return s; } double round(double value, int digits) { double mult = std::pow(10.0, (double) digits); return round(value * mult) / mult; } void vectorSumC(int i, int j, int kMax, double *x, NumericMatrix y) { for (int k = 0; k < kMax; k++) { x[i * kMax + k] += y(k, j); } } void vectorInitC(int i, int kMax, double *x, double value) { for (int k = 0; k < kMax; k++) { x[i * kMax + k] = value; } } NumericVector concat(NumericVector a, NumericVector b) { for (int i = 0; i < b.size(); i++) { a.insert(a.end(), b[i]); } return a; } NumericMatrix matrixAdd(NumericMatrix x, NumericMatrix y) { NumericMatrix result(x.nrow(), x.ncol()); for (int i = 0; i < x.nrow(); ++i) { for (int j = 0; j < x.ncol(); ++j) { result(i, j) = x(i, j) + y(i, j); } } return result; } NumericMatrix matrixSub(NumericMatrix x, NumericMatrix y) { NumericMatrix result(x.nrow(), x.ncol()); for (int i = 0; i < x.nrow(); ++i) { for (int j = 0; j < x.ncol(); ++j) { result(i, j) = x(i, j) - y(i, j); } } return result; } NumericMatrix matrixMultiply(NumericMatrix x, double y) { NumericMatrix result(x.nrow(), x.ncol()); for (int i = 0; i < x.nrow(); ++i) { for (int j = 0; j < x.ncol(); ++j) { result(i, j) = x(i, j) * y; } } return result; } /** * Calculates root of function f in given interval using the bisection 
method */ double bisection2(std::function f, double lower, double upper, double tolerance, int maxIter) { int step = 1; double value = 1; double result = NA_REAL; do { value = (lower + upper) / 2; result = f(value); if (result > 0) { lower = value; } else { upper = value; } step++; if (step > maxIter) { throw std::invalid_argument("No root within tolerance after given iterations found."); } } while ((upper - lower) > tolerance); return std::abs(result / C_FUNCTION_ROOT_TOLERANCE_FACTOR) > tolerance ? NA_REAL : value; } /** * Calculates root of function f in given interval using the bisection method */ double bisection(std::function f, double lower, double upper, double tolerance, int maxIter) { int step = 1; double value; double result = NA_REAL; do { value = (lower + upper) / 2; result = f(value); if ((result < 0) == (f(lower) < 0)) { // since signs are now directly compared lower = value; } else { upper = value; } step++; if (step > maxIter) { throw std::invalid_argument("No root within tolerance after given iterations found."); } } while ((upper - lower) > tolerance); return std::abs(result / C_FUNCTION_ROOT_TOLERANCE_FACTOR) > tolerance ? 
NA_REAL : value; } /** * Calculates root of function f in given interval using the Brent method * See https://www.netlib.org/c/index.html * See https://www.netlib.org/c/brent.shar */ double bizero(std::function f, double lower, double upper, double tolerance, int maxIter) { double a, b, c; double fa; double fb; double fc; int iter = 0; a = lower; b = upper; fa = f(a); fb = f(b); c = a; fc = fa; for (;;) { double prev_step = b - a; double tol_act; double p; double q; double new_step; if (std::abs(fc) < std::abs(fb)) { a = b; b = c; c = a; fa = fb; fb = fc; fc = fa; } tol_act = 2 * std::numeric_limits::epsilon() * std::abs(b) + tolerance / 2; new_step = (c - b) / 2; if (std::abs(new_step) <= tol_act || fb == (double) 0) { if (std::abs(fb / C_FUNCTION_ROOT_TOLERANCE_FACTOR) > tolerance) { return bisection(f, lower, upper, tolerance, maxIter); } return b; } if (std::abs(prev_step) >= tol_act && std::abs(fa) > std::abs(fb)) { double t1, cb, t2; cb = c - b; if (a == c) { t1 = fb / fa; p = cb * t1; q = 1.0 - t1; } else { q = fa / fc; t1 = fb / fc; t2 = fb / fa; p = t2 * (cb * q * (q - t1) - (b - a) * (t1 - 1.0)); q = (q - 1.0) * (t1 - 1.0) * (t2 - 1.0); } if (p > (double) 0) q = -q; else p = -p; if (p < (0.75 * cb * q - std::abs(tol_act * q) / 2) && p < std::abs(prev_step * q / 2)) new_step = p / q; } if (std::abs(new_step) < tol_act) { if (new_step > (double) 0) { new_step = tol_act; } else { new_step = -tol_act; } } a = b; fa = fb; b += new_step; fb = f(b); if ((fb > 0 && fc > 0) || (fb < 0 && fc < 0)) { c = a; fc = fa; } iter++; if (iter > maxIter) { throw std::invalid_argument("No root within tolerance after given iterations found"); } } return bisection(f, lower, upper, tolerance, maxIter); } /** * Calculates root of function f in given interval using the Brent method * See https://www.netlib.org/c/index.html * See https://www.netlib.org/c/brent.shar */ double zeroin(std::function f, double lower, double upper, double tolerance, int maxIter) { double a, b, c; double 
fa; double fb; double fc; int iter = 0; a = lower; b = upper; fa = f(a); fb = f(b); c = a; fc = fa; for (;;) { double prev_step = b - a; double tol_act; double p; double q; double new_step; if (std::abs(fc) < std::abs(fb)) { a = b; b = c; c = a; fa = fb; fb = fc; fc = fa; } tol_act = 2 * std::numeric_limits::epsilon() * std::abs(b) + tolerance / 2; new_step = (c - b) / 2; if (std::abs(new_step) <= tol_act || fb == (double) 0) { return b; } if (std::abs(prev_step) >= tol_act && std::abs(fa) > std::abs(fb)) { double t1, cb, t2; cb = c - b; if (a == c) { t1 = fb / fa; p = cb * t1; q = 1.0 - t1; } else { q = fa / fc; t1 = fb / fc; t2 = fb / fa; p = t2 * (cb * q * (q - t1) - (b - a) * (t1 - 1.0)); q = (q - 1.0) * (t1 - 1.0) * (t2 - 1.0); } if (p > (double) 0) q = -q; else p = -p; if (p < (0.75 * cb * q - std::abs(tol_act * q) / 2) && p < std::abs(prev_step * q / 2)) new_step = p / q; } if (std::abs(new_step) < tol_act) { if (new_step > (double) 0) { new_step = tol_act; } else { new_step = -tol_act; } } a = b; fa = fb; b += new_step; fb = f(b); if ((fb > 0 && fc > 0) || (fb < 0 && fc < 0)) { c = a; fc = fa; } iter++; if (iter > maxIter) { throw std::invalid_argument("No root within tolerance after given iterations found"); } } return NA_REAL; } // [[Rcpp::export]] double zeroin(Function f, double lower, double upper, double tolerance, int maxIter) { return zeroin([&](double x){return Rf_asReal(f(x));}, lower, upper, tolerance, maxIter); } double max(NumericVector x) { if (x.length() == 0) throw std::invalid_argument("Vector is Empty."); double max = x[0]; for (int i = 1; i < x.length(); i++) { if (x[i] > max) max = x[i]; } return max; } double min(NumericVector x) { if (x.length() == 0) throw std::invalid_argument("Vector is Empty."); double min = x[0]; for (int i = 1; i < x.length(); i++) { if (x[i] < min) min = x[i]; } return min; } /** * Returns the subvector of vector x with the given interval */ NumericVector rangeVector(NumericVector x, int from, int to) { int 
index = 0;
	NumericVector res;
	if (from <= to) {
		res = NumericVector(to - from + 1);
		for (int i = from; i <= to; i++) {
			res[index] = x[i];
			index++;
		}
	} else {
		// from > to: the subvector is returned in reversed order
		res = NumericVector(from - to + 1);
		for (int i = from; i >= to; i--) {
			res[index] = x[i];
			index++;
		}
	}
	return res;
}

/**
 * Returns the decimal string of the std::hash digest of x.
 * NOTE(review): std::hash is implementation-defined, so the value is stable
 * only within one standard-library build — do not persist or compare across
 * platforms. (The hash template argument appears stripped by text extraction;
 * tokens kept as found.)
 */
// [[Rcpp::export]]
std::string getCipheredValue(String x) {
	std::size_t hashValue = std::hash{}(x);
	return std::to_string(hashValue);
}

// Writes s to the R console (Rcout), followed by a newline.
void logDebug(std::string s) {
	Rcout << s << std::endl;
}

/**
 * Seed safe implementation of stats:rt()
 * Draws one value as N(ncp, 1) / sqrt(chisq(df) / df) using R's RNG stream
 * (Rcpp::rnorm / R::rchisq), so results are reproducible via set.seed().
 */
double getRandomTDistribution(double df, double ncp) {
	return Rcpp::rnorm(1, ncp)[0] / sqrt(R::rchisq(df) / df);
}

/**
 * Approximates x by a rational number via a continued-fraction expansion.
 * Iterates at most 'maxNumberOfSearchSteps' times and stops as soon as
 * |x - numerator/denominator| < epsilon; returns c(numerator, denominator).
 * An (almost) integral x terminates on the first convergence check before
 * factor1 is inverted.
 */
// [[Rcpp::export]]
IntegerVector getFraction(double x, double epsilon = 1.0e-6, int maxNumberOfSearchSteps = 30) {
	int numerator = (int) floor(x);
	int numerator0;
	int numerator1 = 1;
	int denominator = 1;
	int denominator0;
	int denominator1 = 0;
	int factor0;
	double factor1 = x - (double) numerator;
	int i = 0;
	while (++i < maxNumberOfSearchSteps) {
		if (fabs(x - (double) numerator / (double) denominator) < epsilon) {
			break;
		}
		factor1 = 1 / factor1;
		factor0 = (int) floor(factor1);
		factor1 = factor1 - factor0;
		numerator0 = numerator1;
		numerator1 = numerator;
		numerator = factor0 * numerator1 + numerator0;
		denominator0 = denominator1;
		denominator1 = denominator;
		denominator = factor0 * denominator1 + denominator0;
	}
	return IntegerVector::create(numerator, denominator);
}
rpact/src/f_simulation_base_rates.cpp0000644000176200001440000010010414435554745017553 0ustar liggesusers/**
 *
 * -- Simulation of means with group sequential and combination test --
 *
 * This file is part of the R package rpact:
 * Confirmatory Adaptive Clinical Trial Design and Analysis
 *
 * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
 * Licensed under "GNU Lesser General Public License" version 3
 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3
 *
 * RPACT company website: https://www.rpact.com
 * rpact package website: https://www.rpact.org
 *
 *
Contact us for information about our services: info@rpact.com
 *
 * File version: $Revision: 7019 $
 * Last changed: $Date: 2023-05-31 07:23:47 +0200 (Mi, 31 Mai 2023) $
 * Last changed by: $Author: pahlke $
 *
 */

#include // [[Rcpp::plugins(cpp11)]]
#include "f_assertions.h"
#include "f_utilities.h"
#include "rpact_types.h"

using namespace Rcpp;

// Sign function: -1 for negative, 0 for zero, 1 for positive.
double sign(double x) {
	return x < 0 ? -1 : x == 0 ? 0 : 1;
}

/**
 * Constrained maximum-likelihood estimates (ml1, ml2) of the two event rates
 * under the null hypothesis rate1 - rate2 = theta, used for the
 * Farrington-Manning score test (cf. Farrington & Manning, 1990).
 * 'allocation' is the sample size ratio n1 / n2 (see callers passing n1 / n2).
 * For theta != 0 the cubic likelihood equation is solved in closed form via
 * the trigonometric method; both estimates are clamped to [0, 1].
 */
NumericVector getFarringtonManningValuesDiffCpp(double rate1, double rate2, double theta, double allocation) {
	if (theta == 0) {
		// Under equality of rates the pooled estimate solves the constraint directly.
		double ml1 = (allocation * rate1 + rate2) / (1 + allocation);
		double ml2 = ml1;
		return NumericVector::create(ml1, ml2);
	}
	double a = 1 + 1 / allocation;
	double b = -(1 + 1 / allocation + rate1 + rate2 / allocation + theta * (1 / allocation + 2));
	double c = pow(theta, 2) + theta * (2 * rate1 + 1 / allocation + 1) + rate1 + rate2 / allocation;
	double d = -theta * (1 + theta) * rate1;
	NumericVector v = NumericVector::create(pow(b, 3) / pow(3 * a, 3) - b * c / (6 * pow(a, 2)) + d / (2 * a));
	double u, w;
	if (!R_IsNA((double) v[0]) && (v[0] == 0)) {
		// Degenerate case v == 0: acos(0) handled explicitly to avoid 0/0.
		u = sqrt(pow(b, 2) / pow(3 * a, 2) - c / (3 * a));
		w = std::acos(-1) / 2;
	} else {
		u = sign((double) v[0]) * sqrt(pow(b, 2) / pow(3 * a, 2) - c / (3 * a));
		w = 1.0 / 3.0 * (std::acos(-1.0) + std::acos((double) v[0] / pow(u, 3)));
	}
	double ml1 = std::min(std::max(0.0, 2 * u * cos(w) - b / (3 * a)), 1.0);
	double ml2 = std::min(std::max(0.0, ml1 - theta), 1.0);
	return NumericVector::create(ml1, ml2);
}

/**
 * Constrained maximum-likelihood estimates (ml1, ml2) of the two event rates
 * under the null hypothesis rate1 / rate2 = theta (risk ratio), solving the
 * quadratic likelihood equation in closed form (Farrington & Manning, 1990).
 * 'allocation' is the sample size ratio n1 / n2.
 */
NumericVector getFarringtonManningValuesRatioCpp(double rate1, double rate2, double theta, double allocation) {
	if (theta == 1) {
		// Ratio of 1 reduces to the pooled estimate, returned for both groups.
		return rep((allocation * rate1 + rate2) / (1 + allocation), 2);
	}
	double a = 1 + 1 / allocation;
	double b = -((1 + rate2 / allocation) * theta + 1 / allocation + rate1);
	double c = (rate1 + rate2 / allocation) * theta;
	double ml1 = (-b - sqrt(pow(b, 2) - 4 * a * c)) / (2 * a);
	double ml2 = ml1 / theta;
	return NumericVector::create(ml1, ml2);
}

List
getTestStatisticsRatesCpp(int designNumber, NumericVector informationRates, int groups, bool normalApproximation, bool riskRatio, double thetaH0, bool directionUpper, NumericMatrix eventsPerStage, NumericMatrix sampleSizesPerStage, Nullable testStatisticsPerStage_) { int stage = sampleSizesPerStage.ncol(); NumericVector testStatisticsPerStage = NumericVector(stage, NA_REAL); if (stage > 1 && testStatisticsPerStage_.isNotNull()) { NumericVector testStatisticsPerStageTemp; testStatisticsPerStageTemp = testStatisticsPerStage_; for (int i = 0; i < stage - 1; i++) { testStatisticsPerStage[i] = testStatisticsPerStageTemp[i]; } } NumericVector stagewiseRates(groups); NumericVector overallRate(groups); double value = NA_REAL; NumericVector pValuesSeparate = NumericVector(stage, NA_REAL); if (groups == 1L) { stagewiseRates = eventsPerStage[stage - 1] / sampleSizesPerStage[stage - 1]; double eventsPerStageSum = 0; double sampleSizesPerStageSum = 0; for (int i = 0; i < stage; i++) { eventsPerStageSum += eventsPerStage[i]; sampleSizesPerStageSum += sampleSizesPerStage[i]; } overallRate[0] = eventsPerStageSum / sampleSizesPerStageSum; } else { stagewiseRates = eventsPerStage(_, stage - 1) / sampleSizesPerStage(_, stage - 1); if (stage == 1) { overallRate = eventsPerStage(_, 0) / sampleSizesPerStage(_, 0); } else { NumericVector eventsPerStageSums(2); NumericVector sampleSizesPerStageSums(2); for (int i = 0; i < stage; i++) { eventsPerStageSums[0] += eventsPerStage(0, i); eventsPerStageSums[1] += eventsPerStage(1, i); sampleSizesPerStageSums[0] += sampleSizesPerStage(0, i); sampleSizesPerStageSums[1] += sampleSizesPerStage(1, i); } overallRate = eventsPerStageSums / sampleSizesPerStageSums; } } if (designNumber == 1L) { double n1 = sum(sampleSizesPerStage(0, _)); double e1 = sum(eventsPerStage(0, _)); double r1 = e1 / n1; if (groups == 1L) { if (!normalApproximation) { if (directionUpper) { value = getOneMinusQNorm(R::pbinom(e1 - 1.0, n1, thetaH0, false, false)); } else { value 
= getOneMinusQNorm(R::pbinom(e1, n1, thetaH0, true, false)); } } else { value = (r1 - thetaH0) / sqrt(thetaH0 * (1.0 - thetaH0)) * sqrt(n1); } } else { double n2 = sum(sampleSizesPerStage(1, _)); double e2 = sum(eventsPerStage(1, _)); double r2 = e2 / n2; if (!normalApproximation) { if (directionUpper) { value = getOneMinusQNorm(R::phyper(e1 - 1.0, e1 + e2, n1 + n2 - e1 - e2, n1, false, false)); } else { value = getOneMinusQNorm(R::phyper(e1, e1 + e2, n1 + n2 - e1 - e2, n1, true, false)); } } else { if (!riskRatio) { if (r1 - r2 - thetaH0 == 0) { value = 0; } else { NumericVector fm = getFarringtonManningValuesDiffCpp(r1, r2, thetaH0, n1 / n2); value = (r1 - r2 - thetaH0) / sqrt((double) fm[0] * (1.0 - fm[0]) / n1 + fm[1] * (1.0 - fm[1]) / n2); } } else { if (r1 - r2 * thetaH0 == 0) { value = 0; } else { NumericVector fm = getFarringtonManningValuesRatioCpp(r1, r2, thetaH0, n1 / n2); value = (r1 - r2 * thetaH0) / sqrt((double) fm[0] * (1.0 - fm[0]) / n1 + pow(thetaH0, 2) * fm[1] * (1.0 - fm[1]) / n2); } } value = (2.0 * directionUpper - 1.0) * value; } } } else { if (stage == 1L) { double n1 = sampleSizesPerStage(0, 0); double e1 = eventsPerStage(0, 0); double r1 = e1 / n1; if (groups == 1L) { if (!normalApproximation) { if (directionUpper) { testStatisticsPerStage[0] = getOneMinusQNorm(R::pbinom(e1 - 1, n1, thetaH0, false, false)); } else { testStatisticsPerStage[0] = getOneMinusQNorm(R::pbinom(e1, n1, thetaH0, true, false)); } } else { testStatisticsPerStage[0] = (2.0 * directionUpper - 1.0) * (r1 - thetaH0) / sqrt(thetaH0 * (1.0 - thetaH0)) * sqrt(n1); } } else { double n2 = sampleSizesPerStage(1, 0); double e2 = eventsPerStage(1, 0); double r2 = e2 / n2; if (!normalApproximation) { if (directionUpper) { testStatisticsPerStage[0] = getOneMinusQNorm( R::phyper(e1 - 1, e1 + e2, n1 + n2 - e1 - e2, n1, false, false)); } else { testStatisticsPerStage[0] = getOneMinusQNorm( R::phyper(e1, e1 + e2, n1 + n2 - e1 - e2, n1, true, false)); } } else { if (!riskRatio) { if 
(r1 - r2 - thetaH0 == 0) { testStatisticsPerStage[0] = 0; } else { NumericVector fm = getFarringtonManningValuesDiffCpp(r1, r2, thetaH0, n1 / n2); testStatisticsPerStage[0] = (2.0 * directionUpper - 1) * (r1 - r2 - thetaH0) / sqrt((double) fm[0] * (1.0 - fm[0]) / n1 + fm[1] * (1.0 - fm[1]) / n2); } } else { if (r1 - r2 * thetaH0 == 0) { testStatisticsPerStage[0] = 0; } else { NumericVector fm = getFarringtonManningValuesRatioCpp(r1, r2, thetaH0, n1 / n2); testStatisticsPerStage[0] = (2.0 * directionUpper - 1) * (r1 - r2 * thetaH0) / sqrt( (double) fm[0] * (1.0 - fm[0]) / n1 + pow(thetaH0, 2) * fm[1] * (1.0 - fm[1]) / n2); } } } } } else { double n1 = sampleSizesPerStage(0, stage - 1); double e1 = eventsPerStage(0, stage - 1); double r1 = e1 / n1; if (groups == 1L) { if (!normalApproximation) { if (directionUpper) { testStatisticsPerStage[stage - 1] = getOneMinusQNorm(R::pbinom(e1 - 1.0, n1, thetaH0, false, false)); } else { testStatisticsPerStage[stage - 1] = getOneMinusQNorm(R::pbinom(e1, n1, thetaH0, true, false)); } } else { testStatisticsPerStage[stage - 1] = (2.0 * directionUpper - 1.0) * (r1 - thetaH0) / sqrt(thetaH0 * (1.0 - thetaH0)) * sqrt(n1); } } else { double n2 = sampleSizesPerStage(1.0, stage - 1); double e2 = eventsPerStage(1.0, stage - 1); double r2 = e2 / n2; if (!normalApproximation) { if (directionUpper) { testStatisticsPerStage[stage - 1] = getOneMinusQNorm(R::phyper(e1 - 1.0, e1 + e2, n1 + n2 - e1 - e2, n1, false, false)); } else { testStatisticsPerStage[stage - 1] = getOneMinusQNorm(R::phyper(e1, e1 + e2, n1 + n2 - e1 - e2, n1, true, false)); } } else { if (!riskRatio) { if (r1 - r2 - thetaH0 == 0) { testStatisticsPerStage[stage - 1] = 0; } else { NumericVector fm = getFarringtonManningValuesDiffCpp(r1, r2, thetaH0, n1 / n2); testStatisticsPerStage[stage - 1] = (2.0 * directionUpper - 1.0) * (r1 - r2 - thetaH0) / sqrt((double) fm[0] * (1.0 - fm[0]) / n1 + fm[1] * (1.0 - fm[1]) / n2); } } else { if (r1 - r2 * thetaH0 == 0) { 
testStatisticsPerStage[stage - 1] = 0; } else { NumericVector fm = getFarringtonManningValuesRatioCpp(r1, r2, thetaH0, n1 / n2); testStatisticsPerStage[stage - 1] = (2.0 * directionUpper - 1.0) * (r1 - r2 * thetaH0) / sqrt( (double) fm[0] * (1.0 - fm[0]) / n1 + pow(thetaH0, 2) * fm[1] * (1.0 - fm[1]) / n2); } } } } } if (designNumber == 2L) { if (stage == 1) { value = testStatisticsPerStage[0]; } else { value = (sqrt((double) informationRates[0]) * testStatisticsPerStage[0] + vectorProduct( sqrt(rangeVector(informationRates, 1, stage - 1) - rangeVector(informationRates, 0, stage - 2)), rangeVector(testStatisticsPerStage, 1, stage - 1))) / sqrt((double) informationRates[stage - 1]); } } else if (designNumber == 3L) { if (stage == 1) { value = getOneMinusPNorm((double) testStatisticsPerStage[0]); } else { NumericVector weightsFisher(stage); weightsFisher[0] = 1; for (int i = 1; i < stage; i++) { weightsFisher[i] = sqrt((double) informationRates[i] - informationRates[i - 1]) / sqrt((double) informationRates[0]); } value = 1; for (int i = 0; i < stage; i++) { value *= pow(getOneMinusPNorm((double) testStatisticsPerStage[i]), (double) weightsFisher[i]); } } } pValuesSeparate = NumericVector(stage, NA_REAL); for (int i = 0; i < stage; i++) { pValuesSeparate[i] = getOneMinusPNorm((double) testStatisticsPerStage[i]); } } return List::create( _["value"] = value, _["stagewiseRates"] = stagewiseRates, _["overallRate"] = overallRate, _["sampleSizesPerStage"] = sampleSizesPerStage, _["testStatisticsPerStage"] = testStatisticsPerStage, _["pValuesSeparate"] = pValuesSeparate); } double getSimulationRatesStageSubjectsCpp( int stage, bool riskRatio, double thetaH0, int groups, NumericVector plannedSubjects, bool directionUpper, NumericVector allocationRatioPlanned, NumericVector minNumberOfSubjectsPerStage, NumericVector maxNumberOfSubjectsPerStage, NumericVector sampleSizesPerStage, NumericVector conditionalPower, NumericVector overallRate, double conditionalCriticalValue, double 
farringtonManningValue1, double farringtonManningValue2) { if (R_IsNA((double) conditionalPower[0])) { return plannedSubjects[stage - 1] - plannedSubjects[stage - 2]; } double stageSubjects; if (groups == 1L) { stageSubjects = pow( std::max(0.0, conditionalCriticalValue * sqrt(thetaH0 * (1 - thetaH0)) + getQNorm((double) conditionalPower[0]) * sqrt((double) overallRate[0] * (1 - overallRate[0]))), 2) / pow(std::max(1e-12, (2 * directionUpper - 1) * ((double) overallRate[0] - thetaH0)), 2); } else { double allocationRatio = allocationRatioPlanned[stage - 1]; double mult = 1; double corr = thetaH0; if (riskRatio) { mult = thetaH0; corr = 0; } stageSubjects = (1 + 1 / allocationRatio) * pow( std::max(0.0, conditionalCriticalValue * sqrt( farringtonManningValue1 * (1 - farringtonManningValue1) + farringtonManningValue2 * (1 - farringtonManningValue2) * allocationRatio * pow(mult, 2)) + getQNorm((double) conditionalPower[0]) * sqrt( (double) overallRate[0] * (1 - overallRate[0]) + overallRate[1] * (1 - overallRate[1]) * allocationRatio * pow(mult, 2))), 2) / pow(std::max(1e-12, (2 * directionUpper - 1) * ((double) overallRate[0] - mult * overallRate[1] - corr)), 2); } stageSubjects = ceil( std::min(std::max((double) minNumberOfSubjectsPerStage[stage - 1], stageSubjects), (double) maxNumberOfSubjectsPerStage[stage - 1])); return stageSubjects; } Rcpp::XPtr getSimulationRatesStageSubjectsXPtrCpp() { return(Rcpp::XPtr(new calcSubjectsFunctionRatesPtr(&getSimulationRatesStageSubjectsCpp))); } double getConditionalCriticalValueFisher( NumericVector criticalValues, List testStatistic, NumericVector informationRates, int k) { assertIsInInterval(k, "k", 1.0, informationRates.length()); assertIsInInterval(k, "k", 1.0, criticalValues.length()); double criticalValue = criticalValues[k - 1]; NumericVector testStatisticValue = testStatistic["value"]; double value = testStatisticValue[0]; double informationRateDiff = informationRates[k - 1] - informationRates[k - 2]; double baseValue 
= criticalValue / value;
    // Exponent derived from the Fisher combination weight of the next stage.
    double powerValue = 1.0 / sqrt(informationRateDiff / informationRates[0]);
    return getOneMinusQNorm(pow(baseValue, powerValue));
}

// Executes one stage k of a single simulation run for the rates design:
// optional sample size recalculation (k > 1), binomial simulation of the
// stage data, test statistic update, and the reject/futility/stop decision.
// Returns a List with the updated per-stage matrices and stage summaries.
List getSimulationStepRatesCpp(int k, int kMax, int designNumber,
        NumericVector informationRates, NumericVector futilityBounds,
        NumericVector alpha0Vec, NumericVector criticalValues,
        bool riskRatio, double thetaH0, double pi1, double pi2, int groups,
        bool normalApproximation, NumericVector plannedSubjects, bool directionUpper,
        NumericVector allocationRatioPlanned, NumericVector minNumberOfSubjectsPerStage,
        NumericVector maxNumberOfSubjectsPerStage, NumericVector conditionalPower,
        NumericVector pi1H1, NumericVector pi2H1,
        NumericMatrix sampleSizesPerStage, NumericMatrix eventsPerStage,
        Nullable<NumericVector> testStatisticsPerStage, List testStatistic,
        int calcSubjectsFunctionType, Nullable<Function> calcSubjectsFunctionR,
        Rcpp::XPtr<calcSubjectsFunctionRatesPtr> calcSubjectsFunctionCpp) {

    NumericVector testStatisticsPerStage_ = NumericVector(0);
    if (testStatisticsPerStage.isNotNull()) {
        testStatisticsPerStage_ = testStatisticsPerStage;
    }
    double stageSubjects = plannedSubjects[0];
    double allocationRatio = allocationRatioPlanned[k - 1];

    // perform event size recalculation for stages 2, ..., kMax
    double simulatedConditionalPower = 0;
    double conditionalCriticalValue = NA_REAL;
    if (k > 1) {
        double theta = 0;
        NumericVector fm;

        // used effect size is either estimated from test statistic or pre-fixed.
        // NOTE(review): 'overallRate' is an Rcpp proxy aliasing the vector stored
        // inside 'testStatistic'; the pi1H1/pi2H1 overrides below therefore also
        // modify 'testStatistic' until it is recomputed further down - confirm intended.
        NumericVector overallRate = testStatistic["overallRate"];
        if (!R_IsNA((double) pi1H1[0])) {
            overallRate[0] = pi1H1[0];
        }
        if (groups == 2L && !R_IsNA((double) pi2H1[0])) {
            overallRate[1] = pi2H1[0];
        }

        // conditional critical value to reject the null hypotheses at the next stage of the trial
        if (designNumber == 3L) {
            conditionalCriticalValue = getConditionalCriticalValueFisher(
                criticalValues, testStatistic, informationRates, k);
        } else {
            if (criticalValues[k - 1] >= 6) {
                // bound is effectively unreachable
                conditionalCriticalValue = R_PosInf;
            } else {
                conditionalCriticalValue = (criticalValues[k - 1] *
                    sqrt((double) informationRates[k - 1]) -
                    as<NumericVector>(testStatistic["value"])[0] *
                    sqrt((double) informationRates[k - 2])) /
                    sqrt((double) informationRates[k - 1] - informationRates[k - 2]);
            }
        }

        // Farrington-Manning restricted ML rates under H0 (two-group case only)
        if (groups == 2L) {
            if (!riskRatio) {
                fm = getFarringtonManningValuesDiffCpp((double) overallRate[0],
                    (double) overallRate[1], thetaH0, allocationRatio);
            } else {
                fm = getFarringtonManningValuesRatioCpp((double) overallRate[0],
                    (double) overallRate[1], thetaH0, allocationRatio);
            }
        } else {
            fm = rep(0.0, 2);
        }

        if (calcSubjectsFunctionType == 1 && calcSubjectsFunctionR.isNotNull()) {
            // user-supplied R recalculation function
            stageSubjects = Rf_asReal(
                as<Function>(calcSubjectsFunctionR)(
                    _["stage"] = k,
                    _["riskRatio"] = riskRatio,
                    _["thetaH0"] = thetaH0,
                    _["groups"] = groups,
                    _["plannedSubjects"] = plannedSubjects,
                    _["directionUpper"] = directionUpper,
                    _["allocationRatioPlanned"] = allocationRatioPlanned,
                    _["minNumberOfSubjectsPerStage"] = minNumberOfSubjectsPerStage,
                    _["maxNumberOfSubjectsPerStage"] = maxNumberOfSubjectsPerStage,
                    _["sampleSizesPerStage"] = sampleSizesPerStage,
                    _["conditionalPower"] = conditionalPower,
                    _["overallRate"] = overallRate,
                    _["conditionalCriticalValue"] = conditionalCriticalValue,
                    _["farringtonManningValue1"] = fm[0],
                    _["farringtonManningValue2"] = fm[1]));
        } else {
            // default or user-supplied C++ recalculation function
            calcSubjectsFunctionRatesPtr fun = *calcSubjectsFunctionCpp;
            stageSubjects = fun(k, riskRatio, thetaH0, groups, plannedSubjects,
                directionUpper, allocationRatioPlanned,
                minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage,
                sampleSizesPerStage, conditionalPower, overallRate,
                conditionalCriticalValue, (double) fm[0], (double) fm[1]);
        }

        // calculate conditional power for selected stageSubjects;
        // theta is the standardized effect, zero if the variance degenerates
        if (groups == 1L) {
            if (overallRate[0] * (1.0 - overallRate[0]) == 0) {
                theta = 0;
            } else {
                theta = (overallRate[0] - thetaH0) /
                    sqrt((double) overallRate[0] * (1.0 - overallRate[0])) +
                    sign((double) overallRate[0] - thetaH0) *
                    conditionalCriticalValue * (1.0 - sqrt(thetaH0 * (1.0 - thetaH0) /
                    (overallRate[0] * (1.0 - overallRate[0])))) /
                    sqrt(stageSubjects);
            }
        } else {
            if (overallRate[0] * (1.0 - overallRate[0]) +
                    overallRate[1] * (1.0 - overallRate[1]) == 0) {
                theta = 0;
            } else {
                if (!riskRatio) {
                    // rate difference parameterization
                    theta = sqrt(allocationRatio) / (1.0 + allocationRatio) *
                        ((overallRate[0] - overallRate[1] - thetaH0) * sqrt(1.0 + allocationRatio) / sqrt(
                        (double) overallRate[0] * (1.0 - overallRate[0]) +
                        allocationRatio * overallRate[1] * (1.0 - overallRate[1])) +
                        sign((double) overallRate[0] - overallRate[1] - thetaH0) *
                        conditionalCriticalValue * (1.0 - sqrt(
                        (double) fm[0] * (1.0 - fm[0]) + allocationRatio * fm[1] * (1.0 - fm[1])) / sqrt(
                        (double) overallRate[0] * (1.0 - overallRate[0]) +
                        allocationRatio * overallRate[1] * (1.0 - overallRate[1]))) *
                        (1.0 + allocationRatio) / sqrt(allocationRatio * stageSubjects));
                } else {
                    // risk ratio parameterization
                    theta = sqrt(allocationRatio) / (1.0 + allocationRatio) *
                        ((overallRate[0] - thetaH0 * overallRate[1]) * sqrt(1.0 + allocationRatio) / sqrt(
                        (double) overallRate[0] * (1.0 - overallRate[0]) +
                        allocationRatio * pow(thetaH0, 2) * overallRate[1] * (1.0 - overallRate[1])) +
                        sign((double) overallRate[0] - thetaH0 * overallRate[1]) *
                        conditionalCriticalValue * (1.0 - sqrt(
                        (double) fm[0] * (1.0 - fm[0]) +
                        allocationRatio * pow(thetaH0, 2) * fm[1] * (1.0 - fm[1])) / sqrt(
                        (double) overallRate[0] * (1.0 - overallRate[0]) +
                        allocationRatio * pow(thetaH0, 2) * overallRate[1] * (1.0 - overallRate[1]))) *
                        (1.0 + allocationRatio) / sqrt(allocationRatio * stageSubjects));
                }
            }
        }
        if (!directionUpper) {
            theta = -theta;
        }
        simulatedConditionalPower = getOneMinusPNorm(
            conditionalCriticalValue - theta * sqrt(stageSubjects));
    }

    // simulate events with achieved sample size
    if (groups == 1L) {
        double n1 = stageSubjects;
        double v1 = R::rbinom(n1, pi1);
        eventsPerStage = cbind(eventsPerStage, NumericVector::create(v1));
        sampleSizesPerStage = cbind(sampleSizesPerStage, NumericVector::create(n1));
    } else {
        // split stage subjects by the planned allocation ratio
        int n1 = std::ceil(allocationRatio * stageSubjects / (1.0 + allocationRatio));
        int n2 = stageSubjects - n1;
        double v1 = R::rbinom(n1, pi1);
        double v2 = R::rbinom(n2, pi2);
        NumericVector eventsPerStageCol = NumericVector::create(v1, v2);
        NumericVector sampleSizesPerStageCol = NumericVector::create(n1, n2);
        eventsPerStage = cbind(eventsPerStage, eventsPerStageCol);
        sampleSizesPerStage = cbind(sampleSizesPerStage, sampleSizesPerStageCol);
    }

    // recompute the (combination) test statistic with the new stage data
    testStatistic = getTestStatisticsRatesCpp(designNumber, informationRates, groups,
        normalApproximation, riskRatio, thetaH0, directionUpper,
        eventsPerStage, sampleSizesPerStage, testStatisticsPerStage_);
    NumericVector testStatisticsPerStageRet = testStatistic["testStatisticsPerStage"];
    double var = testStatisticsPerStageRet[k - 1];
    testStatisticsPerStage_.push_back(var);

    double simulatedRejections = 0;
    double simulatedFutilityStop = 0;
    bool trialStop = k == kMax;
    NumericVector testStatisticValues = testStatistic["value"];
    double testStatisticValue = testStatisticValues[0];
    double criticalValue = NA_REAL;
    if (k - 1 < criticalValues.size()) {
        criticalValue = criticalValues[k - 1];
    }
    if (designNumber == 3L) {
        // Fisher combination: small statistics are significant
        if (!R_IsNA(testStatisticValue) && !R_IsNA(criticalValue) &&
                testStatisticValue <= criticalValue) {
            simulatedRejections = 1;
            trialStop = true;
        }
        if (k < kMax) {
            // binding futility on the separate stage-wise p-value vs alpha0
            NumericVector testStatisticPValuesSeparate = testStatistic["pValuesSeparate"];
            double testStatisticPValueSeparate = NA_REAL;
            if (k - 1 < testStatisticPValuesSeparate.size()) {
                testStatisticPValueSeparate = testStatisticPValuesSeparate[k - 1];
            }
            double alpha0 = NA_REAL;
            if (k - 1 < alpha0Vec.size()) {
                alpha0 = alpha0Vec[k - 1];
            }
            if (!R_IsNA(testStatisticPValueSeparate) && !R_IsNA(alpha0) &&
                    testStatisticPValueSeparate >= alpha0) {
                simulatedFutilityStop = 1;
                trialStop = true;
            }
        }
    } else {
        // group sequential / inverse normal: large z-values are significant
        if (!R_IsNA(testStatisticValue) && !R_IsNA(criticalValue) &&
                testStatisticValue >= criticalValue) {
            simulatedRejections = 1;
            trialStop = true;
        }
        if (k < kMax) {
            double futilityBound = NA_REAL;
            if (k - 1 < futilityBounds.size()) {
                futilityBound = futilityBounds[k - 1];
            }
            if (!R_IsNA(testStatisticValue) &&
                    !R_IsNA(futilityBound) && testStatisticValue <= futilityBound) {
                simulatedFutilityStop = 1;
                trialStop = true;
            }
        }
    }

    return List::create(
        _["trialStop"] = trialStop,
        _["sampleSizesPerStage"] = sampleSizesPerStage,
        _["eventsPerStage"] = eventsPerStage,
        _["testStatisticsPerStage"] = testStatisticsPerStage_,
        _["testStatistic"] = testStatistic,
        _["simulatedSubjects"] = stageSubjects,
        _["simulatedRejections"] = simulatedRejections,
        _["simulatedFutilityStop"] = simulatedFutilityStop,
        _["simulatedConditionalPower"] = simulatedConditionalPower
    );
}

// Entry point (exported to R): runs the full rates simulation over all pi1
// scenarios and iterations, aggregating per-stage summaries and raw data.
// [[Rcpp::export]]
List getSimulationRatesCpp(
        int kMax, NumericVector informationRates, NumericVector criticalValues,
        NumericVector pi1, double pi2, int maxNumberOfIterations, int designNumber,
        int groups, NumericVector futilityBounds, NumericVector alpha0Vec,
        NumericVector minNumberOfSubjectsPerStage, NumericVector maxNumberOfSubjectsPerStage,
        NumericVector conditionalPower, NumericVector pi1H1, NumericVector pi2H1,
        bool normalApproximation, NumericVector plannedSubjects, bool directionUpper,
        NumericVector allocationRatioPlanned, bool riskRatio, double thetaH0,
        int calcSubjectsFunctionType, Nullable<Function> calcSubjectsFunctionR,
        SEXP calcSubjectsFunctionCpp) {

    // choose between the default C++, user R, and user C++ recalculation function
    Rcpp::XPtr<calcSubjectsFunctionRatesPtr> calcSubjectsFunctionCppXPtr =
        getSimulationRatesStageSubjectsXPtrCpp();
    if (calcSubjectsFunctionType == 0) {
        calcSubjectsFunctionR = NULL;
    } else if (calcSubjectsFunctionType == 2) {
        calcSubjectsFunctionR = NULL;
        calcSubjectsFunctionCppXPtr =
            Rcpp::XPtr<calcSubjectsFunctionRatesPtr>(calcSubjectsFunctionCpp);
    }

    // one column per pi1 scenario
    int cols = pi1.length();
    NumericMatrix sampleSizes(kMax, cols);
    NumericMatrix rejectPerStage(kMax, cols);
    NumericVector overallReject(cols);
    NumericMatrix futilityPerStage(kMax - 1, cols);
    NumericVector futilityStop(cols);
    NumericMatrix iterations(kMax, cols);
    NumericVector expectedNumberOfSubjects(cols);
    NumericMatrix conditionalPowerAchieved(kMax, cols);
    conditionalPowerAchieved.fill(NA_REAL);

    // flat per-(scenario, iteration, stage) raw data columns
    int dataLen = cols * maxNumberOfIterations * kMax;
    NumericVector dataIterationNumber =
rep(NA_REAL, dataLen); NumericVector dataStageNumber = rep(NA_REAL, dataLen); NumericVector dataPi1 = rep(NA_REAL, dataLen); NumericVector dataPi2 = rep(pi2, dataLen); NumericVector dataNumberOfSubjects = rep(NA_REAL, dataLen); NumericVector dataNumberOfCumulatedSubjects = rep(NA_REAL, dataLen); NumericVector dataRejectPerStage = rep(NA_REAL, dataLen); NumericVector dataFutilityPerStage = rep(NA_REAL, dataLen); NumericVector dataTestStatistic = rep(NA_REAL, dataLen); NumericVector dataTestStatisticsPerStage = rep(NA_REAL, dataLen); NumericVector dataOverallRate1 = rep(NA_REAL, dataLen); NumericVector dataOverallRate2 = rep(NA_REAL, dataLen); NumericVector dataStagewiseRates1 = rep(NA_REAL, dataLen); NumericVector dataStagewiseRates2 = rep(NA_REAL, dataLen); NumericVector dataSampleSizesPerStage1 = rep(NA_REAL, dataLen); NumericVector dataSampleSizesPerStage2 = rep(NA_REAL, dataLen); LogicalVector dataTrialStop(dataLen); dataTrialStop.fill(NA_LOGICAL); NumericVector dataConditionalPowerAchieved = rep(NA_REAL, dataLen); NumericVector dataPValuesSeparate; if (designNumber != 1L) { dataPValuesSeparate = rep(NA_REAL, dataLen); } NumericVector overallRate; int index = 1; for (int i = 1; i <= pi1.size(); i++) { NumericVector simulatedSubjects(kMax); NumericVector simulatedRejections(kMax); NumericVector simulatedFutilityStop(kMax - 1); double simulatedOverallSubjects = 0; NumericVector simulatedConditionalPower(kMax); for (int j = 1; j <= maxNumberOfIterations; j++) { bool trialStop = false; NumericMatrix sampleSizesPerStage(groups, 0); NumericMatrix eventsPerStage(groups, 0); NumericVector testStatisticsPerStage = NumericVector::create(); List testStatistic; for (int k = 1; k <= kMax; k++) { if (!trialStop) { List stepResult = getSimulationStepRatesCpp( k, kMax, designNumber, informationRates, futilityBounds, alpha0Vec, criticalValues, riskRatio, thetaH0, (double) pi1[i - 1], pi2, groups, normalApproximation, plannedSubjects, directionUpper, allocationRatioPlanned, 
minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, pi1H1, pi2H1, sampleSizesPerStage, eventsPerStage, testStatisticsPerStage, testStatistic, calcSubjectsFunctionType, calcSubjectsFunctionR, calcSubjectsFunctionCppXPtr); trialStop = stepResult["trialStop"]; sampleSizesPerStage = as(stepResult["sampleSizesPerStage"]); eventsPerStage = as(stepResult["eventsPerStage"]); testStatisticsPerStage = as(stepResult["testStatisticsPerStage"]); testStatistic = as(stepResult["testStatistic"]); double simulatedSubjectsStep = stepResult["simulatedSubjects"]; double simulatedRejectionsStep = stepResult["simulatedRejections"]; double simulatedFutilityStopStep = stepResult["simulatedFutilityStop"]; double simulatedConditionalPowerStep = NA_REAL; if (k > 1) { simulatedConditionalPowerStep = stepResult["simulatedConditionalPower"]; } iterations(k - 1, i - 1) = iterations(k - 1, i - 1) + 1; simulatedSubjects[k - 1] = simulatedSubjects[k - 1] + simulatedSubjectsStep; simulatedRejections[k - 1] = simulatedRejections[k - 1] + simulatedRejectionsStep; if (k < kMax) { simulatedFutilityStop[k - 1] = simulatedFutilityStop[k - 1] + simulatedFutilityStopStep; } simulatedConditionalPower[k - 1] = simulatedConditionalPower[k - 1] + simulatedConditionalPowerStep; dataIterationNumber[index - 1] = j; dataStageNumber[index - 1] = k; dataPi1[index - 1] = pi1[i - 1]; dataNumberOfSubjects[index - 1] = simulatedSubjectsStep; dataNumberOfCumulatedSubjects[index - 1] = sum(sampleSizesPerStage); dataRejectPerStage[index - 1] = simulatedRejectionsStep; dataFutilityPerStage[index - 1] = simulatedFutilityStopStep; dataTestStatistic[index - 1] = testStatistic["value"]; dataTestStatisticsPerStage[index - 1] = testStatisticsPerStage[k - 1]; //overallRate = testStatistic["overallRate"]; //dataOverallRate1[index - 1] = overallRate[0]; dataOverallRate1[index - 1] = as(testStatistic["overallRate"])[0]; dataStagewiseRates1[index - 1] = as(testStatistic["stagewiseRates"])[0]; 
dataSampleSizesPerStage1[index - 1] = as(testStatistic["sampleSizesPerStage"])(0, k - 1); if (as(testStatistic["stagewiseRates"]).size() > 1) { dataOverallRate2[index - 1] = as(testStatistic["overallRate"])[1]; dataStagewiseRates2[index - 1] = as(testStatistic["stagewiseRates"])[1]; dataSampleSizesPerStage2[index - 1] = as(testStatistic["sampleSizesPerStage"])(1, k - 1); } else { dataStagewiseRates2[index - 1] = NA_REAL; dataOverallRate2[index - 1] = NA_REAL; dataSampleSizesPerStage2[index - 1] = NA_REAL; } dataTrialStop[index - 1] = trialStop; dataConditionalPowerAchieved[index - 1] = simulatedConditionalPowerStep; if (designNumber != 1L) { dataPValuesSeparate[index - 1] = as(testStatistic["pValuesSeparate"])[k - 1]; } index++; } } } simulatedOverallSubjects = sum(simulatedSubjects[Range(0, kMax - 1)]); sampleSizes(_, i - 1) = simulatedSubjects / iterations(_, i - 1); rejectPerStage(_, i - 1) = simulatedRejections / maxNumberOfIterations; overallReject[i - 1] = sum(simulatedRejections / maxNumberOfIterations); futilityPerStage(_, i - 1) = simulatedFutilityStop / maxNumberOfIterations; futilityStop[i - 1] = sum(simulatedFutilityStop / maxNumberOfIterations); expectedNumberOfSubjects[i - 1] = simulatedOverallSubjects / maxNumberOfIterations; if (kMax > 1) { for (int m = 2; m <= kMax; m++) { conditionalPowerAchieved(m - 1, i - 1) = simulatedConditionalPower(m - 1) / iterations(m - 1, i - 1); } } } for (int m = 0; m < sampleSizes.length(); m++) { if (NumericVector::is_na((double) sampleSizes[m])) { sampleSizes[m] = 0; } } NumericVector earlyStop; if (kMax > 1) { if (pi1.length() == 1) { earlyStop = sum(futilityPerStage) + sum(as(rejectPerStage)[Range(0, kMax - 2)]); } else { NumericVector rejectPerStageColSum; if (kMax > 2) { rejectPerStageColSum = colSums(rejectPerStage(Range(0, kMax - 2), _)); } else { rejectPerStageColSum = rejectPerStage(0, _); } earlyStop = colSums(futilityPerStage) + rejectPerStageColSum; } } else { earlyStop = rep(0, pi1.length()); } DataFrame 
data = DataFrame::create( _["iterationNumber"] = dataIterationNumber, _["stageNumber"] = dataStageNumber, _["pi1"] = dataPi1, _["pi2"] = pi2, _["numberOfSubjects"] = dataNumberOfSubjects, _["numberOfCumulatedSubjects"] = dataNumberOfCumulatedSubjects, _["rejectPerStage"] = dataRejectPerStage, _["futilityPerStage"] = dataFutilityPerStage, _["testStatistic"] = dataTestStatistic, _["testStatisticsPerStage"] = dataTestStatisticsPerStage, _["overallRate1"] = dataOverallRate1, _["overallRate2"] = dataOverallRate2, _["stagewiseRates1"] = dataStagewiseRates1, _["stagewiseRates2"] = dataStagewiseRates2, _["sampleSizesPerStage1"] = dataSampleSizesPerStage1, _["sampleSizesPerStage2"] = dataSampleSizesPerStage2, _["trialStop"] = dataTrialStop, _["conditionalPowerAchieved"] = round(dataConditionalPowerAchieved, 6)); if (designNumber != 1L) { data["pValue"] = dataPValuesSeparate; data = Rcpp::DataFrame(data); } return List::create( _["iterations"] = iterations, _["sampleSizes"] = sampleSizes, _["rejectPerStage"] = rejectPerStage, _["overallReject"] = overallReject, _["futilityPerStage"] = futilityPerStage, _["futilityStop"] = futilityStop, _["earlyStop"] = earlyStop, _["expectedNumberOfSubjects"] = expectedNumberOfSubjects, _["conditionalPowerAchieved"] = conditionalPowerAchieved, _["data"] = data ); } rpact/src/f_utilities.h0000644000176200001440000000675414435554745014677 0ustar liggesusers/** * * -- Simulation of survival data with group sequential and combination test -- * * This file is part of the R package rpact: * Confirmatory Adaptive Clinical Trial Design and Analysis * * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD * Licensed under "GNU Lesser General Public License" version 3 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3 * * RPACT company website: https://www.rpact.com * rpact package website: https://www.rpact.org * * Contact us for information about our services: info@rpact.com * * File version: $Revision: 7019 $ * Last 
changed: $Date: 2023-05-31 07:23:47 +0200 (Mi, 31 Mai 2023) $
* Last changed by: $Author: pahlke $
*
*/

#include <Rcpp.h>

// [[Rcpp::plugins(cpp11)]]

using namespace Rcpp;

#ifndef PKG_RPACT_H
#define PKG_RPACT_H

// printf-style exception: formats the message into a fixed buffer at
// construction time so what() never allocates.
struct Exception : std::exception {
    char text[1000];

    Exception(char const* fmt, ...) __attribute__((format(printf,2,3))) {
        va_list ap;
        va_start(ap, fmt);
        vsnprintf(text, sizeof text, fmt, ap);
        va_end(ap);
    }

    char const* what() const throw() {
        return text;
    }
};

// Normal distribution helpers with configurable numerical guard epsilon.
double getQNormEpsilon();
double getQNormThreshold();
double getQNorm(double p, double mean = 0, double sd = 1,
    double lowerTail = 1, double logP = 0, double epsilon = getQNormEpsilon());
double getOneMinusQNorm(double p, double mean = 0, double sd = 1,
    double lowerTail = 1, double logP = 0, double epsilon = getQNormEpsilon());
double getOneMinusPNorm(double q, double mean = 0, double sd = 1,
    double lowerTail = 1, double logP = 0, double epsilon = getQNormEpsilon());

IntegerVector getOrder(SEXP x, bool desc = false);

// Element-wise vector arithmetic helpers (overloaded by argument types).
NumericVector vectorSum(NumericVector x, NumericVector y);
NumericVector vectorSub(NumericVector x, NumericVector y);
double vectorSum(NumericVector x);
NumericVector vectorSqrt(NumericVector x);
NumericVector vectorDivide(NumericVector x, double value);
NumericVector vectorDivide(NumericMatrix x, int rowIndex, double value);
NumericVector vectorDivide(NumericVector x, NumericVector y);
NumericVector vectorMultiply(NumericVector x, double multiplier);
NumericVector vectorMultiply(NumericVector x, NumericVector y);
NumericVector vectorPow(NumericVector x, NumericVector y);
NumericVector vectorPow2(NumericVector y, double exp);
NumericVector vectorRepEachValue(NumericVector x, int kMax);
double vectorProduct(NumericVector x);
double vectorProduct(NumericVector x, NumericVector y);
double round(double value, int digits);
void vectorSumC(int i, int j, int kMax, double* x, NumericMatrix y);
void vectorInitC(int i, int kMax, double* x, double value);
NumericVector concat(NumericVector a, NumericVector b);
NumericMatrix matrixAdd(NumericMatrix x, NumericMatrix y);
NumericMatrix matrixSub(NumericMatrix x, NumericMatrix y);
NumericMatrix matrixMultiply(NumericMatrix x, double y);
NumericVector repInt(int x, int y);

// One-dimensional root finders over a closed interval.
double bisection2(std::function<double(double)> f, double lower, double upper, double tolerance, int maxIter);
double bizero(std::function<double(double)> f, double lower, double upper, double tolerance, int maxIter);
double zeroin(std::function<double(double)> f, double lower, double upper, double tolerance, int maxIter);

double max(NumericVector x);
double min(NumericVector x);
NumericVector range(int from, int to);
NumericVector rangeVector(NumericVector x, int from, int to);
std::string getCipheredValue(String x);
void logDebug(std::string s);
double getRandomTDistribution(double df, double ncp);

#endif
rpact/src/f_assertions.h0000644000176200001440000000170614435554745015046 0ustar liggesusers/**
*
* -- Type definitions --
*
* This file is part of the R package rpact:
* Confirmatory Adaptive Clinical Trial Design and Analysis
*
* Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
* Licensed under "GNU Lesser General Public License" version 3
* License text can be found here: https://www.r-project.org/Licenses/LGPL-3
*
* RPACT company website: https://www.rpact.com
* rpact package website: https://www.rpact.org
*
* Contact us for information about our services: info@rpact.com
*
* File version: $Revision: 7019 $
* Last changed: $Date: 2023-05-31 07:23:47 +0200 (Mi, 31 Mai 2023) $
* Last changed by: $Author: pahlke $
*
*/

#include <Rcpp.h>

// [[Rcpp::plugins(cpp11)]]

using namespace Rcpp;

// Validates that x lies inside [lower, upper]; the first overload lets the
// caller choose per-bound inclusivity.
void assertIsInInterval(double x, Rcpp::CharacterVector xName, double lower, double upper,
    bool lowerInclusive, bool upperInclusive);
// Two-bound convenience overload. NOTE(review): its inclusivity defaults are
// defined in the implementation file, not visible here - confirm before relying on them.
void assertIsInInterval(double x, Rcpp::CharacterVector xName, double lower, double upper);
rpact/src/f_simulation_survival_utilities.cpp0000644000176200001440000001105714332215406021401 0ustar liggesusers/**
*
* -- Simulation of survival data with group sequential and combination test --
* * This file is part of the R package rpact: * Confirmatory Adaptive Clinical Trial Design and Analysis * * Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD * Licensed under "GNU Lesser General Public License" version 3 * License text can be found here: https://www.r-project.org/Licenses/LGPL-3 * * RPACT company website: https://www.rpact.com * rpact package website: https://www.rpact.org * * Contact us for information about our services: info@rpact.com * * File version: $Revision: 6617 $ * Last changed: $Date: 2022-10-18 13:06:16 +0200 (Di, 18 Okt 2022) $ * Last changed by: $Author: pahlke $ * */ #include // [[Rcpp::plugins(cpp11)]] #include "f_utilities.h" using namespace Rcpp; double findObservationTime( NumericVector accrualTime, NumericVector survivalTime, NumericVector dropoutTime, double requiredStageEvents) { int numberOfSubjects = accrualTime.size(); double upperBound = 1; double numberOfEvents; while (true) { numberOfEvents = 0; for (int i = 0; i < numberOfSubjects; i++) { if (accrualTime[i] + survivalTime[i] < upperBound && (R_IsNA((double) dropoutTime[i]) || dropoutTime[i] > survivalTime[i])) { numberOfEvents = numberOfEvents + 1; } } upperBound = 2 * upperBound; if (numberOfEvents > requiredStageEvents || upperBound > 1E12) { break; } } if (upperBound > 1E12) { return NA_REAL; } double lower = 0; double upper = upperBound; double time; while (true) { time = (lower + upper) / 2; numberOfEvents = 0; for (int i = 0; i < numberOfSubjects; i++) { if (accrualTime[i] + survivalTime[i] <= time && (R_IsNA((double) dropoutTime[i]) || dropoutTime[i] > survivalTime[i])) { numberOfEvents = numberOfEvents + 1; } } if (numberOfEvents >= requiredStageEvents) { upper = time; } else { lower = time; } if (upper - lower < 1E-05) { break; } } if (numberOfEvents > requiredStageEvents) { time -= 1E-05; } else if (numberOfEvents < requiredStageEvents) { time += 1E-05; } return time; } /** * ::Rf_pnorm5 identical to R::pnorm */ double getNormalDistribution(double p) { 
return R::pnorm(p, 0.0, 1.0, 1, 0); // p, mu, sigma, lt, lg } /** * ::Rf_qnorm5 identical to R::qnorm */ double getNormalQuantile(double p) { return R::qnorm(p, 0.0, 1.0, 1, 0); // p, mu, sigma, lt, lg } /** * ::Rf_rexp identical to * R::rexp(rate); * Rcpp::rexp(1, rate)[0]; */ double getRandomExponentialDistribution(double rate) { return Rcpp::rexp(1, rate)[0]; } /** * Weibull: (-log(1 - runif(0.0, 1.0)))^(1 / kappa) / rate */ double getRandomSurvivalDistribution(double rate, double kappa) { return pow(-log(1 - R::runif(0.0, 1.0)), 1 / kappa) / rate; } double getRandomPiecewiseExponentialDistribution( NumericVector cdfValues, NumericVector piecewiseLambda, NumericVector piecewiseSurvivalTime) { double y; NumericVector s; double p = R::runif(0.0, 1.0); int n = piecewiseSurvivalTime.size(); if (n == 0) { return -log(1 - p) / piecewiseLambda[0]; } for (int i = 0; i < n; i++) { if (p <= cdfValues[i]) { if (i == 0) { return -log(1 - p) / piecewiseLambda[0]; } y = piecewiseLambda[0] * piecewiseSurvivalTime[0]; if (i > 1) { s = vectorSum(piecewiseSurvivalTime[seq(1, i - 1)], -piecewiseSurvivalTime[seq(0, i - 2)]); y += vectorProduct(piecewiseLambda[seq(1, i - 1)], s); } return piecewiseSurvivalTime[i - 1] - (log(1 - p) + y) / piecewiseLambda[i]; } } if (n == 1) { return piecewiseSurvivalTime[0] - (log(1 - p) + piecewiseLambda[0] * piecewiseSurvivalTime[0]) / piecewiseLambda[1]; } s = vectorSum(piecewiseSurvivalTime[seq(1, n - 1)], -piecewiseSurvivalTime[seq(0, n - 2)]); y = piecewiseLambda[0] * piecewiseSurvivalTime[0] + vectorProduct(piecewiseLambda[seq(1, n - 1)], s); return piecewiseSurvivalTime[n - 1] - (log(1 - p) + y) / piecewiseLambda[n]; } bool isPiecewiseExponentialSurvivalEnabled(NumericVector lambdaVec2) { if (lambdaVec2.size() <= 1) { return false; } for (int i = 0; i < lambdaVec2.size(); i++) { if (R_IsNA((double) lambdaVec2[i])) { return false; } } return true; } double getLambdaByPi(double pi, double eventTime, double kappa) { return pow(-log(1 - pi), 1 / 
kappa) / eventTime;
}

// Event probability until 'eventTime' implied by a Weibull(lambda, kappa) model.
double getPiByLambda(double lambda, double eventTime, double kappa) {
    return 1 - exp(-pow(lambda * eventTime, kappa));
}

// Hazard ratio implied by two event probabilities under the Weibull model.
double getHazardRatio(double pi1, double pi2, double eventTime, double kappa) {
    return pow(getLambdaByPi(pi1, eventTime, kappa) / getLambdaByPi(pi2, eventTime, kappa), kappa);
}
rpact/src/f_simulation_base_means.cpp0000644000176200001440000005035514435554745017550 0ustar liggesusers/**
*
* -- Simulation of means with group sequential and combination test --
*
* This file is part of the R package rpact:
* Confirmatory Adaptive Clinical Trial Design and Analysis
*
* Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
* Licensed under "GNU Lesser General Public License" version 3
* License text can be found here: https://www.r-project.org/Licenses/LGPL-3
*
* RPACT company website: https://www.rpact.com
* rpact package website: https://www.rpact.org
*
* Contact us for information about our services: info@rpact.com
*
* File version: $Revision: 7019 $
* Last changed: $Date: 2023-05-31 07:23:47 +0200 (Mi, 31 Mai 2023) $
* Last changed by: $Author: pahlke $
*
*/

#include <Rcpp.h>

// [[Rcpp::plugins(cpp11)]]

#include "f_utilities.h"
#include "rpact_types.h"

using namespace Rcpp;

// Computes the stage-wise test statistic for the means designs.
// Returns a vector packed as: [0] combination/overall value on the design's
// scale, [1] overall test statistic, [2] standardized effect estimate,
// [3..] separate stage-wise p-values.
NumericVector getTestStatisticsMeans(
        double designNumber, NumericVector informationRates, int groups,
        bool normalApproximation, bool meanRatio, double thetaH0,
        NumericVector allocationRatioPlanned, NumericVector sampleSizesPerStage,
        NumericVector testStatisticsPerStage) {

    NumericVector pValuesSeparate = NumericVector(testStatisticsPerStage.size(), NA_REAL);
    double value = 1;
    int stage = sampleSizesPerStage.length();
    // sample-size weighted pooling of the stage-wise statistics
    double overallTestStatistic =
        vectorProduct(sqrt(sampleSizesPerStage), testStatisticsPerStage) /
        sqrt(sum(sampleSizesPerStage));

    // separate one-sided p-values per stage: normal or t with n - groups df
    if (normalApproximation) {
        pValuesSeparate = 1.0 - Rcpp::pnorm(testStatisticsPerStage);
    } else {
        for (int i = 0; i < pValuesSeparate.length(); ++i) {
            double df = sampleSizesPerStage[i] - groups;
            pValuesSeparate[i] = 1.0 -
R::pt((double) testStatisticsPerStage[i], df, true, false); } } if (designNumber == 1L) { if (normalApproximation) { value = overallTestStatistic; } else { value = getQNorm(R::pt(overallTestStatistic, sum(sampleSizesPerStage) - groups, true, false)); } } else if (designNumber == 2L) { if (stage == 1) { if (normalApproximation) { value = testStatisticsPerStage[0]; } else { value = getQNorm(R::pt((double) testStatisticsPerStage[0], (double) sampleSizesPerStage[0] - groups, true, false)); } } else { if (normalApproximation) { value = (sqrt((double) informationRates[0]) * testStatisticsPerStage[0] + vectorProduct(sqrt(tail(head(informationRates, stage), stage - 1) - head(informationRates, stage - 1)), tail(head(testStatisticsPerStage, stage), stage - 1))) / sqrt((double) informationRates[stage - 1]); } else { NumericVector helperVec; for (int i = 1; i < stage; i++) { helperVec.push_back(getQNorm(R::pt((double) testStatisticsPerStage[i], (double) sampleSizesPerStage[i] - groups, true, false))); } value = (sqrt((double) informationRates[0]) * getQNorm(R::pt((double) testStatisticsPerStage[0], (double) sampleSizesPerStage[0] - groups, true, false)) + vectorProduct(sqrt(tail(head(informationRates, stage), stage - 1) - head(informationRates, stage - 1)), helperVec)) / sqrt((double) informationRates[stage - 1]); } } } else if (designNumber == 3L) { NumericVector weightsFisher = rep(NA_REAL, stage); weightsFisher[0] = 1; if (stage > 1) { for (int i = 1; i < stage; ++i) { weightsFisher[i] = sqrt((double) informationRates[i] - informationRates[i - 1]) / sqrt((double) informationRates[0]); } } if (normalApproximation) { NumericVector helperVec = 1 - pnorm(head(testStatisticsPerStage, stage)); for (int i = 0; i < stage; ++i) { value *= pow((double) helperVec[i], (double) weightsFisher[i]); } } else { for (int i = 0; i < stage; ++i) { double q = testStatisticsPerStage[i]; int df = sampleSizesPerStage[i] - groups; double tDistribution = 1 - R::pt(q, df, true, false); value *= 
pow(tDistribution, (double) weightsFisher[i]); } } } double standardizedEffectEstimate; if (groups == 1) { standardizedEffectEstimate = overallTestStatistic / sqrt(sum(sampleSizesPerStage)); } else { NumericVector allocationRatios = rangeVector(allocationRatioPlanned, 0, stage - 1); double numerator = !meanRatio ? 1 : pow(thetaH0, 2); standardizedEffectEstimate = overallTestStatistic * sqrt(1 / sum(allocationRatios / (1 + allocationRatios) * sampleSizesPerStage) + numerator / sum(1 / (1 + allocationRatios) * sampleSizesPerStage)); } NumericVector result = NumericVector(3 + pValuesSeparate.size(), NA_REAL); result[0] = value; result[1] = overallTestStatistic; result[2] = standardizedEffectEstimate; for (int i = 0; i < pValuesSeparate.size(); i++) { result[3 + i] = pValuesSeparate[i]; } return result; } double getSimulationMeansStageSubjects( int stage, bool meanRatio, double thetaH0, int groups, NumericVector plannedSubjects, NumericVector allocationRatioPlanned, NumericVector minNumberOfSubjectsPerStage, NumericVector maxNumberOfSubjectsPerStage, NumericVector sampleSizesPerStage, double thetaH1, double stDevH1, double conditionalPower, double conditionalCriticalValue) { if (R_IsNA(conditionalPower)) { return plannedSubjects[stage - 1] - plannedSubjects[stage - 2]; } double thetaStandardized = thetaH1 / stDevH1; double mult = 1; if (groups == 2) { double allocationRatio = allocationRatioPlanned[stage - 1]; thetaH0 = meanRatio ? 
thetaH0 : 1; mult = 1.0 + 1.0 / allocationRatio + pow(thetaH0, 2) * (1.0 + allocationRatio); } double stageSubjects = pow(std::max(0.0, conditionalCriticalValue + getQNorm(conditionalPower)), 2) * mult / pow(std::max(1e-12, thetaStandardized), 2); return std::min( std::max((double) minNumberOfSubjectsPerStage[stage - 1], stageSubjects), (double) maxNumberOfSubjectsPerStage[stage - 1]); } List getSimulationStepMeans( int k, double kMax, double designNumber, NumericVector informationRates, NumericVector futilityBounds, NumericVector alpha0Vec, NumericVector criticalValues, bool meanRatio, double thetaH0, double alternative, double stDev, int groups, bool normalApproximation, NumericVector plannedSubjects, bool directionUpper, NumericVector allocationRatioPlanned, NumericVector minNumberOfSubjectsPerStage, NumericVector maxNumberOfSubjectsPerStage, double conditionalPower, double thetaH1, double stDevH1, double effectEstimate, NumericVector sampleSizesPerStage, NumericVector testStatisticsPerStage, NumericVector testStatistic, int calcSubjectsFunctionType, Nullable calcSubjectsFunctionR, Rcpp::XPtr calcSubjectsFunctionCppXPtr) { double nz, testResult, thetaStandardized, simulatedConditionalPower, conditionalCriticalValue; double stageSubjects = plannedSubjects[0]; double testStatisticValue; double criticalValue = criticalValues[k - 1]; // perform sample size size recalculation for stages 2, ..., kMax simulatedConditionalPower = 0; if (k > 1) { testStatisticValue = testStatistic[0]; // value // used effect size is either estimated from test statistic or pre-fixed if (R_IsNA(thetaH1)) { thetaH1 = effectEstimate; } else { thetaH1 = thetaH1 - thetaH0; } thetaStandardized = thetaH1 / stDevH1; if (!directionUpper) { thetaH1 = -thetaH1; thetaStandardized = -thetaStandardized; } // conditional critical value to reject the null hypotheses at the next stage of the trial if (designNumber == 3L) { conditionalCriticalValue = getOneMinusQNorm(pow(criticalValue / testStatisticValue, 
1.0 / sqrt((double) (informationRates[k - 1] - informationRates[k - 2]) / informationRates[0]))); } else { conditionalCriticalValue = (criticalValue * sqrt((double) informationRates[k - 1]) - testStatisticValue * sqrt((double) informationRates[k - 2])) / sqrt((double) informationRates[k - 1] - informationRates[k - 2]); } if (calcSubjectsFunctionType == 1 && calcSubjectsFunctionR.isNotNull()) { stageSubjects = Rf_asReal( as(calcSubjectsFunctionR)( _["stage"] = k, _["meanRatio"] = meanRatio, _["thetaH0"] = thetaH0, _["groups"] = groups, _["plannedSubjects"] = plannedSubjects, _["sampleSizesPerStage"] = sampleSizesPerStage, _["allocationRatioPlanned"] = allocationRatioPlanned, _["minNumberOfSubjectsPerStage"] = minNumberOfSubjectsPerStage, _["maxNumberOfSubjectsPerStage"] = maxNumberOfSubjectsPerStage, _["conditionalPower"] = conditionalPower, _["thetaH1"] = thetaH1, _["stDevH1"] = stDevH1, _["conditionalCriticalValue"] = conditionalCriticalValue)); } else { calcSubjectsFunctionMeansPtr fun = *calcSubjectsFunctionCppXPtr; stageSubjects = fun( k, meanRatio, thetaH0, groups, plannedSubjects, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, sampleSizesPerStage, thetaH1, stDevH1, conditionalPower, conditionalCriticalValue); } // calculate conditional power for computed stageSubjects if (groups == 2) { double allocationRatio = allocationRatioPlanned[k - 1]; if (!meanRatio) { thetaStandardized = thetaStandardized * sqrt(allocationRatio) / (1.0 + allocationRatio); } else { thetaStandardized = thetaStandardized * sqrt(allocationRatio) / sqrt((1.0 + allocationRatio) * (1.0 + thetaH0 * allocationRatio)); } } simulatedConditionalPower = getOneMinusPNorm(conditionalCriticalValue - thetaStandardized * sqrt(stageSubjects)); } if (groups == 1) { nz = (alternative - thetaH0) / stDev * sqrt(stageSubjects); if (normalApproximation) { testResult = (2.0 * directionUpper - 1.0) * R::rnorm(nz, 1.0); } else { testResult = (2.0 * directionUpper - 1.0) * 
getRandomTDistribution(stageSubjects - 1, nz); } } else { double allocationRatio = allocationRatioPlanned[k - 1]; if (!meanRatio) { nz = (alternative - thetaH0) / stDev * sqrt(allocationRatio * stageSubjects) / (1 + allocationRatio); } else { nz = (alternative - thetaH0) / stDev * sqrt(allocationRatio * stageSubjects) / sqrt((1 + allocationRatio) * (1 + pow(thetaH0,2) * allocationRatio)); } if (normalApproximation) { testResult = (2.0 * directionUpper - 1.0) * R::rnorm(nz, 1.0); } else { testResult = (2.0 * directionUpper - 1.0) * getRandomTDistribution(stageSubjects - 2, nz); } } if (k > 1) { sampleSizesPerStage.push_back(stageSubjects); testStatisticsPerStage.push_back(testResult); } else { sampleSizesPerStage = NumericVector::create(stageSubjects); testStatisticsPerStage = NumericVector::create(testResult); } testStatistic = getTestStatisticsMeans(designNumber, informationRates, groups, normalApproximation, meanRatio, thetaH0, allocationRatioPlanned, sampleSizesPerStage, testStatisticsPerStage); testStatisticValue = testStatistic[0]; // value effectEstimate = testStatistic[2] * stDev; // standardizedEffectEstimate double simulatedRejections = 0; double simulatedFutilityStop = 0; bool trialStop = false; if (k == kMax) { trialStop = true; } if (designNumber == 3L) { if (!R_IsNA(testStatisticValue) && !R_IsNA(criticalValue) && testStatisticValue <= criticalValue) { simulatedRejections = 1; trialStop = true; } double testStatisticPValueSeparate = testStatistic[3 + k - 1]; // pValuesSeparate if (k < kMax && !R_IsNA(testStatisticPValueSeparate)) { double alpha0 = alpha0Vec[k - 1]; if (!R_IsNA(alpha0) && testStatisticPValueSeparate >= alpha0) { simulatedFutilityStop = 1; trialStop = true; } } } else { if (!R_IsNA(testStatisticValue) && !R_IsNA(criticalValue) && testStatisticValue >= criticalValue) { simulatedRejections = 1; trialStop = true; } if (k < kMax && !R_IsNA(testStatisticValue)) { double futilityBound = futilityBounds[k - 1]; if (!R_IsNA(futilityBound) && 
testStatisticValue <= futilityBound) { simulatedFutilityStop = 1; trialStop = true; } } } if (!directionUpper) { effectEstimate = -effectEstimate; } return List::create( _["trialStop"] = trialStop, _["sampleSizesPerStage"] = sampleSizesPerStage, _["testStatisticsPerStage"] = testStatisticsPerStage, _["testStatistic"] = testStatistic, _["effectEstimate"] = effectEstimate, _["simulatedSubjects"] = stageSubjects, _["simulatedRejections"] = simulatedRejections, _["simulatedFutilityStop"] = simulatedFutilityStop, _["simulatedConditionalPower"] = simulatedConditionalPower ); } Rcpp::XPtr getSimulationMeansStageSubjectsXPtr() { return Rcpp::XPtr(new calcSubjectsFunctionMeansPtr(&getSimulationMeansStageSubjects)); } // [[Rcpp::export]] List getSimulationMeansLoopCpp( NumericVector alternative, int kMax, int maxNumberOfIterations, int designNumber, NumericVector informationRates, NumericVector futilityBounds, NumericVector alpha0Vec, NumericVector criticalValues, bool meanRatio, double thetaH0, double stDev, int groups, bool normalApproximation, NumericVector plannedSubjects, bool directionUpper, NumericVector allocationRatioPlanned, NumericVector minNumberOfSubjectsPerStage, NumericVector maxNumberOfSubjectsPerStage, double conditionalPower, double thetaH1, double stDevH1, int calcSubjectsFunctionType, Nullable calcSubjectsFunctionR, SEXP calcSubjectsFunctionCpp) { Rcpp::XPtr calcSubjectsFunctionCppXPtr = getSimulationMeansStageSubjectsXPtr(); if (calcSubjectsFunctionType == 0) { calcSubjectsFunctionR = NULL; } else if (calcSubjectsFunctionType == 2) { calcSubjectsFunctionR = NULL; calcSubjectsFunctionCppXPtr = Rcpp::XPtr(calcSubjectsFunctionCpp); } int cols = alternative.length(); NumericMatrix sampleSizes(kMax, cols); std::fill(sampleSizes.begin(), sampleSizes.end(), 0.0) ; NumericMatrix rejectPerStage(kMax, cols); std::fill(rejectPerStage.begin(), rejectPerStage.end(), 0.0) ; NumericVector overallReject = NumericVector(cols, 0.0); NumericMatrix futilityPerStage(kMax - 
1, cols); std::fill(futilityPerStage.begin(), futilityPerStage.end(), 0.0) ; NumericVector futilityStop = NumericVector(cols, 0.0); NumericMatrix iterations(kMax, cols); std::fill(iterations.begin(), iterations.end(), 0.0) ; NumericVector expectedNumberOfSubjects = NumericVector(cols, 0.0); NumericMatrix conditionalPowerAchieved (kMax, cols); std::fill(conditionalPowerAchieved.begin(), conditionalPowerAchieved.end(), NA_REAL) ; int len = alternative.length() * maxNumberOfIterations * kMax; NumericVector dataIterationNumber = NumericVector(len, NA_REAL); NumericVector dataStageNumber = NumericVector(len, NA_REAL); NumericVector dataAlternative = NumericVector(len, NA_REAL); NumericVector dataNumberOfSubjects = NumericVector(len, NA_REAL); NumericVector dataNumberOfCumulatedSubjects = NumericVector(len, NA_REAL); NumericVector dataRejectPerStage = NumericVector(len, NA_REAL); NumericVector dataFutilityPerStage = NumericVector(len, NA_REAL); NumericVector dataTestStatistic = NumericVector(len, NA_REAL); NumericVector dataTestStatisticsPerStage = NumericVector(len, NA_REAL); NumericVector dataTrialStop = NumericVector(len, NA_REAL); NumericVector dataConditionalPowerAchieved = NumericVector(len, NA_REAL); NumericVector dataEffectEstimate = NumericVector(len, NA_REAL); NumericVector dataPValuesSeparate = NumericVector(len, NA_REAL); bool trialStop; List stepResult; double effectEstimate; double simulatedSubjectsStep; double simulatedRejectionsStep; double simulatedFutilityStopStep; double simulatedConditionalPowerStep; NumericVector testStatistic; NumericVector sampleSizesPerStage; NumericVector testStatisticsPerStage; double index = 1; for (int i = 1; i <= alternative.length(); i++) { NumericVector simulatedSubjects(kMax); NumericVector simulatedRejections(kMax); NumericVector simulatedFutilityStop(kMax - 1); NumericVector simulatedConditionalPower(kMax); for (int j = 1; j <= maxNumberOfIterations; j++) { trialStop = false; effectEstimate = 0; for (int k = 1; k <= 
kMax; k++) { if (trialStop) { break; } stepResult = getSimulationStepMeans( k, kMax, designNumber, informationRates, futilityBounds, alpha0Vec, criticalValues, meanRatio, thetaH0, (double) alternative[i - 1], stDev, groups, normalApproximation, plannedSubjects, directionUpper, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, thetaH1, stDevH1, effectEstimate, sampleSizesPerStage, testStatisticsPerStage, testStatistic, calcSubjectsFunctionType, calcSubjectsFunctionR, calcSubjectsFunctionCppXPtr); trialStop = stepResult["trialStop"]; sampleSizesPerStage = stepResult["sampleSizesPerStage"]; testStatisticsPerStage = stepResult["testStatisticsPerStage"]; testStatistic = stepResult["testStatistic"]; simulatedSubjectsStep = stepResult["simulatedSubjects"]; simulatedRejectionsStep = stepResult["simulatedRejections"]; simulatedFutilityStopStep = stepResult["simulatedFutilityStop"]; effectEstimate = stepResult["effectEstimate"]; if (k > 1) { simulatedConditionalPowerStep = stepResult["simulatedConditionalPower"]; } else { simulatedConditionalPowerStep = NA_REAL; } iterations(k - 1, i - 1) = iterations(k - 1, i - 1) + 1; simulatedSubjects[k - 1] = simulatedSubjects[k - 1] + simulatedSubjectsStep; simulatedRejections[k - 1] = simulatedRejections[k - 1] + simulatedRejectionsStep; if (k < kMax) { simulatedFutilityStop[k - 1] = simulatedFutilityStop[k - 1] + simulatedFutilityStopStep; } simulatedConditionalPower[k - 1] = simulatedConditionalPower[k - 1] + simulatedConditionalPowerStep; dataIterationNumber[index - 1] = j; dataStageNumber[index - 1] = k; dataAlternative[index - 1] = alternative[i - 1]; dataNumberOfSubjects[index - 1] = simulatedSubjectsStep; dataNumberOfCumulatedSubjects[index - 1] = sum(sampleSizesPerStage); dataRejectPerStage[index - 1] = simulatedRejectionsStep; dataFutilityPerStage[index - 1] = simulatedFutilityStopStep; dataTestStatistic[index - 1] = testStatistic[0]; // value dataTestStatisticsPerStage[index - 
1] = testStatisticsPerStage[k - 1]; dataTrialStop[index - 1] = trialStop; dataConditionalPowerAchieved[index - 1] = simulatedConditionalPowerStep; dataEffectEstimate[index - 1] = effectEstimate; if (designNumber == 3L) { dataPValuesSeparate[index - 1] = testStatistic[3 + k - 1]; // pValuesSeparate } index++; } } double simulatedOverallSubjects = sum(rangeVector(simulatedSubjects, 0, kMax - 1)); sampleSizes(_, i - 1) = simulatedSubjects / iterations(_, i - 1); rejectPerStage(_, i - 1) = simulatedRejections / maxNumberOfIterations; overallReject[i - 1] = sum(simulatedRejections / maxNumberOfIterations); futilityPerStage(_, i - 1) = simulatedFutilityStop / maxNumberOfIterations; futilityStop[i - 1] = sum(simulatedFutilityStop / maxNumberOfIterations); expectedNumberOfSubjects[i - 1] = simulatedOverallSubjects / maxNumberOfIterations; if (kMax > 1) { NumericVector helper = conditionalPowerAchieved(_, i - 1); NumericVector result = simulatedConditionalPower / iterations(_, i - 1); for(int n = 1; n < kMax; n++) { helper[n] = result[n]; } conditionalPowerAchieved(_, i - 1) = helper; } } DataFrame data = DataFrame::create( _["iterationNumber"] = dataIterationNumber, _["stageNumber"] = dataStageNumber, _["alternative"] = dataAlternative, _["numberOfSubjects"] = dataNumberOfSubjects, _["numberOfCumulatedSubjects"] = dataNumberOfCumulatedSubjects, _["rejectPerStage"] = dataRejectPerStage, _["futilityPerStage"] = dataFutilityPerStage, _["testStatistic"] = dataTestStatistic, _["testStatisticsPerStage"] = dataTestStatisticsPerStage, _["effectEstimate"] = dataEffectEstimate, _["trialStop"] = dataTrialStop, _["conditionalPowerAchieved"] = dataConditionalPowerAchieved, _["pValue"] = dataPValuesSeparate ); return List::create( _["sampleSizes"] = sampleSizes, _["iterations"] = iterations, _["rejectPerStage"] = rejectPerStage, _["overallReject"] = overallReject, _["futilityPerStage"] = futilityPerStage, _["futilityStop"] = futilityStop, _["expectedNumberOfSubjects"] = 
expectedNumberOfSubjects, _["conditionalPowerAchieved"] = conditionalPowerAchieved, _["data"] = data ); } rpact/src/RcppExports.cpp0000644000176200001440000010630614450543724015165 0ustar liggesusers// Generated by using Rcpp::compileAttributes() -> do not edit by hand // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #include "rpact_types.h" #include using namespace Rcpp; #ifdef RCPP_USE_GLOBAL_ROSTREAM Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); #endif // getFisherCombinationSizeCpp double getFisherCombinationSizeCpp(double kMax, NumericVector alpha0Vec, NumericVector criticalValues, NumericVector tVec, NumericVector cases); RcppExport SEXP _rpact_getFisherCombinationSizeCpp(SEXP kMaxSEXP, SEXP alpha0VecSEXP, SEXP criticalValuesSEXP, SEXP tVecSEXP, SEXP casesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< NumericVector >::type alpha0Vec(alpha0VecSEXP); Rcpp::traits::input_parameter< NumericVector >::type criticalValues(criticalValuesSEXP); Rcpp::traits::input_parameter< NumericVector >::type tVec(tVecSEXP); Rcpp::traits::input_parameter< NumericVector >::type cases(casesSEXP); rcpp_result_gen = Rcpp::wrap(getFisherCombinationSizeCpp(kMax, alpha0Vec, criticalValues, tVec, cases)); return rcpp_result_gen; END_RCPP } // getSimulatedAlphaCpp double getSimulatedAlphaCpp(int kMax, NumericVector alpha0, NumericVector criticalValues, NumericVector tVec, int iterations); RcppExport SEXP _rpact_getSimulatedAlphaCpp(SEXP kMaxSEXP, SEXP alpha0SEXP, SEXP criticalValuesSEXP, SEXP tVecSEXP, SEXP iterationsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< NumericVector >::type alpha0(alpha0SEXP); Rcpp::traits::input_parameter< NumericVector >::type 
criticalValues(criticalValuesSEXP); Rcpp::traits::input_parameter< NumericVector >::type tVec(tVecSEXP); Rcpp::traits::input_parameter< int >::type iterations(iterationsSEXP); rcpp_result_gen = Rcpp::wrap(getSimulatedAlphaCpp(kMax, alpha0, criticalValues, tVec, iterations)); return rcpp_result_gen; END_RCPP } // getFisherCombinationCasesCpp NumericVector getFisherCombinationCasesCpp(int kMax, NumericVector tVec); RcppExport SEXP _rpact_getFisherCombinationCasesCpp(SEXP kMaxSEXP, SEXP tVecSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< NumericVector >::type tVec(tVecSEXP); rcpp_result_gen = Rcpp::wrap(getFisherCombinationCasesCpp(kMax, tVec)); return rcpp_result_gen; END_RCPP } // getDesignFisherTryCpp List getDesignFisherTryCpp(int kMax, double alpha, double tolerance, NumericVector criticalValues, NumericVector scale, NumericVector alpha0Vec, NumericVector userAlphaSpending, String method); RcppExport SEXP _rpact_getDesignFisherTryCpp(SEXP kMaxSEXP, SEXP alphaSEXP, SEXP toleranceSEXP, SEXP criticalValuesSEXP, SEXP scaleSEXP, SEXP alpha0VecSEXP, SEXP userAlphaSpendingSEXP, SEXP methodSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); Rcpp::traits::input_parameter< NumericVector >::type criticalValues(criticalValuesSEXP); Rcpp::traits::input_parameter< NumericVector >::type scale(scaleSEXP); Rcpp::traits::input_parameter< NumericVector >::type alpha0Vec(alpha0VecSEXP); Rcpp::traits::input_parameter< NumericVector >::type userAlphaSpending(userAlphaSpendingSEXP); Rcpp::traits::input_parameter< String >::type method(methodSEXP); rcpp_result_gen = Rcpp::wrap(getDesignFisherTryCpp(kMax, alpha, tolerance, criticalValues, 
scale, alpha0Vec, userAlphaSpending, method)); return rcpp_result_gen; END_RCPP } // getGroupSequentialProbabilitiesCpp NumericMatrix getGroupSequentialProbabilitiesCpp(NumericMatrix decisionMatrix, NumericVector informationRates); RcppExport SEXP _rpact_getGroupSequentialProbabilitiesCpp(SEXP decisionMatrixSEXP, SEXP informationRatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type decisionMatrix(decisionMatrixSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); rcpp_result_gen = Rcpp::wrap(getGroupSequentialProbabilitiesCpp(decisionMatrix, informationRates)); return rcpp_result_gen; END_RCPP } // getDesignGroupSequentialPampallonaTsiatisCpp List getDesignGroupSequentialPampallonaTsiatisCpp(double tolerance, double beta, double alpha, double kMax, double deltaPT0, double deltaPT1, NumericVector informationRates, int sided, bool bindingFutility); RcppExport SEXP _rpact_getDesignGroupSequentialPampallonaTsiatisCpp(SEXP toleranceSEXP, SEXP betaSEXP, SEXP alphaSEXP, SEXP kMaxSEXP, SEXP deltaPT0SEXP, SEXP deltaPT1SEXP, SEXP informationRatesSEXP, SEXP sidedSEXP, SEXP bindingFutilitySEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); Rcpp::traits::input_parameter< double >::type beta(betaSEXP); Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); Rcpp::traits::input_parameter< double >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< double >::type deltaPT0(deltaPT0SEXP); Rcpp::traits::input_parameter< double >::type deltaPT1(deltaPT1SEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< int >::type sided(sidedSEXP); Rcpp::traits::input_parameter< bool >::type bindingFutility(bindingFutilitySEXP); rcpp_result_gen = 
Rcpp::wrap(getDesignGroupSequentialPampallonaTsiatisCpp(tolerance, beta, alpha, kMax, deltaPT0, deltaPT1, informationRates, sided, bindingFutility)); return rcpp_result_gen; END_RCPP } // getDesignGroupSequentialUserDefinedAlphaSpendingCpp NumericVector getDesignGroupSequentialUserDefinedAlphaSpendingCpp(int kMax, NumericVector userAlphaSpending, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance); RcppExport SEXP _rpact_getDesignGroupSequentialUserDefinedAlphaSpendingCpp(SEXP kMaxSEXP, SEXP userAlphaSpendingSEXP, SEXP sidedSEXP, SEXP informationRatesSEXP, SEXP bindingFutilitySEXP, SEXP futilityBoundsSEXP, SEXP toleranceSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< NumericVector >::type userAlphaSpending(userAlphaSpendingSEXP); Rcpp::traits::input_parameter< double >::type sided(sidedSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< bool >::type bindingFutility(bindingFutilitySEXP); Rcpp::traits::input_parameter< NumericVector >::type futilityBounds(futilityBoundsSEXP); Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); rcpp_result_gen = Rcpp::wrap(getDesignGroupSequentialUserDefinedAlphaSpendingCpp(kMax, userAlphaSpending, sided, informationRates, bindingFutility, futilityBounds, tolerance)); return rcpp_result_gen; END_RCPP } // getDesignGroupSequentialAlphaSpendingCpp NumericVector getDesignGroupSequentialAlphaSpendingCpp(int kMax, double alpha, double gammaA, String typeOfDesign, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance); RcppExport SEXP _rpact_getDesignGroupSequentialAlphaSpendingCpp(SEXP kMaxSEXP, SEXP alphaSEXP, SEXP gammaASEXP, SEXP typeOfDesignSEXP, SEXP sidedSEXP, SEXP informationRatesSEXP, 
SEXP bindingFutilitySEXP, SEXP futilityBoundsSEXP, SEXP toleranceSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); Rcpp::traits::input_parameter< double >::type gammaA(gammaASEXP); Rcpp::traits::input_parameter< String >::type typeOfDesign(typeOfDesignSEXP); Rcpp::traits::input_parameter< double >::type sided(sidedSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< bool >::type bindingFutility(bindingFutilitySEXP); Rcpp::traits::input_parameter< NumericVector >::type futilityBounds(futilityBoundsSEXP); Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); rcpp_result_gen = Rcpp::wrap(getDesignGroupSequentialAlphaSpendingCpp(kMax, alpha, gammaA, typeOfDesign, sided, informationRates, bindingFutility, futilityBounds, tolerance)); return rcpp_result_gen; END_RCPP } // getDesignGroupSequentialDeltaWTCpp NumericVector getDesignGroupSequentialDeltaWTCpp(int kMax, double alpha, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance, double deltaWT); RcppExport SEXP _rpact_getDesignGroupSequentialDeltaWTCpp(SEXP kMaxSEXP, SEXP alphaSEXP, SEXP sidedSEXP, SEXP informationRatesSEXP, SEXP bindingFutilitySEXP, SEXP futilityBoundsSEXP, SEXP toleranceSEXP, SEXP deltaWTSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); Rcpp::traits::input_parameter< double >::type sided(sidedSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< bool >::type bindingFutility(bindingFutilitySEXP); Rcpp::traits::input_parameter< NumericVector >::type 
futilityBounds(futilityBoundsSEXP); Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); Rcpp::traits::input_parameter< double >::type deltaWT(deltaWTSEXP); rcpp_result_gen = Rcpp::wrap(getDesignGroupSequentialDeltaWTCpp(kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance, deltaWT)); return rcpp_result_gen; END_RCPP } // getDesignGroupSequentialPocockCpp NumericVector getDesignGroupSequentialPocockCpp(int kMax, double alpha, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance); RcppExport SEXP _rpact_getDesignGroupSequentialPocockCpp(SEXP kMaxSEXP, SEXP alphaSEXP, SEXP sidedSEXP, SEXP informationRatesSEXP, SEXP bindingFutilitySEXP, SEXP futilityBoundsSEXP, SEXP toleranceSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); Rcpp::traits::input_parameter< double >::type sided(sidedSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< bool >::type bindingFutility(bindingFutilitySEXP); Rcpp::traits::input_parameter< NumericVector >::type futilityBounds(futilityBoundsSEXP); Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); rcpp_result_gen = Rcpp::wrap(getDesignGroupSequentialPocockCpp(kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance)); return rcpp_result_gen; END_RCPP } // getDesignGroupSequentialOBrienAndFlemingCpp NumericVector getDesignGroupSequentialOBrienAndFlemingCpp(int kMax, double alpha, double sided, NumericVector informationRates, bool bindingFutility, NumericVector futilityBounds, double tolerance); RcppExport SEXP _rpact_getDesignGroupSequentialOBrienAndFlemingCpp(SEXP kMaxSEXP, SEXP alphaSEXP, SEXP sidedSEXP, SEXP informationRatesSEXP, SEXP bindingFutilitySEXP, SEXP 
futilityBoundsSEXP, SEXP toleranceSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); Rcpp::traits::input_parameter< double >::type sided(sidedSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< bool >::type bindingFutility(bindingFutilitySEXP); Rcpp::traits::input_parameter< NumericVector >::type futilityBounds(futilityBoundsSEXP); Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); rcpp_result_gen = Rcpp::wrap(getDesignGroupSequentialOBrienAndFlemingCpp(kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance)); return rcpp_result_gen; END_RCPP } // getDesignGroupSequentialBetaSpendingCpp List getDesignGroupSequentialBetaSpendingCpp(NumericVector criticalValues, int kMax, NumericVector userAlphaSpending, NumericVector userBetaSpending, NumericVector informationRates, bool bindingFutility, double tolerance, String typeOfDesign, String typeBetaSpending, double gammaA, double gammaB, double alpha, double beta, double sided, bool betaAdjustment, bool twoSidedPower); RcppExport SEXP _rpact_getDesignGroupSequentialBetaSpendingCpp(SEXP criticalValuesSEXP, SEXP kMaxSEXP, SEXP userAlphaSpendingSEXP, SEXP userBetaSpendingSEXP, SEXP informationRatesSEXP, SEXP bindingFutilitySEXP, SEXP toleranceSEXP, SEXP typeOfDesignSEXP, SEXP typeBetaSpendingSEXP, SEXP gammaASEXP, SEXP gammaBSEXP, SEXP alphaSEXP, SEXP betaSEXP, SEXP sidedSEXP, SEXP betaAdjustmentSEXP, SEXP twoSidedPowerSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type criticalValues(criticalValuesSEXP); Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< NumericVector >::type userAlphaSpending(userAlphaSpendingSEXP); 
Rcpp::traits::input_parameter< NumericVector >::type userBetaSpending(userBetaSpendingSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< bool >::type bindingFutility(bindingFutilitySEXP); Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); Rcpp::traits::input_parameter< String >::type typeOfDesign(typeOfDesignSEXP); Rcpp::traits::input_parameter< String >::type typeBetaSpending(typeBetaSpendingSEXP); Rcpp::traits::input_parameter< double >::type gammaA(gammaASEXP); Rcpp::traits::input_parameter< double >::type gammaB(gammaBSEXP); Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); Rcpp::traits::input_parameter< double >::type beta(betaSEXP); Rcpp::traits::input_parameter< double >::type sided(sidedSEXP); Rcpp::traits::input_parameter< bool >::type betaAdjustment(betaAdjustmentSEXP); Rcpp::traits::input_parameter< bool >::type twoSidedPower(twoSidedPowerSEXP); rcpp_result_gen = Rcpp::wrap(getDesignGroupSequentialBetaSpendingCpp(criticalValues, kMax, userAlphaSpending, userBetaSpending, informationRates, bindingFutility, tolerance, typeOfDesign, typeBetaSpending, gammaA, gammaB, alpha, beta, sided, betaAdjustment, twoSidedPower)); return rcpp_result_gen; END_RCPP } // getDesignGroupSequentialUserDefinedBetaSpendingCpp List getDesignGroupSequentialUserDefinedBetaSpendingCpp(NumericVector criticalValues, int kMax, NumericVector userAlphaSpending, NumericVector userBetaSpending, double sided, NumericVector informationRates, bool bindingFutility, double tolerance, String typeOfDesign, double gammaA, double alpha, bool betaAdjustment, bool twoSidedPower); RcppExport SEXP _rpact_getDesignGroupSequentialUserDefinedBetaSpendingCpp(SEXP criticalValuesSEXP, SEXP kMaxSEXP, SEXP userAlphaSpendingSEXP, SEXP userBetaSpendingSEXP, SEXP sidedSEXP, SEXP informationRatesSEXP, SEXP bindingFutilitySEXP, SEXP toleranceSEXP, SEXP typeOfDesignSEXP, SEXP gammaASEXP, SEXP 
alphaSEXP, SEXP betaAdjustmentSEXP, SEXP twoSidedPowerSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type criticalValues(criticalValuesSEXP); Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< NumericVector >::type userAlphaSpending(userAlphaSpendingSEXP); Rcpp::traits::input_parameter< NumericVector >::type userBetaSpending(userBetaSpendingSEXP); Rcpp::traits::input_parameter< double >::type sided(sidedSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< bool >::type bindingFutility(bindingFutilitySEXP); Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); Rcpp::traits::input_parameter< String >::type typeOfDesign(typeOfDesignSEXP); Rcpp::traits::input_parameter< double >::type gammaA(gammaASEXP); Rcpp::traits::input_parameter< double >::type alpha(alphaSEXP); Rcpp::traits::input_parameter< bool >::type betaAdjustment(betaAdjustmentSEXP); Rcpp::traits::input_parameter< bool >::type twoSidedPower(twoSidedPowerSEXP); rcpp_result_gen = Rcpp::wrap(getDesignGroupSequentialUserDefinedBetaSpendingCpp(criticalValues, kMax, userAlphaSpending, userBetaSpending, sided, informationRates, bindingFutility, tolerance, typeOfDesign, gammaA, alpha, betaAdjustment, twoSidedPower)); return rcpp_result_gen; END_RCPP } // getSimulationMeansLoopCpp List getSimulationMeansLoopCpp(NumericVector alternative, int kMax, int maxNumberOfIterations, int designNumber, NumericVector informationRates, NumericVector futilityBounds, NumericVector alpha0Vec, NumericVector criticalValues, bool meanRatio, double thetaH0, double stDev, int groups, bool normalApproximation, NumericVector plannedSubjects, bool directionUpper, NumericVector allocationRatioPlanned, NumericVector minNumberOfSubjectsPerStage, NumericVector maxNumberOfSubjectsPerStage, double conditionalPower, double thetaH1, 
double stDevH1, int calcSubjectsFunctionType, Nullable calcSubjectsFunctionR, SEXP calcSubjectsFunctionCpp); RcppExport SEXP _rpact_getSimulationMeansLoopCpp(SEXP alternativeSEXP, SEXP kMaxSEXP, SEXP maxNumberOfIterationsSEXP, SEXP designNumberSEXP, SEXP informationRatesSEXP, SEXP futilityBoundsSEXP, SEXP alpha0VecSEXP, SEXP criticalValuesSEXP, SEXP meanRatioSEXP, SEXP thetaH0SEXP, SEXP stDevSEXP, SEXP groupsSEXP, SEXP normalApproximationSEXP, SEXP plannedSubjectsSEXP, SEXP directionUpperSEXP, SEXP allocationRatioPlannedSEXP, SEXP minNumberOfSubjectsPerStageSEXP, SEXP maxNumberOfSubjectsPerStageSEXP, SEXP conditionalPowerSEXP, SEXP thetaH1SEXP, SEXP stDevH1SEXP, SEXP calcSubjectsFunctionTypeSEXP, SEXP calcSubjectsFunctionRSEXP, SEXP calcSubjectsFunctionCppSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type alternative(alternativeSEXP); Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< int >::type maxNumberOfIterations(maxNumberOfIterationsSEXP); Rcpp::traits::input_parameter< int >::type designNumber(designNumberSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< NumericVector >::type futilityBounds(futilityBoundsSEXP); Rcpp::traits::input_parameter< NumericVector >::type alpha0Vec(alpha0VecSEXP); Rcpp::traits::input_parameter< NumericVector >::type criticalValues(criticalValuesSEXP); Rcpp::traits::input_parameter< bool >::type meanRatio(meanRatioSEXP); Rcpp::traits::input_parameter< double >::type thetaH0(thetaH0SEXP); Rcpp::traits::input_parameter< double >::type stDev(stDevSEXP); Rcpp::traits::input_parameter< int >::type groups(groupsSEXP); Rcpp::traits::input_parameter< bool >::type normalApproximation(normalApproximationSEXP); Rcpp::traits::input_parameter< NumericVector >::type plannedSubjects(plannedSubjectsSEXP); Rcpp::traits::input_parameter< bool >::type 
directionUpper(directionUpperSEXP); Rcpp::traits::input_parameter< NumericVector >::type allocationRatioPlanned(allocationRatioPlannedSEXP); Rcpp::traits::input_parameter< NumericVector >::type minNumberOfSubjectsPerStage(minNumberOfSubjectsPerStageSEXP); Rcpp::traits::input_parameter< NumericVector >::type maxNumberOfSubjectsPerStage(maxNumberOfSubjectsPerStageSEXP); Rcpp::traits::input_parameter< double >::type conditionalPower(conditionalPowerSEXP); Rcpp::traits::input_parameter< double >::type thetaH1(thetaH1SEXP); Rcpp::traits::input_parameter< double >::type stDevH1(stDevH1SEXP); Rcpp::traits::input_parameter< int >::type calcSubjectsFunctionType(calcSubjectsFunctionTypeSEXP); Rcpp::traits::input_parameter< Nullable >::type calcSubjectsFunctionR(calcSubjectsFunctionRSEXP); Rcpp::traits::input_parameter< SEXP >::type calcSubjectsFunctionCpp(calcSubjectsFunctionCppSEXP); rcpp_result_gen = Rcpp::wrap(getSimulationMeansLoopCpp(alternative, kMax, maxNumberOfIterations, designNumber, informationRates, futilityBounds, alpha0Vec, criticalValues, meanRatio, thetaH0, stDev, groups, normalApproximation, plannedSubjects, directionUpper, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, thetaH1, stDevH1, calcSubjectsFunctionType, calcSubjectsFunctionR, calcSubjectsFunctionCpp)); return rcpp_result_gen; END_RCPP } // getSimulationRatesCpp List getSimulationRatesCpp(int kMax, NumericVector informationRates, NumericVector criticalValues, NumericVector pi1, double pi2, int maxNumberOfIterations, int designNumber, int groups, NumericVector futilityBounds, NumericVector alpha0Vec, NumericVector minNumberOfSubjectsPerStage, NumericVector maxNumberOfSubjectsPerStage, NumericVector conditionalPower, NumericVector pi1H1, NumericVector pi2H1, bool normalApproximation, NumericVector plannedSubjects, bool directionUpper, NumericVector allocationRatioPlanned, bool riskRatio, double thetaH0, int calcSubjectsFunctionType, Nullable 
calcSubjectsFunctionR, SEXP calcSubjectsFunctionCpp); RcppExport SEXP _rpact_getSimulationRatesCpp(SEXP kMaxSEXP, SEXP informationRatesSEXP, SEXP criticalValuesSEXP, SEXP pi1SEXP, SEXP pi2SEXP, SEXP maxNumberOfIterationsSEXP, SEXP designNumberSEXP, SEXP groupsSEXP, SEXP futilityBoundsSEXP, SEXP alpha0VecSEXP, SEXP minNumberOfSubjectsPerStageSEXP, SEXP maxNumberOfSubjectsPerStageSEXP, SEXP conditionalPowerSEXP, SEXP pi1H1SEXP, SEXP pi2H1SEXP, SEXP normalApproximationSEXP, SEXP plannedSubjectsSEXP, SEXP directionUpperSEXP, SEXP allocationRatioPlannedSEXP, SEXP riskRatioSEXP, SEXP thetaH0SEXP, SEXP calcSubjectsFunctionTypeSEXP, SEXP calcSubjectsFunctionRSEXP, SEXP calcSubjectsFunctionCppSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< NumericVector >::type criticalValues(criticalValuesSEXP); Rcpp::traits::input_parameter< NumericVector >::type pi1(pi1SEXP); Rcpp::traits::input_parameter< double >::type pi2(pi2SEXP); Rcpp::traits::input_parameter< int >::type maxNumberOfIterations(maxNumberOfIterationsSEXP); Rcpp::traits::input_parameter< int >::type designNumber(designNumberSEXP); Rcpp::traits::input_parameter< int >::type groups(groupsSEXP); Rcpp::traits::input_parameter< NumericVector >::type futilityBounds(futilityBoundsSEXP); Rcpp::traits::input_parameter< NumericVector >::type alpha0Vec(alpha0VecSEXP); Rcpp::traits::input_parameter< NumericVector >::type minNumberOfSubjectsPerStage(minNumberOfSubjectsPerStageSEXP); Rcpp::traits::input_parameter< NumericVector >::type maxNumberOfSubjectsPerStage(maxNumberOfSubjectsPerStageSEXP); Rcpp::traits::input_parameter< NumericVector >::type conditionalPower(conditionalPowerSEXP); Rcpp::traits::input_parameter< NumericVector >::type pi1H1(pi1H1SEXP); Rcpp::traits::input_parameter< NumericVector >::type 
pi2H1(pi2H1SEXP); Rcpp::traits::input_parameter< bool >::type normalApproximation(normalApproximationSEXP); Rcpp::traits::input_parameter< NumericVector >::type plannedSubjects(plannedSubjectsSEXP); Rcpp::traits::input_parameter< bool >::type directionUpper(directionUpperSEXP); Rcpp::traits::input_parameter< NumericVector >::type allocationRatioPlanned(allocationRatioPlannedSEXP); Rcpp::traits::input_parameter< bool >::type riskRatio(riskRatioSEXP); Rcpp::traits::input_parameter< double >::type thetaH0(thetaH0SEXP); Rcpp::traits::input_parameter< int >::type calcSubjectsFunctionType(calcSubjectsFunctionTypeSEXP); Rcpp::traits::input_parameter< Nullable >::type calcSubjectsFunctionR(calcSubjectsFunctionRSEXP); Rcpp::traits::input_parameter< SEXP >::type calcSubjectsFunctionCpp(calcSubjectsFunctionCppSEXP); rcpp_result_gen = Rcpp::wrap(getSimulationRatesCpp(kMax, informationRates, criticalValues, pi1, pi2, maxNumberOfIterations, designNumber, groups, futilityBounds, alpha0Vec, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, pi1H1, pi2H1, normalApproximation, plannedSubjects, directionUpper, allocationRatioPlanned, riskRatio, thetaH0, calcSubjectsFunctionType, calcSubjectsFunctionR, calcSubjectsFunctionCpp)); return rcpp_result_gen; END_RCPP } // getSimulationSurvivalCpp List getSimulationSurvivalCpp(int designNumber, int kMax, int sided, NumericVector criticalValues, NumericVector informationRates, double conditionalPower, NumericVector plannedEvents, double thetaH1, NumericVector minNumberOfEventsPerStage, NumericVector maxNumberOfEventsPerStage, bool directionUpper, double allocationRatioPlanned, NumericVector accrualTime, IntegerVector treatmentGroup, double thetaH0, NumericVector futilityBounds, NumericVector alpha0Vec, NumericVector pi1Vec, double pi2, double eventTime, NumericVector piecewiseSurvivalTime, NumericVector cdfValues1, NumericVector cdfValues2, NumericVector lambdaVec1, NumericVector lambdaVec2, NumericVector phi, int 
maxNumberOfSubjects, int maxNumberOfIterations, int maxNumberOfRawDatasetsPerStage, double kappa, int calcEventsFunctionType, Nullable calcEventsFunctionR, SEXP calcEventsFunctionCpp); RcppExport SEXP _rpact_getSimulationSurvivalCpp(SEXP designNumberSEXP, SEXP kMaxSEXP, SEXP sidedSEXP, SEXP criticalValuesSEXP, SEXP informationRatesSEXP, SEXP conditionalPowerSEXP, SEXP plannedEventsSEXP, SEXP thetaH1SEXP, SEXP minNumberOfEventsPerStageSEXP, SEXP maxNumberOfEventsPerStageSEXP, SEXP directionUpperSEXP, SEXP allocationRatioPlannedSEXP, SEXP accrualTimeSEXP, SEXP treatmentGroupSEXP, SEXP thetaH0SEXP, SEXP futilityBoundsSEXP, SEXP alpha0VecSEXP, SEXP pi1VecSEXP, SEXP pi2SEXP, SEXP eventTimeSEXP, SEXP piecewiseSurvivalTimeSEXP, SEXP cdfValues1SEXP, SEXP cdfValues2SEXP, SEXP lambdaVec1SEXP, SEXP lambdaVec2SEXP, SEXP phiSEXP, SEXP maxNumberOfSubjectsSEXP, SEXP maxNumberOfIterationsSEXP, SEXP maxNumberOfRawDatasetsPerStageSEXP, SEXP kappaSEXP, SEXP calcEventsFunctionTypeSEXP, SEXP calcEventsFunctionRSEXP, SEXP calcEventsFunctionCppSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type designNumber(designNumberSEXP); Rcpp::traits::input_parameter< int >::type kMax(kMaxSEXP); Rcpp::traits::input_parameter< int >::type sided(sidedSEXP); Rcpp::traits::input_parameter< NumericVector >::type criticalValues(criticalValuesSEXP); Rcpp::traits::input_parameter< NumericVector >::type informationRates(informationRatesSEXP); Rcpp::traits::input_parameter< double >::type conditionalPower(conditionalPowerSEXP); Rcpp::traits::input_parameter< NumericVector >::type plannedEvents(plannedEventsSEXP); Rcpp::traits::input_parameter< double >::type thetaH1(thetaH1SEXP); Rcpp::traits::input_parameter< NumericVector >::type minNumberOfEventsPerStage(minNumberOfEventsPerStageSEXP); Rcpp::traits::input_parameter< NumericVector >::type maxNumberOfEventsPerStage(maxNumberOfEventsPerStageSEXP); Rcpp::traits::input_parameter< bool 
>::type directionUpper(directionUpperSEXP); Rcpp::traits::input_parameter< double >::type allocationRatioPlanned(allocationRatioPlannedSEXP); Rcpp::traits::input_parameter< NumericVector >::type accrualTime(accrualTimeSEXP); Rcpp::traits::input_parameter< IntegerVector >::type treatmentGroup(treatmentGroupSEXP); Rcpp::traits::input_parameter< double >::type thetaH0(thetaH0SEXP); Rcpp::traits::input_parameter< NumericVector >::type futilityBounds(futilityBoundsSEXP); Rcpp::traits::input_parameter< NumericVector >::type alpha0Vec(alpha0VecSEXP); Rcpp::traits::input_parameter< NumericVector >::type pi1Vec(pi1VecSEXP); Rcpp::traits::input_parameter< double >::type pi2(pi2SEXP); Rcpp::traits::input_parameter< double >::type eventTime(eventTimeSEXP); Rcpp::traits::input_parameter< NumericVector >::type piecewiseSurvivalTime(piecewiseSurvivalTimeSEXP); Rcpp::traits::input_parameter< NumericVector >::type cdfValues1(cdfValues1SEXP); Rcpp::traits::input_parameter< NumericVector >::type cdfValues2(cdfValues2SEXP); Rcpp::traits::input_parameter< NumericVector >::type lambdaVec1(lambdaVec1SEXP); Rcpp::traits::input_parameter< NumericVector >::type lambdaVec2(lambdaVec2SEXP); Rcpp::traits::input_parameter< NumericVector >::type phi(phiSEXP); Rcpp::traits::input_parameter< int >::type maxNumberOfSubjects(maxNumberOfSubjectsSEXP); Rcpp::traits::input_parameter< int >::type maxNumberOfIterations(maxNumberOfIterationsSEXP); Rcpp::traits::input_parameter< int >::type maxNumberOfRawDatasetsPerStage(maxNumberOfRawDatasetsPerStageSEXP); Rcpp::traits::input_parameter< double >::type kappa(kappaSEXP); Rcpp::traits::input_parameter< int >::type calcEventsFunctionType(calcEventsFunctionTypeSEXP); Rcpp::traits::input_parameter< Nullable >::type calcEventsFunctionR(calcEventsFunctionRSEXP); Rcpp::traits::input_parameter< SEXP >::type calcEventsFunctionCpp(calcEventsFunctionCppSEXP); rcpp_result_gen = Rcpp::wrap(getSimulationSurvivalCpp(designNumber, kMax, sided, criticalValues, 
informationRates, conditionalPower, plannedEvents, thetaH1, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocationRatioPlanned, accrualTime, treatmentGroup, thetaH0, futilityBounds, alpha0Vec, pi1Vec, pi2, eventTime, piecewiseSurvivalTime, cdfValues1, cdfValues2, lambdaVec1, lambdaVec2, phi, maxNumberOfSubjects, maxNumberOfIterations, maxNumberOfRawDatasetsPerStage, kappa, calcEventsFunctionType, calcEventsFunctionR, calcEventsFunctionCpp)); return rcpp_result_gen; END_RCPP } // getOneMinusQNorm double getOneMinusQNorm(double p, double mean, double sd, double lowerTail, double logP, double epsilon); RcppExport SEXP _rpact_getOneMinusQNorm(SEXP pSEXP, SEXP meanSEXP, SEXP sdSEXP, SEXP lowerTailSEXP, SEXP logPSEXP, SEXP epsilonSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type p(pSEXP); Rcpp::traits::input_parameter< double >::type mean(meanSEXP); Rcpp::traits::input_parameter< double >::type sd(sdSEXP); Rcpp::traits::input_parameter< double >::type lowerTail(lowerTailSEXP); Rcpp::traits::input_parameter< double >::type logP(logPSEXP); Rcpp::traits::input_parameter< double >::type epsilon(epsilonSEXP); rcpp_result_gen = Rcpp::wrap(getOneMinusQNorm(p, mean, sd, lowerTail, logP, epsilon)); return rcpp_result_gen; END_RCPP } // zeroin double zeroin(Function f, double lower, double upper, double tolerance, int maxIter); RcppExport SEXP _rpact_zeroin(SEXP fSEXP, SEXP lowerSEXP, SEXP upperSEXP, SEXP toleranceSEXP, SEXP maxIterSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< Function >::type f(fSEXP); Rcpp::traits::input_parameter< double >::type lower(lowerSEXP); Rcpp::traits::input_parameter< double >::type upper(upperSEXP); Rcpp::traits::input_parameter< double >::type tolerance(toleranceSEXP); Rcpp::traits::input_parameter< int >::type maxIter(maxIterSEXP); rcpp_result_gen = Rcpp::wrap(zeroin(f, lower, 
upper, tolerance, maxIter)); return rcpp_result_gen; END_RCPP } // getCipheredValue std::string getCipheredValue(String x); RcppExport SEXP _rpact_getCipheredValue(SEXP xSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< String >::type x(xSEXP); rcpp_result_gen = Rcpp::wrap(getCipheredValue(x)); return rcpp_result_gen; END_RCPP } // getFraction IntegerVector getFraction(double x, double epsilon, int maxNumberOfSearchSteps); RcppExport SEXP _rpact_getFraction(SEXP xSEXP, SEXP epsilonSEXP, SEXP maxNumberOfSearchStepsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type x(xSEXP); Rcpp::traits::input_parameter< double >::type epsilon(epsilonSEXP); Rcpp::traits::input_parameter< int >::type maxNumberOfSearchSteps(maxNumberOfSearchStepsSEXP); rcpp_result_gen = Rcpp::wrap(getFraction(x, epsilon, maxNumberOfSearchSteps)); return rcpp_result_gen; END_RCPP } static const R_CallMethodDef CallEntries[] = { {"_rpact_getFisherCombinationSizeCpp", (DL_FUNC) &_rpact_getFisherCombinationSizeCpp, 5}, {"_rpact_getSimulatedAlphaCpp", (DL_FUNC) &_rpact_getSimulatedAlphaCpp, 5}, {"_rpact_getFisherCombinationCasesCpp", (DL_FUNC) &_rpact_getFisherCombinationCasesCpp, 2}, {"_rpact_getDesignFisherTryCpp", (DL_FUNC) &_rpact_getDesignFisherTryCpp, 8}, {"_rpact_getGroupSequentialProbabilitiesCpp", (DL_FUNC) &_rpact_getGroupSequentialProbabilitiesCpp, 2}, {"_rpact_getDesignGroupSequentialPampallonaTsiatisCpp", (DL_FUNC) &_rpact_getDesignGroupSequentialPampallonaTsiatisCpp, 9}, {"_rpact_getDesignGroupSequentialUserDefinedAlphaSpendingCpp", (DL_FUNC) &_rpact_getDesignGroupSequentialUserDefinedAlphaSpendingCpp, 7}, {"_rpact_getDesignGroupSequentialAlphaSpendingCpp", (DL_FUNC) &_rpact_getDesignGroupSequentialAlphaSpendingCpp, 9}, {"_rpact_getDesignGroupSequentialDeltaWTCpp", (DL_FUNC) &_rpact_getDesignGroupSequentialDeltaWTCpp, 8}, 
{"_rpact_getDesignGroupSequentialPocockCpp", (DL_FUNC) &_rpact_getDesignGroupSequentialPocockCpp, 7}, {"_rpact_getDesignGroupSequentialOBrienAndFlemingCpp", (DL_FUNC) &_rpact_getDesignGroupSequentialOBrienAndFlemingCpp, 7}, {"_rpact_getDesignGroupSequentialBetaSpendingCpp", (DL_FUNC) &_rpact_getDesignGroupSequentialBetaSpendingCpp, 16}, {"_rpact_getDesignGroupSequentialUserDefinedBetaSpendingCpp", (DL_FUNC) &_rpact_getDesignGroupSequentialUserDefinedBetaSpendingCpp, 13}, {"_rpact_getSimulationMeansLoopCpp", (DL_FUNC) &_rpact_getSimulationMeansLoopCpp, 24}, {"_rpact_getSimulationRatesCpp", (DL_FUNC) &_rpact_getSimulationRatesCpp, 24}, {"_rpact_getSimulationSurvivalCpp", (DL_FUNC) &_rpact_getSimulationSurvivalCpp, 33}, {"_rpact_getOneMinusQNorm", (DL_FUNC) &_rpact_getOneMinusQNorm, 6}, {"_rpact_zeroin", (DL_FUNC) &_rpact_zeroin, 5}, {"_rpact_getCipheredValue", (DL_FUNC) &_rpact_getCipheredValue, 1}, {"_rpact_getFraction", (DL_FUNC) &_rpact_getFraction, 3}, {NULL, NULL, 0} }; RcppExport void R_init_rpact(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } rpact/vignettes/0000755000176200001440000000000014450551401013372 5ustar liggesusersrpact/vignettes/rpact_getting_started.Rmd0000644000176200001440000001563714450500430020426 0ustar liggesusers--- title: "Getting started with rpact" author: "Friedrich Pahlke and Gernot Wassmer" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Getting started with rpact} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` Confirmatory Adaptive Clinical Trial Design, Simulation, and Analysis. 
## Functional Range * Sample size and power calculation for + means (continuous endpoint) + rates (binary endpoint) + survival trials with - piecewise accrual time and intensity - piecewise exponential survival time - survival times that follow a Weibull distribution * Fixed sample design and designs with interim analysis stages * Simulation tool for means, rates, and survival data + Assessment of adaptive sample size/event number recalculations based on conditional power + Assessment of treatment selection strategies in multi-arm trials * Adaptive analysis of means, rates, and survival data * Adaptive designs and analysis for multi-arm trials * Simulation and analysis for enrichment designs testing means, rates, and hazard ratios ## Learn to use rpact We recommend three ways to learn how to use `rpact`: > 1. Use the Shiny app: [shiny.rpact.com](https://www.rpact.com/products#public-rpact-shiny-app) > 2. Use the Vignettes: > [www.rpact.org/vignettes](https://www.rpact.org/vignettes/) > 3. Book a training: > [www.rpact.com](https://www.rpact.com/services#learning-and-training) ### Vignettes The vignettes are hosted at [www.rpact.org/vignettes](https://www.rpact.org/vignettes/) and cover the following topics: 1. Defining Group Sequential Boundaries with rpact 2. Designing Group Sequential Trials with Two Groups and a Continuous Endpoint with rpact 3. Designing Group Sequential Trials with a Binary Endpoint with rpact 4. Designing Group Sequential Trials with Two Groups and a Survival Endpoint with rpact 5. Simulation-Based Design of Group Sequential Trials with a Survival Endpoint with rpact 6. An Example to Illustrate Boundary Re-Calculations during the Trial with rpact 7. Analysis of a Group Sequential Trial with a Survival Endpoint using rpact 8. Defining Accrual Time and Accrual Intensity with rpact 9. How to use R Generics with rpact 10. How to Create Admirable Plots with rpact 11. 
Comparing Sample Size and Power Calculation Results for a Group Sequential Trial with a Survival Endpoint: rpact vs. gsDesign 12. Supplementing and Enhancing rpact’s Graphical Capabilities with ggplot2 13. Using the Inverse Normal Combination Test for Analyzing a Trial with Continuous Endpoint and Potential Sample Size Re-Assessment with rpact 14. Planning a Trial with Binary Endpoints with rpact 15. Planning a Survival Trial with rpact 16. Simulation of a Trial with a Binary Endpoint and Unblinded Sample Size Re-Calculation with rpact 17. How to Create Summaries with rpact 18. How to Create One- and Multi-Arm Analysis Result Plots with rpact 19. How to Create One- and Multi-Arm Simulation Result Plots with rpact 20. Simulating Multi-Arm Designs with a Continuous Endpoint using rpact 21. Analysis of a Multi-Arm Design with a Binary Endpoint using rpact 22. Step-by-Step rpact Tutorial 23. Planning and Analyzing a Group-Sequential Multi-Arm Multi-Stage Design with Binary Endpoint using rpact 24. Two-arm analysis for continuous data with covariates from raw data (*exclusive*) 25. How to install the latest developer version (*exclusive*) 26. 
Delayed Response Designs with rpact ## User Concept ### Workflow * Everything starts with a design, e.g.: `design <- getDesignGroupSequential()` * Find the optimal design parameters with the help of `rpact` comparison tools: `getDesignSet` * Calculate the required sample size, e.g.: `getSampleSizeMeans()`, `getPowerMeans()` * Simulate specific characteristics of an adaptive design, e.g.: `getSimulationMeans()` * Collect your data, import it into R and create a dataset: `data <- getDataset()` * Analyze your data: `getAnalysisResults(design, data)` ### Focus on Usability The most important `rpact` functions have intuitive names: * `getDesign`[`GroupSequential`/`InverseNormal`/`Fisher`]`()` * `getDesignCharacteristics()` * `getSampleSize`[`Means`/`Rates`/`Survival`]`()` * `getPower`[`Means`/`Rates`/`Survival`]`()` * `getSimulation`[`MultiArm`/`Enrichment`][`Means`/`Rates`/`Survival`]`()` * `getDataSet()` * `getAnalysisResults()` * `getStageResults()` RStudio/Eclipse: auto code completion makes it easy to use these functions. ### R generics In general, everything runs with the R standard functions which are always present in R: so-called R generics, e.g., `print`, `summary`, `plot`, `as.data.frame`, `names`, `length` ### Utilities Several utility functions are available, e.g. * `getAccrualTime()` * `getPiecewiseSurvivalTime()` * `getNumberOfSubjects()` * `getEventProbabilities()` * `getPiecewiseExponentialDistribution()` * survival helper functions for conversion of `pi`, `lambda` and `median`, e.g., `getLambdaByMedian()` * `testPackage()`: installation qualification on a client computer or company server (via unit tests) ## Validation Please [contact](https://www.rpact.com/contact) us to learn how to use `rpact` on FDA/GxP-compliant validated corporate computer systems and how to get a copy of the formal validation documentation that is customized and licensed for exclusive use by your company, e.g., to fulfill regulatory requirements. 
## About * **rpact** is a comprehensive validated^[The rpact validation documentation is available exclusively for our customers and supporting companies. For more information visit [www.rpact.com/services/sla](https://www.rpact.com/services/sla)] R package for clinical research which + enables the design and analysis of confirmatory adaptive group sequential designs + is a powerful sample size calculator + is a free of charge open-source software licensed under [LGPL-3](https://cran.r-project.org/web/licenses/LGPL-3) + particularly, implements the methods described in the recent monograph by [Wassmer and Brannath (2016)](https://doi.org/10.1007%2F978-3-319-32562-0) > For more information please visit [www.rpact.org](https://www.rpact.org) * **RPACT** is a company which offers + enterprise software development services + technical support for the `rpact` package + consultancy and user training for clinical research using R + validated software solutions and R package development for clinical research > For more information please visit [www.rpact.com](https://www.rpact.com) ## Contact * [info@rpact.com](mailto:info@rpact.com) * [www.rpact.com/contact](https://www.rpact.com/contact) rpact/R/0000755000176200001440000000000014450551044011566 5ustar liggesusersrpact/R/parameter_descriptions.R0000644000176200001440000011277514445307576016511 0ustar liggesusers## | ## | *Parameters* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 
23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' Parameter Description: "..." #' @param ... Ensures that all arguments (starting from the "...") are to be named and #' that a warning will be displayed if unknown arguments are passed. #' @name param_three_dots #' @keywords internal NULL #' Parameter Description: "..." (optional plot arguments) #' @param ... Optional plot arguments. At the moment \code{xlim} and \code{ylim} are implemented #' for changing x or y axis limits without dropping data observations. #' @name param_three_dots_plot #' @keywords internal NULL #' Parameter Description: Maximum Number of Stages #' @param kMax The maximum number of stages \code{K}. #' Must be a positive integer of length 1 (default value is \code{3}). #' The maximum selectable \code{kMax} is \code{20} for group sequential or inverse normal and #' \code{6} for Fisher combination test designs. #' @name param_kMax #' @keywords internal NULL #' Parameter Description: Alpha #' @param alpha The significance level alpha, default is \code{0.025}. Must be a positive numeric of length 1. #' @name param_alpha #' @keywords internal NULL #' Parameter Description: Beta #' @param beta Type II error rate, necessary for providing sample size calculations #' (e.g., \code{\link[=getSampleSizeMeans]{getSampleSizeMeans()}}), beta spending function designs, #' or optimum designs, default is \code{0.20}. Must be a positive numeric of length 1. #' @name param_beta #' @keywords internal NULL #' Parameter Description: Sided #' @param sided Is the alternative one-sided (\code{1}) or two-sided (\code{2}), default is \code{1}. Must be a positive integer of length 1. #' @name param_sided #' @keywords internal NULL #' Parameter Description: Information Rates #' @param informationRates The information rates (that must be fixed prior to the trial), #' default is \code{(1:kMax) / kMax}. 
#' @name param_informationRates #' @keywords internal NULL #' Parameter Description: Binding Futility #' @param bindingFutility Logical. If \code{bindingFutility = TRUE} is specified the calculation of #' the critical values is affected by the futility bounds and the futility threshold is binding in the #' sense that the study must be stopped if the futility condition was reached (default is \code{FALSE}). #' @name param_bindingFutility #' @keywords internal NULL #' Parameter Description: Type of Design #' @param typeOfDesign The type of design. Type of design is one of the following: #' O'Brien & Fleming (\code{"OF"}), Pocock (\code{"P"}), Wang & Tsiatis Delta class (\code{"WT"}), #' Pampallona & Tsiatis (\code{"PT"}), Haybittle & Peto ("HP"), #' Optimum design within Wang & Tsiatis class (\code{"WToptimum"}), #' O'Brien & Fleming type alpha spending (\code{"asOF"}), Pocock type alpha spending (\code{"asP"}), #' Kim & DeMets alpha spending (\code{"asKD"}), Hwang, Shi & DeCani alpha spending (\code{"asHSD"}), #' user defined alpha spending (\code{"asUser"}), no early efficacy stop (\code{"noEarlyEfficacy"}), #' default is \code{"OF"}. #' @name param_typeOfDesign #' @keywords internal NULL #' Parameter Description: Design #' @param design The trial design. #' @name param_design #' @keywords internal NULL #' Parameter Description: Design with Default #' @param design The trial design. If no trial design is specified, a fixed sample size design is used. #' In this case, Type I error rate \code{alpha}, Type II error rate \code{beta}, \code{twoSidedPower}, #' and \code{sided} can be directly entered as argument where necessary. #' @name param_design_with_default #' @keywords internal NULL #' Parameter Description: N_max #' @param nMax The maximum sample size. Must be a positive integer of length 1. 
#' @name param_nMax #' @keywords internal NULL #' Parameter Description: Theta #' @param theta A vector of standardized effect sizes (theta values), default is a sequence from -1 to 1. #' @name param_theta #' @keywords internal NULL #' Parameter Description: User Alpha Spending #' @param userAlphaSpending The user defined alpha spending. #' Numeric vector of length \code{kMax} containing the cumulative #' alpha-spending (Type I error rate) up to each interim stage: \code{0 <= alpha_1 <= ... <= alpha_K <= alpha}. #' @name param_userAlphaSpending #' @keywords internal NULL ## ## Sample Size and Power ## #' Parameter Description: Effect Under Alternative #' @param thetaH1 If specified, the value of the alternative under which #' the conditional power or sample size recalculation calculation is performed. Must be a numeric of length 1. #' @name param_thetaH1 #' @keywords internal NULL #' Parameter Description: Standard Deviation #' @param stDev The standard deviation under which the sample size or power #' calculation is performed, default is \code{1}. #' If \code{meanRatio = TRUE} is specified, \code{stDev} defines #' the coefficient of variation \code{sigma / mu2}. Must be a positive numeric of length 1. #' @name param_stDev #' @keywords internal NULL #' Parameter Description: Lambda (1) #' @param lambda1 The assumed hazard rate in the treatment group, there is no default. #' \code{lambda1} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1. #' @name param_lambda1 #' @keywords internal NULL #' Parameter Description: Lambda (2) #' @param lambda2 The assumed hazard rate in the reference group, there is no default. #' \code{lambda2} can also be used to define piecewise exponentially distributed survival times (see details). Must be a positive numeric of length 1. 
#' @name param_lambda2 #' @keywords internal NULL #' Parameter Description: Pi (1) for Rates #' @param pi1 A numeric value or vector that represents the assumed probability in #' the active treatment group if two treatment groups #' are considered, or the alternative probability for a one treatment group design, #' default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or #' \code{seq(0.4, 0.6, 0.1)} (sample size calculations). #' @name param_pi1_rates #' @keywords internal NULL #' Parameter Description: Pi (1) for Survival Data #' @param pi1 A numeric value or vector that represents the assumed event rate in the treatment group, #' default is \code{seq(0.2, 0.5, 0.1)} (power calculations and simulations) or #' \code{seq(0.4, 0.6, 0.1)} (sample size calculations). #' @name param_pi1_survival #' @keywords internal NULL #' Parameter Description: Pi (2) for Rates #' @param pi2 A numeric value that represents the assumed probability in the reference group if two treatment #' groups are considered, default is \code{0.2}. #' @name param_pi2_rates #' @keywords internal NULL #' Parameter Description: Pi (2) for Survival Data #' @param pi2 A numeric value that represents the assumed event rate in the control group, default is \code{0.2}. #' @name param_pi2_survival #' @keywords internal NULL #' Parameter Description: Median (1) #' @param median1 The assumed median survival time in the treatment group, there is no default. #' @name param_median1 #' @keywords internal NULL #' Parameter Description: Median (2) #' @param median2 The assumed median survival time in the reference group, there is no default. Must be a positive numeric of length 1. #' @name param_median2 #' @keywords internal NULL #' Parameter Description: Hazard Ratio #' @param hazardRatio The vector of hazard ratios under consideration. #' If the event or hazard rates in both treatment groups are defined, the hazard ratio needs #' not to be specified as it is calculated, there is no default. 
Must be a positive numeric of length 1. #' @name param_hazardRatio #' @keywords internal NULL #' Parameter Description: Event Time #' @param eventTime The assumed time under which the event rates are calculated, default is \code{12}. #' @name param_eventTime #' @keywords internal NULL #' Parameter Description: Piecewise Survival Time #' @param piecewiseSurvivalTime A vector that specifies the time intervals for the piecewise #' definition of the exponential survival time cumulative distribution function \cr #' (for details see \code{\link[=getPiecewiseSurvivalTime]{getPiecewiseSurvivalTime()}}). #' @name param_piecewiseSurvivalTime #' @keywords internal NULL #' Parameter Description: Kappa #' @param kappa A numeric value > 0. A \code{kappa != 1} will be used for the specification #' of the shape of the Weibull distribution. #' Default is \code{1}, i.e., the exponential survival distribution is used instead of the Weibull distribution. #' Note that the Weibull distribution cannot be used for the piecewise definition of #' the survival time distribution, i.e., only \code{piecewiseLambda} (as a single value) and \code{kappa} #' can be specified. #' This function is equivalent to \code{pweibull(t, shape = kappa, scale = 1 / lambda)} #' of the \code{stats} package, i.e., the scale parameter is \code{1 / 'hazard rate'}.\cr #' For example, #' \code{getPiecewiseExponentialDistribution(time = 130, piecewiseLambda = 0.01, kappa = 4.2)} #' and \code{pweibull(q = 130, shape = 4.2, scale = 1 / 0.01)} provide the same result. #' @name param_kappa #' @keywords internal NULL #' Parameter Description: Type Of Computation #' @param typeOfComputation Three options are available: \code{"Schoenfeld"}, \code{"Freedman"}, \code{"HsiehFreedman"}, #' the default is \code{"Schoenfeld"}. For details, see Hsieh (Statistics in Medicine, 1992). #' For non-inferiority testing (i.e., \code{thetaH0 != 1}), only Schoenfeld's formula can be used. 
#' @name param_typeOfComputation #' @keywords internal NULL #' Parameter Description: Dropout Rate (1) #' @param dropoutRate1 The assumed drop-out rate in the treatment group, default is \code{0}. #' @name param_dropoutRate1 #' @keywords internal NULL #' Parameter Description: Dropout Rate (2) #' @param dropoutRate2 The assumed drop-out rate in the control group, default is \code{0}. #' @name param_dropoutRate2 #' @keywords internal NULL #' Parameter Description: Dropout Time #' @param dropoutTime The assumed time for drop-out rates in the control and the #' treatment group, default is \code{12}. #' @name param_dropoutTime #' @keywords internal NULL ## ## Sample Size / Power ## #' Parameter Description: Alternative #' @param alternative The alternative hypothesis value for testing means. This can be a vector of assumed #' alternatives, default is \code{seq(0, 1, 0.2)} (power calculations) or \code{seq(0.2, 1, 0.2)} (sample size calculations). #' @name param_alternative #' @keywords internal NULL #' Parameter Description: Alternative for Simulation #' @param alternative The alternative hypothesis value for testing means under which the data is simulated. #' This can be a vector of assumed alternatives, default is \code{seq(0, 1, 0.2)}. #' @name param_alternative_simulation #' @keywords internal NULL ## ## Analysis ## #' Parameter Description: Stage Results #' @param stageResults The results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}}. #' @name param_stageResults #' @keywords internal NULL #' Parameter Description: Stage #' @param stage The stage number (optional). Default: total number of existing stages in the data input. #' @name param_stage #' @keywords internal NULL #' Parameter Description: N Planned #' @param nPlanned The additional (i.e., "new" and not cumulative) sample size planned for each of the subsequent stages. 
#' The argument must be a vector with length equal to the number of remaining stages and contain #' the combined sample size from both treatment groups if two groups are considered. For survival outcomes, #' it should contain the planned number of additional events. #' For multi-arm designs, it is the per-comparison (combined) sample size. #' For enrichment designs, it is the (combined) sample size for the considered sub-population. #' @name param_nPlanned #' @keywords internal NULL #' Parameter Description: Allocation Ratio Planned #' @param allocationRatioPlanned The planned allocation ratio \code{n1 / n2} for a two treatment groups #' design, default is \code{1}. For multi-arm designs, it is the allocation ratio relating the active arm(s) to the control. #' For simulating means and rates for a two treatment groups design, it can be a vector of length kMax, the number of stages. #' It can be a vector of length kMax, too, for multi-arm and enrichment designs. #' In these cases, a change of allocating subjects to treatment groups over the stages can be assessed. #' @name param_allocationRatioPlanned #' @keywords internal NULL #' Parameter Description: Allocation Ratio Planned With Optimum Option #' @param allocationRatioPlanned The planned allocation ratio \code{n1 / n2} for a two treatment groups #' design, default is \code{1}. If \code{allocationRatioPlanned = 0} is entered, #' the optimal allocation ratio yielding the smallest overall sample size is determined. #' @name param_allocationRatioPlanned_sampleSize #' @keywords internal NULL #' Parameter Description: Direction Upper #' @param directionUpper Logical. Specifies the direction of the alternative, #' only applicable for one-sided testing; default is \code{TRUE} #' which means that larger values of the test statistics yield smaller p-values. 
#' @name param_directionUpper #' @keywords internal NULL #' Parameter Description: Data Input #' @param dataInput The summary data used for calculating the test results. #' This is either an element of \code{DatasetMeans}, of \code{DatasetRates}, or of \code{DatasetSurvival} #' and should be created with the function \code{\link[=getDataset]{getDataset()}}. #' For more information see \code{\link[=getDataset]{getDataset()}}. #' @name param_dataInput #' @keywords internal NULL #' Parameter Description: Normal Approximation #' @param normalApproximation The type of computation of the p-values. Default is \code{FALSE} for #' testing means (i.e., the t test is used) and \code{TRUE} for testing rates and the hazard ratio. #' For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test #' (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values. #' In the survival setting \code{normalApproximation = FALSE} has no effect. #' @name param_normalApproximation #' @keywords internal NULL #' Parameter Description: Theta H0 #' @param thetaH0 The null hypothesis value, #' default is \code{0} for the normal and the binary case (testing means and rates, respectively), #' it is \code{1} for the survival case (testing the hazard ratio).\cr\cr #' For non-inferiority designs, \code{thetaH0} is the non-inferiority bound. #' That is, in case of (one-sided) testing of #' \itemize{ #' \item \emph{means}: a value \code{!= 0} #' (or a value \code{!= 1} for testing the mean ratio) can be specified. #' \item \emph{rates}: a value \code{!= 0} #' (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. #' \item \emph{survival data}: a bound for testing H0: \code{hazard ratio = thetaH0 != 1} can be specified. #' } #' For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for #' defining the null hypothesis H0: \code{pi = thetaH0}. 
#' @name param_thetaH0 #' @keywords internal NULL #' Parameter Description: Legend Position On Plots #' @param legendPosition The position of the legend. #' By default (\code{NA_integer_}) the algorithm tries to find a suitable position. #' Choose one of the following values to specify the position manually: #' \itemize{ #' \item \code{-1}: no legend will be shown #' \item \code{NA}: the algorithm tries to find a suitable position #' \item \code{0}: legend position outside plot #' \item \code{1}: legend position left top #' \item \code{2}: legend position left center #' \item \code{3}: legend position left bottom #' \item \code{4}: legend position right top #' \item \code{5}: legend position right center #' \item \code{6}: legend position right bottom #' } #' @name param_legendPosition #' @keywords internal NULL #' Parameter Description: Grid (Output Specification Of Multiple Plots) #' @param grid An integer value specifying the output of multiple plots. #' By default (\code{1}) a list of \code{ggplot} objects will be returned. #' If a \code{grid} value > 1 was specified, a grid plot will be returned #' if the number of plots is <= specified \code{grid} value; #' a list of \code{ggplot} objects will be returned otherwise. #' If \code{grid = 0} is specified, all plots will be created using \code{\link[base]{print}} command #' and a list of \code{ggplot} objects will be returned invisible. #' Note that one of the following packages must be installed to create a grid plot: #' 'ggpubr', 'gridExtra', or 'cowplot'. #' @name param_grid #' @keywords internal NULL ## ## Simulation ## #' Parameter Description: Min Number Of Events Per Stage #' @param minNumberOfEventsPerStage When performing a data driven sample size recalculation, #' the numeric vector \code{minNumberOfEventsPerStage} with length kMax determines the #' minimum number of events per stage (i.e., not cumulated), the first element #' is not taken into account. 
#' @name param_minNumberOfEventsPerStage #' @keywords internal NULL #' Parameter Description: Max Number Of Events Per Stage #' @param maxNumberOfEventsPerStage When performing a data driven sample size recalculation, #' the numeric vector \code{maxNumberOfEventsPerStage} with length kMax determines the maximum number #' of events per stage (i.e., not cumulated), the first element is not taken into account. #' @name param_maxNumberOfEventsPerStage #' @keywords internal NULL #' Parameter Description: Planned Subjects #' @param plannedSubjects \code{plannedSubjects} is a numeric vector of length \code{kMax} (the number of stages of the design) #' that determines the number of cumulated (overall) subjects when the interim stages are planned. #' For two treatment arms, it is the number of subjects for both treatment arms. #' For multi-arm designs, \code{plannedSubjects} refers to the number of subjects per selected active arm. #' @name param_plannedSubjects #' @keywords internal NULL #' Parameter Description: Planned Events #' @param plannedEvents \code{plannedEvents} is a numeric vector of length \code{kMax} (the number of stages of the design) #' that determines the number of cumulated (overall) events in survival designs when the interim stages are planned. #' For two treatment arms, it is the number of events for both treatment arms. #' For multi-arm designs, \code{plannedEvents} refers to the overall number of events for the selected arms plus control. #' @name param_plannedEvents #' @keywords internal NULL #' Parameter Description: Minimum Number Of Subjects Per Stage #' @param minNumberOfSubjectsPerStage When performing a data driven sample size recalculation, #' the numeric vector \code{minNumberOfSubjectsPerStage} with length kMax determines the #' minimum number of subjects per stage (i.e., not cumulated), the first element #' is not taken into account. For two treatment arms, it is the number of subjects for both treatment arms. 
#' For multi-arm designs \code{minNumberOfSubjectsPerStage} refers #' to the minimum number of subjects per selected active arm. #' @name param_minNumberOfSubjectsPerStage #' @keywords internal NULL #' Parameter Description: Maximum Number Of Subjects Per Stage #' @param maxNumberOfSubjectsPerStage When performing a data driven sample size recalculation, #' the numeric vector \code{maxNumberOfSubjectsPerStage} with length kMax determines the maximum number #' of subjects per stage (i.e., not cumulated), the first element is not taken into account. #' For two treatment arms, it is the number of subjects for both treatment arms. #' For multi-arm designs \code{maxNumberOfSubjectsPerStage} refers #' to the maximum number of subjects per selected active arm. #' @name param_maxNumberOfSubjectsPerStage #' @keywords internal NULL #' Parameter Description: Conditional Power #' @param conditionalPower The conditional power for the subsequent stage #' under which the sample size recalculation is performed. Must be a positive numeric of length 1. #' @name param_conditionalPower #' @keywords internal NULL #' Parameter Description: Conditional Power #' @param conditionalPower If \code{conditionalPower} together with \code{minNumberOfSubjectsPerStage} and #' \code{maxNumberOfSubjectsPerStage} (or \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} #' for survival designs) is specified, a sample size recalculation based on the specified conditional power is performed. #' It is defined as the power for the subsequent stage given the current data. By default, #' the conditional power will be calculated under the observed effect size. Optionally, you can also specify \code{thetaH1} and #' \code{stDevH1} (for simulating means), \code{pi1H1} and \code{pi2H1} (for simulating rates), or \code{thetaH1} (for simulating #' hazard ratios) as parameters under which it is calculated and the sample size recalculation is performed. 
#' @name param_conditionalPowerSimulation #' @keywords internal NULL #' Parameter Description: Maximum Number Of Iterations #' @param maxNumberOfIterations The number of simulation iterations, default is \code{1000}. Must be a positive integer of length 1. #' @name param_maxNumberOfIterations #' @keywords internal NULL #' Parameter Description: Calculate Subjects Function #' @param calcSubjectsFunction Optionally, a function can be entered that defines the way of performing the sample size #' recalculation. By default, sample size recalculation is performed with conditional power and specified #' \code{minNumberOfSubjectsPerStage} and \code{maxNumberOfSubjectsPerStage} (see details and examples). #' @name param_calcSubjectsFunction #' @keywords internal NULL #' Parameter Description: Calculate Events Function #' @param calcEventsFunction Optionally, a function can be entered that defines the way of performing the sample size #' recalculation. By default, event number recalculation is performed with conditional power and specified #' \code{minNumberOfEventsPerStage} and \code{maxNumberOfEventsPerStage} (see details and examples). #' @name param_calcEventsFunction #' @keywords internal NULL #' Parameter Description: Seed #' @param seed The seed to reproduce the simulation, default is a random seed. #' @name param_seed #' @keywords internal NULL #' Parameter Description: Show Statistics #' @param showStatistics Logical. If \code{TRUE}, summary statistics of the simulated data #' are displayed for the \code{print} command, otherwise the output is suppressed, default #' is \code{FALSE}. #' @name param_showStatistics #' @keywords internal NULL #' Parameter Description: Maximum Number Of Subjects #' @param maxNumberOfSubjects \code{maxNumberOfSubjects > 0} needs to be specified. #' For two treatment arms, it is the maximum number of subjects for both treatment arms. 
#' @name param_maxNumberOfSubjects #' @keywords internal NULL #' Parameter Description: Maximum Number Of Subjects For Survival Endpoint #' @param maxNumberOfSubjects \code{maxNumberOfSubjects > 0} needs to be specified. #' If accrual time and accrual intensity are specified, this will be calculated. Must be a positive integer of length 1. #' @name param_maxNumberOfSubjects_survival #' @keywords internal NULL #' Parameter Description: Accrual Time #' @param accrualTime The assumed accrual time intervals for the study, default is #' \code{c(0, 12)} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}). #' @name param_accrualTime #' @keywords internal NULL #' Parameter Description: Accrual Intensity #' @param accrualIntensity A numeric vector of accrual intensities, default is the relative #' intensity \code{0.1} (for details see \code{\link[=getAccrualTime]{getAccrualTime()}}). #' @name param_accrualIntensity #' @keywords internal NULL #' Parameter Description: Accrual Intensity Type #' @param accrualIntensityType A character value specifying the accrual intensity input type. #' Must be one of \code{"auto"}, \code{"absolute"}, or \code{"relative"}; default is \code{"auto"}, #' i.e., if all values are < 1 the type is \code{"relative"}, otherwise it is \code{"absolute"}. #' @name param_accrualIntensityType #' @keywords internal NULL #' Parameter Description: Standard Deviation Under Alternative #' @param stDevH1 If specified, the value of the standard deviation under which #' the conditional power or sample size recalculation calculation is performed, #' default is the value of \code{stDev}. Must be a positive numeric of length 1. #' @name param_stDevH1 #' @keywords internal NULL #' Parameter Description: Standard Deviation for Simulation #' @param stDev The standard deviation under which the data is simulated, #' default is \code{1}. #' If \code{meanRatio = TRUE} is specified, \code{stDev} defines #' the coefficient of variation \code{sigma / mu2}. 
Must be a positive numeric of length 1. #' @name param_stDevSimulation #' @keywords internal NULL #' Parameter Description: Number Of Treatment Groups #' @param groups The number of treatment groups (1 or 2), default is \code{2}. #' @name param_groups #' @keywords internal NULL ## ## Other ## #' Parameter Description: Nice Column Names Enabled #' @param niceColumnNamesEnabled Logical. If \code{TRUE}, nice looking column #' names will be used; syntactic names (variable names) otherwise #' (see \code{\link[base]{make.names}}). #' @name param_niceColumnNamesEnabled #' @keywords internal NULL #' Parameter Description: Include All Parameters #' @param includeAllParameters Logical. If \code{TRUE}, all available #' parameters will be included in the data frame; #' a meaningful parameter selection otherwise, default is \code{FALSE}. #' @name param_includeAllParameters #' @keywords internal NULL #' Parameter Description: Digits #' @param digits Defines how many digits are to be used for numeric values. Must be a positive integer of length 1. #' @name param_digits #' @keywords internal NULL #' Parameter Description: Tolerance #' @param tolerance The numerical tolerance, default is \code{1e-06}. Must be a positive numeric of length 1. #' @name param_tolerance #' @keywords internal NULL ## ## Plots ## #' Parameter Description: Plot Points Enabled #' @param plotPointsEnabled Logical. If \code{TRUE}, additional points will be plotted. #' @name param_plotPointsEnabled #' @keywords internal NULL #' Parameter Description: Palette #' @param palette The palette, default is \code{"Set1"}. #' @name param_palette #' @keywords internal NULL ## ## Multi-Arm and Enrichment Designs ## #' Parameter Description: Intersection Test #' @param intersectionTest Defines the multiple test for the intersection #' hypotheses in the closed system of hypotheses. 
#' Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, #' \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}. #' @name param_intersectionTest_MultiArm #' @keywords internal NULL #' Parameter Description: Intersection Test #' @param intersectionTest Defines the multiple test for the intersection #' hypotheses in the closed system of hypotheses. #' Four options are available in enrichment designs: \code{"SpiessensDebois"}, \code{"Bonferroni"}, \code{"Simes"}, #' and \code{"Sidak"}, default is \code{"Simes"}. #' @name param_intersectionTest_Enrichment #' @keywords internal NULL #' Parameter Description: Type of Selection #' @param typeOfSelection The way the treatment arms or populations are selected at interim. #' Five options are available: \code{"best"}, \code{"rbest"}, \code{"epsilon"}, \code{"all"}, and \code{"userDefined"}, #' default is \code{"best"}.\cr #' For \code{"rbest"} (select the \code{rValue} best treatment arms/populations), the parameter \code{rValue} has to be specified, #' for \code{"epsilon"} (select treatment arm/population not worse than epsilon compared to the best), the parameter #' \code{epsilonValue} has to be specified. #' If \code{"userDefined"} is selected, \code{"selectArmsFunction"} or \code{"selectPopulationsFunction"} has to be specified. #' @name param_typeOfSelection #' @keywords internal NULL #' Parameter Description: Effect Measure #' @param effectMeasure Criterion for treatment arm/population selection, either based on test statistic #' (\code{"testStatistic"}) or effect estimate (difference for means and rates or ratio for survival) (\code{"effectEstimate"}), #' default is \code{"effectEstimate"}. 
#' @name param_effectMeasure #' @keywords internal NULL #' Parameter Description: Adaptations #' @param adaptations A logical vector of length \code{kMax - 1} indicating whether or not an adaptation takes #' place at interim k, default is \code{rep(TRUE, kMax - 1)}. #' @name param_adaptations #' @keywords internal NULL #' Parameter Description: Threshold #' @param threshold Selection criterion: treatment arm / population is selected only if \code{effectMeasure} #' exceeds \code{threshold}, default is \code{-Inf}. #' \code{threshold} can also be a vector of length \code{activeArms} referring to #' a separate threshold condition over the treatment arms. #' @name param_threshold #' @keywords internal NULL #' Parameter Description: Effect Matrix #' @param effectMatrix Matrix of effect sizes with \code{activeArms} columns and number of rows #' reflecting the different situations to consider. #' @name param_effectMatrix #' @keywords internal NULL #' Parameter Description: Effect List #' @param effectList List of subsets, prevalences, and effect sizes with columns and number of rows #' reflecting the different situations to consider (see examples). #' @name param_effectList #' @keywords internal NULL #' Parameter Description: Active Arms #' @param activeArms The number of active treatment arms to be compared with control, default is \code{3}. #' @name param_activeArms #' @keywords internal NULL #' Parameter Description: Populations #' @param populations The number of populations in a two-sample comparison, default is \code{3}. #' @name param_populations #' @keywords internal NULL #' Parameter Description: Success Criterion #' @param successCriterion Defines when the study is stopped for efficacy at interim. 
#' Two options are available: \code{"all"} stops the trial #' if the efficacy criterion is fulfilled for all selected treatment arms/populations, #' \code{"atLeastOne"} stops if at least one of the selected treatment arms/populations is shown to be #' superior to control at interim, default is \code{"all"}. #' @name param_successCriterion #' @keywords internal NULL #' Parameter Description: Type Of Shape #' @param typeOfShape The shape of the dose-response relationship over the treatment groups. #' This can be either \code{"linear"}, \code{"sigmoidEmax"}, or \code{"userDefined"}, #' default is \code{"linear"}.\cr #' For \code{"linear"}, \code{"muMaxVector"} specifies the range #' of effect sizes for the treatment group with highest response. #' If \code{"sigmoidEmax"} is selected, \code{"gED50"} and \code{"slope"} have to be entered #' to specify the ED50 and the slope of the sigmoid Emax model. #' For \code{"sigmoidEmax"}, \code{"muMaxVector"} specifies the range #' of effect sizes for the treatment group with response according to infinite dose. #' If \code{"userDefined"} is selected, \code{"effectMatrix"} has to be entered. #' @name param_typeOfShape #' @keywords internal NULL #' Parameter Description: Variance Option #' @param varianceOption Defines the way to calculate the variance in multiple treatment arms (> 2) #' or population enrichment designs for testing means. For multiple arms, three options are available: #' \code{"overallPooled"}, \code{"pairwisePooled"}, and \code{"notPooled"}, default is \code{"overallPooled"}. #' For enrichment designs, the options are: \code{"pooled"}, \code{"pooledFromFull"} (one subset only), #' and \code{"notPooled"}, default is \code{"pooled"}. #' @name param_varianceOption #' @keywords internal NULL #' Parameter Description: Select Arms Function #' @param selectArmsFunction Optionally, a function can be entered that defines the way of how treatment arms #' are selected. 
This function is allowed to depend on \code{effectVector} with length \code{activeArms} #' and \code{stage} (see examples). #' @name param_selectArmsFunction #' @keywords internal NULL #' Parameter Description: Select Populations Function #' @param selectPopulationsFunction Optionally, a function can be entered that defines the way of how populations #' are selected. This function is allowed to depend on \code{effectVector} with length \code{populations} #' and \code{stage} (see examples). #' @name param_selectPopulationsFunction #' @keywords internal NULL #' Parameter Description: Stratified Analysis #' @param stratifiedAnalysis Logical. For enrichment designs, typically a stratified analysis should be chosen. #' For testing rates, also a non-stratified analysis based on overall data can be performed. #' For survival data, only a stratified analysis is possible (see Brannath et al., 2009), #' default is \code{TRUE}. #' @name param_stratifiedAnalysis #' @keywords internal NULL #' Parameter Description: Show Source #' @param showSource Logical. If \code{TRUE}, the parameter names of the object will #' be printed which were used to create the plot; that may be, e.g., #' useful to check the values or to create own plots with the base R \code{plot} function. #' Alternatively \code{showSource} can be defined as one of the following character values: #' \itemize{ #' \item \code{"commands"}: returns a character vector with plot commands #' \item \code{"axes"}: returns a list with the axes definitions #' \item \code{"test"}: all plot commands will be validated with \code{eval(parse())} and #' returned as character vector (function does not stop if an error occurs) #' \item \code{"validate"}: all plot commands will be validated with \code{eval(parse())} and #' returned as character vector (function stops if an error occurs) #' } #' Note: no plot object will be returned if \code{showSource} is a character. 
#' @name param_showSource #' @keywords internal NULL #' Parameter Description: R Value #' @param rValue For \code{typeOfSelection = "rbest"} (select the \code{rValue} best treatment arms / populations), #' the parameter \code{rValue} has to be specified. #' @name param_rValue #' @keywords internal NULL #' Parameter Description: Epsilon Value #' @param epsilonValue For \code{typeOfSelection = "epsilon"} (select treatment arm / population not worse than #' epsilon compared to the best), the parameter \code{epsilonValue} has to be specified. Must be a numeric of length 1. #' @name param_epsilonValue #' @keywords internal NULL #' Parameter Description: G ED50 #' @param gED50 If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"gED50"} has to be entered #' to specify the ED50 of the sigmoid Emax model. #' @name param_gED50 #' @keywords internal NULL #' Parameter Description: Slope #' @param slope If \code{typeOfShape = "sigmoidEmax"} is selected, \code{"slope"} can be entered #' to specify the slope of the sigmoid Emax model, default is 1. #' @name param_slope #' @keywords internal NULL #' Parameter Description: Maximum Information #' @param maxInformation Positive integer value specifying the maximum information. #' @name param_maxInformation #' @keywords internal NULL #' Parameter Description: Information Epsilon #' @param informationEpsilon Positive integer value specifying the absolute information epsilon, which #' defines the maximum distance from the observed information to the maximum information that causes the final analysis. #' Updates at the final analysis in case the observed information at the final #' analysis is smaller ("under-running") than the planned maximum information \code{maxInformation}, default is 0. #' Alternatively, a floating-point number > 0 and < 1 can be specified to define a relative information epsilon. 
#' @name param_informationEpsilon
#' @keywords internal
NULL

#' Parameter Description: Plot Settings
#' @param plotSettings An object of class \code{PlotSettings} created by \code{\link[=getPlotSettings]{getPlotSettings()}}.
#' @name param_plotSettings
#' @keywords internal
NULL

rpact/R/f_parameter_set_utilities.R0000644000176200001440000001714314210360170017142 0ustar liggesusers
## |
## | *Parameter set utilities*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 5924 $
## | Last changed: $Date: 2022-03-04 10:48:37 +0100 (Fri, 04 Mar 2022) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_core_utilities.R
NULL

# Returns TRUE if 'param' is a matrix.
# Missing, NULL, and list inputs yield FALSE (a data.frame is a list, so it
# is rejected here before is.matrix() is consulted).
.isMatrix <- function(param) {
    if (missing(param) || is.null(param) || is.list(param)) {
        return(FALSE)
    }
    return(is.matrix(param))
}

# Returns TRUE if 'param' is an array (matrices included).
# Missing, NULL, and list inputs yield FALSE.
.isArray <- function(param) {
    if (missing(param) || is.null(param) || is.list(param)) {
        return(FALSE)
    }
    return(is.array(param))
}

# Returns TRUE if 'param' is a "vector" in the display sense used by this
# file, i.e., it has more than one element. Missing, NULL, and list inputs
# yield FALSE. NOTE(review): a one-element atomic vector returns FALSE here
# by design; callers then format it as a scalar.
.isVector <- function(param) {
    if (missing(param) || is.null(param) || is.list(param)) {
        return(FALSE)
    }
    return(length(param) > 1)
}

# Formats a matrix (or matrix-like value) for display.
#
# paramValueFormatted: the value to format; coerced to a one-row matrix when
#     'enforceListOuput' is TRUE and the value is not already a matrix.
# enforceListOuput: force matrix-style (row-by-row) output even for a
#     plain vector. NOTE(review): the parameter name is misspelled
#     ('Ouput' instead of 'Output'); kept as-is because internal callers
#     below pass it by name.
#
# Returns a list with elements:
#     paramValueFormatted - a character matrix (non-matrix input), a single
#         string (one-row matrix), or a list of row strings (multi-row matrix),
#         each row rendered via .arrayToString()
#     type - "matrix" or "vector" accordingly
.getMatrixFormatted <- function(paramValueFormatted, enforceListOuput = FALSE) {
    if (!is.matrix(paramValueFormatted) && enforceListOuput) {
        paramValueFormatted <- matrix(paramValueFormatted, nrow = 1)
    }
    if (!is.matrix(paramValueFormatted)) {
        # not a matrix: return the value as a one-column character matrix
        return(list(
            paramValueFormatted = matrix(as.character(paramValueFormatted), ncol = 1),
            type = "matrix"
        ))
    }
    matrixFormatted <- paramValueFormatted
    # first row rendered as a string; more rows are appended below if present
    paramValueFormatted <- .arrayToString(matrixFormatted[1, ])
    type <- "vector"
    if (nrow(matrixFormatted) > 1 && ncol(matrixFormatted) > 0) {
        type <- "matrix"
        paramValueFormatted <- list(paramValueFormatted)
        for (i in 2:nrow(matrixFormatted)) {
            paramValueFormatted <- c(
                paramValueFormatted,
                .arrayToString(matrixFormatted[i, ])
            )
        }
    }
    return(list(
        paramValueFormatted = paramValueFormatted,
        type = type
    ))
}

# Extracts a single parameter from a ParameterSet-like object 'obj' and
# produces a display-ready representation.
#
# obj: an object providing .extractParameterNameAndValue(), .getParameterType(),
#     and a .parameterFormatFunctions list (field name -> format function name).
# parameterName: the name of the parameter to format.
#
# Returns:
#     - NULL for S4 parameter values, and (via the final return below) after
#       a caught error, which is logged with .logError()
#     - for list-valued parameters: a list of entries, one per list element,
#       with paramName rendered as "<parameterName>$<elementName>"
#     - otherwise: list(paramName, paramValue, paramValueFormatted, type)
#       where type is one of "vector", "matrix", "array", or "function"
.getParameterValueFormatted <- function(obj, parameterName) {
    tryCatch(
        {
            result <- obj$.extractParameterNameAndValue(parameterName)
            parameterName <- result$parameterName
            paramValue <- result$paramValue
            if (isS4(paramValue)) {
                # S4 sub-objects are not displayed as parameters
                return(NULL)
            }
            if (is.function(paramValue)) {
                # functions are shown only by their origin, not their body
                valueStr <- ifelse(obj$.getParameterType(parameterName) == C_PARAM_USER_DEFINED,
                    "user defined", "default")
                return(list(
                    paramName = parameterName,
                    paramValue = valueStr,
                    paramValueFormatted = valueStr,
                    type = "function"
                ))
            }
            if (is.list(paramValue)) {
                # format each list element as its own display entry
                resultList <- list()
                for (listParamName in names(paramValue)) {
                    listParamValue <- paramValue[[listParamName]]
                    type <- "vector"
                    paramValueFormatted <- listParamValue
                    if (.isMatrix(listParamValue)) {
                        m <- .getMatrixFormatted(paramValueFormatted)
                        paramValueFormatted <- m$paramValueFormatted
                        type <- m$type
                    } else if (.isVector(listParamValue)) {
                        paramValueFormatted <- .arrayToString(listParamValue)
                    }
                    entry <- list(
                        paramName = paste0(parameterName, "$", listParamName),
                        paramValue = listParamValue,
                        paramValueFormatted = paramValueFormatted,
                        type = type
                    )
                    resultList[[length(resultList) + 1]] <- entry
                }
                return(resultList)
            }
            paramValueFormatted <- paramValue
            if (obj$.getParameterType(parameterName) %in%
                    c(C_PARAM_USER_DEFINED, C_PARAM_DERIVED, C_PARAM_DEFAULT_VALUE) &&
                    !is.numeric(paramValue)) {
                # map design-type codes to their display labels (inverse normal designs)
                if (inherits(obj, C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) &&
                        parameterName == "typeOfDesign") {
                    paramValueFormatted <- C_TYPE_OF_DESIGN_LIST[[paramValue]]
                }
                if (inherits(obj, C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) &&
                        parameterName == "typeBetaSpending") {
                    paramValueFormatted <- C_TYPE_OF_DESIGN_BS_LIST[[paramValue]]
                }
            } else {
                # apply the registered per-parameter format function, if any
                formatFunctionName <- obj$.parameterFormatFunctions[[parameterName]]
                if (!is.null(formatFunctionName)) {
                    paramValueFormatted <- eval(call(formatFunctionName, paramValueFormatted))
                    if (.isArray(paramValue) && length(dim(paramValue)) == 2) {
                        # formatting flattens 2D arrays; restore matrix shape
                        paramValueFormatted <- matrix(paramValueFormatted, ncol = ncol(paramValue))
                    }
                } else if (inherits(obj, C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) &&
                        parameterName == "typeOfDesign") {
                    paramValueFormatted <- C_TYPE_OF_DESIGN_LIST[[paramValue]]
                } else if (inherits(obj, C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) &&
                        parameterName == "typeBetaSpending") {
                    paramValueFormatted <- C_TYPE_OF_DESIGN_BS_LIST[[paramValue]]
                }
            }
            type <- "vector"
            if (.isArray(paramValue) && length(dim(paramValue)) == 3) {
                # 3D array: format each slice [, , i] as a matrix and
                # concatenate the per-slice results
                arrayFormatted <- paramValueFormatted
                numberOfEntries <- dim(arrayFormatted)[3]
                numberOfCols <- dim(arrayFormatted)[2]
                numberOfRows <- dim(arrayFormatted)[1]
                enforceListOuput <- numberOfCols > 1
                m <- .getMatrixFormatted(arrayFormatted[, , 1], enforceListOuput = enforceListOuput)
                paramValueFormatted <- m$paramValueFormatted
                type <- m$type
                if (numberOfEntries > 1 && numberOfRows > 0) {
                    type <- "array"
                    for (i in 2:numberOfEntries) {
                        m <- .getMatrixFormatted(arrayFormatted[, , i], enforceListOuput = enforceListOuput)
                        paramValueFormatted <- c(paramValueFormatted, m$paramValueFormatted)
                    }
                }
            } else if (.isMatrix(paramValue) || .isArray(paramValue)) {
                m <- .getMatrixFormatted(paramValueFormatted)
                paramValueFormatted <- m$paramValueFormatted
                type <- m$type
            } else if (.isVector(paramValue)) {
                paramValueFormatted <- .arrayToString(paramValueFormatted)
            } else if (parameterName == "sided") {
                paramValueFormatted <- ifelse(paramValue == 1, "one-sided", "two-sided")
            }
            return(list(
                paramName = parameterName,
                paramValue = paramValue,
                paramValueFormatted = paramValueFormatted,
                type = type
            ))
        },
        error = function(e) {
            .logError(paste0(
                "Error in '.getParameterValueFormatted'. ",
                "Failed to show parameter '%s' (class '%s'): %s"
            ), parameterName, .getClassName(obj), e)
        }
    )
    # reached only when the tryCatch expression did not return, i.e., after
    # an error was caught and logged above
    return(NULL)
}

rpact/R/f_logger.R0000644000176200001440000001527214440602730013502 0ustar liggesusers
## |
## | *Logger*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7067 $
## | Last changed: $Date: 2023-06-09 12:58:32 +0200 (Fr, 09 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

# Writes a log line "[<logLevel>] <message>" to standard output.
# When extra arguments are supplied, 's' is treated as an sprintf() format
# string and the arguments are interpolated; otherwise 's' is printed as-is.
.logBase <- function(s, ..., logLevel) {
    .assertIsSingleCharacter(s, "s")
    if (length(list(...)) > 0) {
        cat(paste0("[", logLevel, "]"), sprintf(s, ...), "\n")
    } else {
        cat(paste0("[", logLevel, "]"), s, "\n")
    }
}

# Logs at TRACE level; emitted only when the current log level is exactly TRACE.
.logTrace <- function(s, ...) {
    if (getLogLevel() == C_LOG_LEVEL_TRACE) {
        .logBase(s, ..., logLevel = C_LOG_LEVEL_TRACE)
    }
}

# Logs at DEBUG level; emitted when the current log level is TRACE or DEBUG.
.logDebug <- function(s, ...) {
    if (getLogLevel() %in% c(C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG)) {
        .logBase(s, ..., logLevel = C_LOG_LEVEL_DEBUG)
    }
}

# Logs at INFO level; emitted when the current log level is TRACE, DEBUG, or INFO.
.logInfo <- function(s, ...) {
    if (getLogLevel() %in% c(
            C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG,
            C_LOG_LEVEL_INFO
        )) {
        .logBase(s, ..., logLevel = C_LOG_LEVEL_INFO)
    }
}

# Logs at WARN level; emitted when the current log level is TRACE, DEBUG,
# INFO, or WARN.
.logWarn <- function(s, ...) {
    if (getLogLevel() %in% c(
            C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG,
            C_LOG_LEVEL_INFO, C_LOG_LEVEL_WARN
        )) {
        .logBase(s, ..., logLevel = C_LOG_LEVEL_WARN)
    }
}

.logError <- function(s, ...)
{ if (getLogLevel() %in% c( C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG, C_LOG_LEVEL_INFO, C_LOG_LEVEL_WARN, C_LOG_LEVEL_ERROR )) { .logBase(s, ..., logLevel = C_LOG_LEVEL_ERROR) } } .getRuntimeString <- function(startTime, ..., endTime = Sys.time(), runtimeUnits = c("secs", "auto"), addBrackets = FALSE) { runtimeUnits <- match.arg(runtimeUnits) if (runtimeUnits == "secs") { time <- as.numeric(difftime(endTime, startTime, units = "secs")) time <- round(time, ifelse(time < 1, 4, 2)) timeStr <- paste0(time, " secs") } else { timeStr <- format(difftime(endTime, startTime)) } if (addBrackets) { timeStr <- paste0("[", timeStr, "]") } return(timeStr) } .logProgress <- function(s, ..., startTime, runtimeUnits = c("secs", "auto")) { if (!(getLogLevel() %in% c( C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG, C_LOG_LEVEL_INFO, C_LOG_LEVEL_WARN, C_LOG_LEVEL_ERROR, C_LOG_LEVEL_PROGRESS ))) { return(invisible()) } runtimeUnits <- match.arg(runtimeUnits) timeStr <- .getRuntimeString(startTime, runtimeUnits = runtimeUnits, addBrackets = TRUE) if (length(list(...)) > 0) { cat(paste0("[", C_LOG_LEVEL_PROGRESS, "]"), sprintf(s, ...), timeStr, "\n") } else { cat(paste0("[", C_LOG_LEVEL_PROGRESS, "]"), s, timeStr, "\n") } } #' #' @title #' Set Log Level #' #' @description #' Sets the \code{rpact} log level. #' #' @param logLevel The new log level to set. Can be one of #' "PROGRESS", "ERROR", "WARN", "INFO", "DEBUG", "TRACE", "DISABLED". #' Default is "PROGRESS". #' #' @details #' This function sets the log level of the \code{rpact} internal log message system. #' By default only calculation progress messages will be shown on the output console, #' particularly \code{\link[=getAnalysisResults]{getAnalysisResults()}} shows this kind of messages. #' The output of these messages can be disabled by setting the log level to \code{"DISABLED"}. 
#'
#' @seealso
#' \itemize{
#' \item \code{\link[=getLogLevel]{getLogLevel()}} for getting the current log level,
#' \item \code{\link[=resetLogLevel]{resetLogLevel()}} for resetting the log level to default.
#' }
#'
#' @examples
#' \dontrun{
#' # show debug messages
#' setLogLevel("DEBUG")
#'
#' # disable all log messages
#' setLogLevel("DISABLED")
#' }
#'
#' @keywords internal
#'
#' @export
#'
setLogLevel <- function(logLevel = c(
        "PROGRESS", "ERROR", "WARN",
        "INFO", "DEBUG", "TRACE", "DISABLED")) {
    logLevel <- match.arg(logLevel)
    # Defensive re-validation against the package-level constants
    # (match.arg() above already restricts the argument to the choices).
    knownLogLevels <- c(
        C_LOG_LEVEL_TRACE, C_LOG_LEVEL_DEBUG, C_LOG_LEVEL_INFO,
        C_LOG_LEVEL_WARN, C_LOG_LEVEL_ERROR, C_LOG_LEVEL_PROGRESS,
        C_LOG_LEVEL_DISABLED
    )
    if (!is.character(logLevel) || !(logLevel %in% knownLogLevels)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'logLevel' must be one of ",
            "c(", paste(paste0("'", knownLogLevels, "'"), collapse = ", "), ")"
        )
    }
    # The level is persisted in an environment variable, so no package-level
    # mutable state is needed.
    Sys.setenv("RPACT_LOG_LEVEL" = logLevel)
}

#'
#' @title
#' Get Log Level
#'
#' @description
#' Returns the current \code{rpact} log level.
#'
#' @details
#' This function gets the log level of the \code{rpact} internal log message system.
#'
#' @seealso
#' \itemize{
#' \item \code{\link[=setLogLevel]{setLogLevel()}} for setting the log level,
#' \item \code{\link[=resetLogLevel]{resetLogLevel()}} for resetting the log level to default.
#' }
#'
#' @return Returns a \code{\link[base]{character}} of length 1 specifying the current log level.
#'
#' @examples
#' # show current log level
#' getLogLevel()
#'
#' @keywords internal
#'
#' @export
#'
getLogLevel <- function() {
    currentLevel <- Sys.getenv("RPACT_LOG_LEVEL")
    # An unset variable comes back as ""; fall back to the default level
    # and persist it so subsequent calls see a defined value.
    if (identical(currentLevel, "")) {
        currentLevel <- C_LOG_LEVEL_PROGRESS
        Sys.setenv("RPACT_LOG_LEVEL" = currentLevel)
    }
    currentLevel
}

#'
#' @title
#' Reset Log Level
#'
#' @description
#' Resets the \code{rpact} log level.
#'
#' @details
#' This function resets the log level of the \code{rpact} internal log message
#' system to the default value \code{"PROGRESS"}.
#'
#' @seealso
#' \itemize{
#' \item \code{\link[=getLogLevel]{getLogLevel()}} for getting the current log level,
#' \item \code{\link[=setLogLevel]{setLogLevel()}} for setting the log level.
#' }
#'
#' @examples
#' \dontrun{
#' # reset log level to default value
#' resetLogLevel()
#' }
#'
#' @keywords internal
#'
#' @export
#'
resetLogLevel <- function() {
    # Delegate to setLogLevel() so validation and persistence stay in one place.
    setLogLevel(C_LOG_LEVEL_PROGRESS)
}

# rpact/R/class_core_parameter_set.R0000644000176200001440000021733714450551044016756 0ustar liggesusers
## |
## |  *Parameter set classes*
## |
## |  This file is part of the R package rpact:
## |  Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## |  Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## |  Licensed under "GNU Lesser General Public License" version 3
## |  License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## |  RPACT company website: https://www.rpact.com
## |  rpact package website: https://www.rpact.org
## |
## |  Contact us for information about our services: info@rpact.com
## |
## |  File version: $Revision: 7148 $
## |  Last changed: $Date: 2023-07-03 15:50:22 +0200 (Mo, 03 Jul 2023) $
## |  Last changed by: $Author: pahlke $
## |

#' @include f_core_constants.R
#' @include f_core_assertions.R
NULL

#'
#' @name FieldSet
#'
#' @title
#' Field Set
#'
#' @description
#' Basic class for field sets.
#'
#' @details
#' The field set implements basic functions for a set of fields.
#'
#' @include class_core_plot_settings.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
FieldSet <- setRefClass("FieldSet",
    fields = list(
        .parameterTypes = "list",
        .parameterNames = "list",
        .parameterFormatFunctions = "list",
        .showParameterTypeEnabled = "logical",
        .catLines = "character"
    ),
    methods = list(
        # Returns the names of all fields declared on the reference class.
        .getFieldNames = function() {
            return(names(.self$getRefClass()$fields()))
        },
        # Returns the field names that are considered public, i.e. all
        # fields whose name does not start with a dot.
        .getVisibleFieldNames = function() {
            fieldNames <- names(.self$getRefClass()$fields())
            fieldNames <- fieldNames[!startsWith(fieldNames, ".")]
            return(fieldNames)
        },
        # Clears the buffered output lines collected by .cat().
        .resetCat = function() {
            .catLines <<- character(0)
        },
        # Dual-mode output: with consoleOutputEnabled = TRUE this behaves
        # like base::cat(); otherwise the message is converted to markdown
        # (headings, emphasis, list items, or table rows when
        # tableColumns > 0) and appended to the .catLines buffer.
        # The option "rpact.print.heading.base.number" shifts the markdown
        # heading depth (-1 switches headings to bold text).
        .cat = function(..., file = "", sep = "", fill = FALSE, labels = NULL,
                append = FALSE, heading = 0, tableColumns = 0,
                consoleOutputEnabled = TRUE, na = NA_character_) {
            if (consoleOutputEnabled) {
                cat(..., file = file, sep = sep, fill = fill, labels = labels, append = append)
                return(invisible())
            }
            args <- list(...)
            line <- ""
            if (length(args) > 0) {
                if (tableColumns > 0) {
                    # Render the arguments as one markdown table row,
                    # padding missing/blank cells with 'na'.
                    values <- unlist(args, use.names = FALSE)
                    values <- values[values != "\n"]
                    for (i in 1:length(values)) {
                        values[i] <- gsub("\n", "", values[i])
                    }
                    if (!is.null(na) && length(na) == 1 && !is.na(na)) {
                        len <- min(nchar(values))
                        naStr <- paste0(trimws(na), " ")
                        while (nchar(naStr) < len) {
                            naStr <- paste0(" ", naStr)
                        }
                        values[is.na(values) | nchar(trimws(values)) == 0] <- naStr
                    }
                    line <- paste0(values, collapse = "| ")
                    if (trimws(line) != "" && !grepl("\\| *$", line)) {
                        line <- paste0(line, "|")
                    }
                    line <- paste0("| ", line)
                    extraCells <- tableColumns - length(values)
                    if (extraCells > 0 && trimws(line) != "") {
                        line <- paste0(line, paste0(rep(" |", extraCells), collapse = ""))
                    }
                    line <- paste0(line, "\n")
                } else {
                    line <- paste0(args, collapse = sep)
                    listItemEnabled <- grepl("^ ", line)
                    headingBaseNumber <- as.integer(getOption("rpact.print.heading.base.number", 0L))
                    if (is.na(headingBaseNumber)) {
                        headingBaseNumber <- 0L
                    }
                    # Clamp the heading offset to [-1, 4]; warn on illegal values.
                    if (headingBaseNumber < -1) {
                        warning(
                            "Illegal option ",
                            sQuote("rpact.print.heading.base.number"),
                            " (", headingBaseNumber, ") was set to 0"
                        )
                        headingBaseNumber <- 0L
                    }
                    if (headingBaseNumber > 4) {
                        warning(
                            "Illegal option ",
                            sQuote("rpact.print.heading.base.number"),
                            " (", headingBaseNumber,
                            ") was set to 4 because it was too large"
                        )
                        headingBaseNumber <- 4L
                    }
                    if (heading > 0) {
                        if (headingBaseNumber == -1) {
                            # Heading level -1: render as bold text instead.
                            lineBreak <- ""
                            if (grepl("\n *$", line)) {
                                lineBreak <- "\n\n"
                            }
                            line <- paste0("**", sub(": *", "", trimws(line)), "**", lineBreak)
                        } else {
                            headingCmd <- paste0(rep("#", heading + headingBaseNumber + 1), collapse = "")
                            lineBreak <- ""
                            if (grepl("\n *$", line)) {
                                lineBreak <- "\n\n"
                            }
                            line <- paste0(headingCmd, " ", sub(": *", "", trimws(line)), lineBreak)
                        }
                    } else {
                        # "label: value" lines get the label emphasized.
                        parts <- strsplit(line, " *: ")[[1]]
                        if (length(parts) == 2) {
                            line <- paste0("*", trimws(parts[1]), "*: ", parts[2])
                        }
                    }
                    if (listItemEnabled) {
                        if (grepl("^ ", line)) {
                            line <- sub("^ ", "* ", line)
                        } else {
                            line <- paste0("* ", line)
                        }
                    }
                }
            }
            if (length(.catLines) == 0) {
                .catLines <<- line
            } else {
                .catLines <<- c(.catLines, line)
            }
            return(invisible())
        },
        # Returns the object's fields as a named list; when 'values' is
        # given, only the fields named therein are included.
        .getFields = function(values) {
            flds <- names(.self$getRefClass()$fields())
            if (!missing(values)) {
                flds <- flds[flds %in% values]
            }
            result <- setNames(vector("list", length(flds)), flds)
            for (fld in flds) {
                result[[fld]] <- .self[[fld]]
            }
            return(result)
        }
    )
)

#'
#' @name ParameterSet
#'
#' @title
#' Parameter Set
#'
#' @description
#' Basic class for parameter sets.
#'
#' @details
#' The parameter set implements basic functions for a set of parameters.
#'
#' @include f_core_constants.R
#' @include f_core_utilities.R
#' @include f_parameter_set_utilities.R
#' @include f_analysis_utilities.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
ParameterSet <- setRefClass("ParameterSet",
    contains = "FieldSet",
    fields = list(
        .parameterTypes = "list",
        .parameterNames = "list",
        .parameterFormatFunctions = "list",
        .showParameterTypeEnabled = "logical",
        .catLines = "character"
    ),
    methods = list(
        # Initializes the bookkeeping lists; parameter types/names/format
        # functions start empty and the output buffer is cleared.
        initialize = function(..., .showParameterTypeEnabled = TRUE) {
            callSuper(...,
                .showParameterTypeEnabled = .showParameterTypeEnabled
            )
            .parameterTypes <<- list()
            .parameterNames <<- list()
            .parameterFormatFunctions <<- list()
            .catLines <<- character(0)
        },
        # Human-readable name of this object derived from the class name.
        .toString = function(startWithUpperCase = FALSE) {
            s <- .formatCamelCase(.getClassName(.self))
            return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s))
        },
        # Marks every known parameter as "type unknown".
        .initParameterTypes = function() {
            for (parameterName in names(.parameterNames)) {
                .parameterTypes[[parameterName]] <<- C_PARAM_TYPE_UNKNOWN
            }
        },
        # Returns the recorded type of a parameter (user defined, derived,
        # default, ...); unknown names yield C_PARAM_TYPE_UNKNOWN.
        .getParameterType = function(parameterName) {
            if (is.null(parameterName) || length(parameterName) == 0 || is.na(parameterName)) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'parameterName' must be a valid character with length > 0"
                )
            }
            parameterType <- .parameterTypes[[parameterName]]
            if (is.null(parameterType)) {
                return(C_PARAM_TYPE_UNKNOWN)
            }
            return(parameterType[1])
        },
        # Default set of parameters to display: the visible (non-dot) fields.
        .getParametersToShow = function() {
            return(.getVisibleFieldNames())
        },
        # Records the type of a parameter after validating both arguments.
        .setParameterType = function(parameterName, parameterType) {
            if (is.null(parameterName) || length(parameterName) == 0 || is.na(parameterName)) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'parameterName' must be a valid character with length > 0"
                )
            }
            parameterType <- parameterType[1]
            if (!all(parameterType %in% c(
                C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE,
                C_PARAM_GENERATED, C_PARAM_DERIVED, C_PARAM_NOT_APPLICABLE
            ))) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'parameterType' ('", parameterType, "') is invalid"
                )
            }
            .parameterTypes[[parameterName]] <<- parameterType
            invisible(parameterType)
        },
        # Convenience predicates for the recorded parameter type.
        isUserDefinedParameter = function(parameterName) {
            return(.getParameterType(parameterName) == C_PARAM_USER_DEFINED)
        },
        isDefaultParameter = function(parameterName) {
            return(.getParameterType(parameterName) == C_PARAM_DEFAULT_VALUE)
        },
        isGeneratedParameter = function(parameterName) {
            return(.getParameterType(parameterName) == C_PARAM_GENERATED)
        },
        isDerivedParameter = function(parameterName) {
            return(.getParameterType(parameterName) == C_PARAM_DERIVED)
        },
        isUndefinedParameter = function(parameterName) {
            return(.getParameterType(parameterName) == C_PARAM_TYPE_UNKNOWN)
        },
        # Grouped accessors for parameters by their recorded type.
        .getInputParameters = function() {
            params <- .getParametersOfOneGroup(c(C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE))
            return(params)
        },
        .getUserDefinedParameters = function() {
            return(.getParametersOfOneGroup(C_PARAM_USER_DEFINED))
        },
        .getDefaultParameters = function() {
            return(.getParametersOfOneGroup(C_PARAM_DEFAULT_VALUE))
        },
        .getGeneratedParameters = function() {
            return(.getParametersOfOneGroup(C_PARAM_GENERATED))
        },
        .getDerivedParameters = function() {
            return(.getParametersOfOneGroup(C_PARAM_DERIVED))
        },
        .getUndefinedParameters = function() {
            return(.getParametersOfOneGroup(C_PARAM_TYPE_UNKNOWN))
        },
        # Returns the value of a user-defined or default parameter; for any
        # other type an NA matching the field's declared type is returned.
        .getParameterValueIfUserDefinedOrDefault = function(parameterName) {
            if (isUserDefinedParameter(parameterName) || isDefaultParameter(parameterName)) {
                return(.self[[parameterName]])
            }
            parameterType <- .self$getRefClass()$fields()[[parameterName]]
            if (parameterType == "numeric") {
                return(NA_real_)
            }
            if (parameterType == "integer") {
                return(NA_integer_)
            }
            if (parameterType == "character") {
                return(NA_character_)
            }
            return(NA)
        },
        # Names of the parameters with one of the given types, restricted to
        # (and ordered by) the parameters selected for display.
        .getParametersOfOneGroup = function(parameterType) {
            if (length(parameterType) == 1) {
                parameterNames <- names(.parameterTypes[.parameterTypes == parameterType])
            } else {
                parameterNames <- names(.parameterTypes[which(.parameterTypes %in% parameterType)])
            }
            parametersToShow <- .getParametersToShow()
            if (is.null(parametersToShow) || length(parametersToShow) == 0) {
                return(parameterNames)
            }
            return(parametersToShow[parametersToShow %in% parameterNames])
        },
        # "[type]" marker shown in developer output (blank when disabled).
        .showParameterType = function(parameterName) {
            if (!.showParameterTypeEnabled) {
                return(" ")
            }
            return(paste0("[", .getParameterType(parameterName), "]"))
        },
        # Prints every visible parameter including its type marker.
        .showAllParameters = function(consoleOutputEnabled = TRUE) {
            parametersToShow <- .getVisibleFieldNamesOrdered()
            for (parameter in parametersToShow) {
                .showParameter(parameter,
                    showParameterType = TRUE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
            }
        },
        # Visible field names with the display-ordered parameters first.
        .getVisibleFieldNamesOrdered = function() {
            visibleFieldNames <- .getVisibleFieldNames()
            parametersToShowSorted <- .getParametersToShow()
            if (is.null(parametersToShowSorted) || length(parametersToShowSorted) == 0) {
                return(visibleFieldNames)
            }
            visibleFieldNames <- visibleFieldNames[!(visibleFieldNames %in% parametersToShowSorted)]
            visibleFieldNames <- c(parametersToShowSorted, visibleFieldNames)
            return(visibleFieldNames)
        },
        # Base .show(): only the developer summary (showType == 2) is
        # implemented here; subclasses are expected to override for other modes.
        .show = function(..., consoleOutputEnabled = FALSE) {
            showType <- .getOptionalArgument("showType", ...)
            if (!is.null(showType) && showType == 2) {
                .cat("Technical developer summary of the ", .self$.toString(), " object (",
                    methods::classLabel(class(.self)), "):\n\n",
                    sep = "", heading = 1,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showAllParameters(consoleOutputEnabled = consoleOutputEnabled)
                .showParameterTypeDescription(consoleOutputEnabled = consoleOutputEnabled)
            } else {
                stop(
                    C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                    "method '.show()' is not implemented in class '", .getClassName(.self), "'"
                )
            }
        },
        # Renders the object via .show() in buffered mode and emits the
        # collected markdown lines with cat().
        .catMarkdownText = function(...) {
            .show(consoleOutputEnabled = FALSE, ...)
            if (length(.catLines) == 0) {
                return(invisible())
            }
            for (line in .catLines) {
                cat(line)
            }
        },
        # Prints a titled group of parameters; an empty group prints
        # "<title>: not available" (console mode only).
        .showParametersOfOneGroup = function(parameters, title, orderByParameterName = TRUE, consoleOutputEnabled = TRUE) {
            output <- ""
            if (is.null(parameters) || length(parameters) == 0 || all(is.na(parameters))) {
                if (!missing(title) && !is.null(title) && !is.na(title) && consoleOutputEnabled) {
                    output <- paste0(title, ": not available\n\n")
                    .cat(output, heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                }
                invisible(output)
            } else {
                if (orderByParameterName) {
                    parameters <- sort(parameters)
                }
                if (!missing(title) && !is.null(title) && !is.na(title)) {
                    output <- paste0(title, ":\n")
                    .cat(output, heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                }
                for (parameterName in parameters) {
                    output <- paste0(output, .showParameter(parameterName,
                        consoleOutputEnabled = consoleOutputEnabled
                    ))
                }
                .cat("\n", consoleOutputEnabled = consoleOutputEnabled)
                output <- paste0(output, "\n")
                invisible(output)
            }
        },
        # Prints one parameter; list-valued parameters ("parent$child")
        # expand into one line per element with a category caption.
        .showParameter = function(parameterName, showParameterType = FALSE, consoleOutputEnabled = TRUE) {
            tryCatch(
                {
                    params <- .getParameterValueFormatted(obj = .self, parameterName = parameterName)
                    if (is.null(params) || !is.list(params)) {
                        return(invisible(""))
                    }
                    if (!is.null(names(params)) && "paramValue" %in% names(params)) {
                        return(.showParameterSingle(
                            param = params,
                            parameterName = parameterName,
                            showParameterType = showParameterType,
                            consoleOutputEnabled = consoleOutputEnabled
                        ))
                    }
                    output <- ""
                    for (i in 1:length(params)) {
                        param <- params[[i]]
                        category <- NULL
                        parts <- strsplit(param$paramName, "$", fixed = TRUE)[[1]]
                        if (length(parts) == 2) {
                            parameterName <- parts[1]
                            param$paramName <- parameterName
                            category <- parts[2]
                            categoryCaption <- .parameterNames[[category]]
                            if (is.null(categoryCaption)) {
                                categoryCaption <- paste0("%", category, "%")
                            }
                            category <- categoryCaption
                        }
                        outputPart <- .showParameterSingle(
                            param = param,
                            parameterName = parameterName,
                            category = category,
                            showParameterType = showParameterType,
                            consoleOutputEnabled = consoleOutputEnabled
                        )
                        if (nchar(output) > 0) {
                            output <- paste0(output, "\n", outputPart)
                        } else {
                            output <- outputPart
                        }
                    }
                    return(invisible(output))
                },
                error = function(e) {
                    if (consoleOutputEnabled) {
                        warning("Failed to show parameter '", parameterName, "': ", e$message)
                    }
                }
            )
        },
        # Renders a single formatted parameter entry; 3D arrays print one
        # line per slice/row, matrices one line per row, scalars one line.
        .showParameterSingle = function(param, parameterName, ..., category = NULL, showParameterType = FALSE, consoleOutputEnabled = TRUE) {
            if (is.null(param)) {
                return(invisible(""))
            }
            output <- ""
            tryCatch(
                {
                    if (param$type == "array" && length(dim(param$paramValue)) == 3) {
                        numberOfEntries <- dim(param$paramValue)[3]
                        numberOfRows <- dim(param$paramValue)[1]
                        if (numberOfEntries > 0 && numberOfRows > 0) {
                            index <- 1
                            for (i in 1:numberOfEntries) {
                                for (j in 1:numberOfRows) {
                                    output <- paste0(output, .showParameterFormatted(
                                        paramName = param$paramName,
                                        paramValue = param$paramValue[j, , i],
                                        paramValueFormatted = param$paramValueFormatted[[index]],
                                        showParameterType = showParameterType,
                                        category = i,
                                        matrixRow = ifelse(numberOfRows == 1, NA_integer_, j),
                                        consoleOutputEnabled = consoleOutputEnabled,
                                        paramNameRaw = parameterName,
                                        numberOfCategories = numberOfEntries
                                    ))
                                    index <- index + 1
                                }
                            }
                        }
                    } else if (param$type %in% c("matrix", "array")) {
                        n <- length(param$paramValueFormatted)
                        if (n > 0) {
                            for (i in 1:n) {
                                paramValue <- param$paramValue
                                if (is.array(paramValue) && length(dim(paramValue)) == 3 &&
                                        dim(paramValue)[3] == 1) {
                                    paramValue <- paramValue[i, , 1]
                                } else if (dim(paramValue)[1] > 1 || dim(paramValue)[2] > 1) {
                                    paramValue <- paramValue[i, ]
                                }
                                output <- paste0(output, .showParameterFormatted(
                                    paramName = param$paramName,
                                    paramValue = paramValue,
                                    paramValueFormatted = param$paramValueFormatted[[i]],
                                    showParameterType = showParameterType,
                                    category = category,
                                    matrixRow = ifelse(n == 1, NA_integer_, i),
                                    consoleOutputEnabled = consoleOutputEnabled,
                                    paramNameRaw = parameterName,
                                    numberOfCategories = n
                                ))
                            }
                        }
                    } else {
                        output <- .showParameterFormatted(
                            paramName = param$paramName,
                            paramValue = param$paramValue,
                            paramValueFormatted = param$paramValueFormatted,
                            showParameterType = showParameterType,
                            category = category,
                            consoleOutputEnabled = consoleOutputEnabled,
                            paramNameRaw = parameterName
                        )
                    }
                },
                error = function(e) {
                    if (consoleOutputEnabled) {
                        warning("Failed to show single parameter '", parameterName, "' (", param$type, "): ", e$message)
                    }
                }
            )
            return(invisible(output))
        },
        # Splits names of the form ".object$param" into the nested object's
        # field value; ".closedTestResults$rejected" is coerced to logical.
        .extractParameterNameAndValue = function(parameterName) {
            d <- regexpr(paste0("\\..+\\$"), parameterName)
            if (d[1] != 1) {
                return(list(parameterName = parameterName, paramValue = get(parameterName)))
            }
            index <- attr(d, "match.length")
            objectName <- substr(parameterName, 1, index - 1)
            parameterName <- substr(parameterName, index + 1, nchar(parameterName))
            paramValue <- get(objectName)[[parameterName]]
            # .closedTestResults$rejected
            if (objectName == ".closedTestResults" && parameterName == "rejected") {
                paramValueLogical <- as.logical(paramValue)
                if (is.matrix(paramValue)) {
                    paramValueLogical <- matrix(paramValueLogical, ncol = ncol(paramValue))
                }
                paramValue <- paramValueLogical
            }
            return(list(parameterName = parameterName, paramValue = paramValue))
        },
        # Prints parameters whose type was never set, flagged as issues.
        .showUnknownParameters = function(consoleOutputEnabled = TRUE) {
            params <- .getUndefinedParameters()
            if (length(params) > 0) {
                .showParametersOfOneGroup(params, "ISSUES (parameters with undefined type)",
                    consoleOutputEnabled = consoleOutputEnabled
                )
            }
        },
        # Emits one "caption   value" output line; builds the caption from
        # .parameterNames plus category/matrix-row suffixes that vary by the
        # concrete result class (multi-arm, enrichment, simulation, ...).
        # NOTE(review): when 'paramNameRaw' is NA, 'paramCaption' is read
        # before assignment — callers appear to always pass a character
        # 'paramNameRaw'; confirm before relying on the NA default.
        .showParameterFormatted = function(paramName, paramValue, ..., paramValueFormatted = NA_character_, showParameterType = FALSE, category = NULL, matrixRow = NA_integer_, consoleOutputEnabled = TRUE, paramNameRaw = NA_character_, numberOfCategories = NA_integer_) {
            if (!is.na(paramNameRaw)) {
                paramCaption <- .parameterNames[[paramNameRaw]]
            }
            if (is.null(paramCaption)) {
                paramCaption <- .parameterNames[[paramName]]
            }
            if (is.null(paramCaption)) {
                # Unknown parameter: show the raw name wrapped in %.
                paramCaption <- paste0("%", paramName, "%")
            }
            if (!is.null(category) && !is.na(category)) {
                if (.isMultiArmSimulationResults(.self) && paramName == "singleNumberOfEventsPerStage") {
                    if (!inherits(.self, "SimulationResultsEnrichmentSurvival") &&
                            !is.na(numberOfCategories) && numberOfCategories == category) {
                        category <- "control"
                    }
                    paramCaption <- paste0(paramCaption, " {", category, "}")
                } else if (paramName == "effectList") {
                    paramCaption <- paste0(paramCaption, " [", category, "]")
                } else if (.isEnrichmentSimulationResults(.self)) {
                    categoryCaption <- .getCategoryCaptionEnrichment(.self, paramName, category)
                    paramCaption <- paste0(paramCaption, " (", categoryCaption, ")")
                } else {
                    paramCaption <- paste0(paramCaption, " (", category, ")")
                }
                if (!is.na(matrixRow)) {
                    if (paramName == "effectList") {
                        paramCaption <- paste0(paramCaption, " (", matrixRow, ")")
                    } else {
                        paramCaption <- paste0(paramCaption, " [", matrixRow, "]")
                    }
                }
            } else if (!is.na(matrixRow)) {
                if (.isMultiArmAnalysisResults(.self) && paramName %in% c(
                    "conditionalErrorRate", "secondStagePValues",
                    "adjustedStageWisePValues", "overallAdjustedTestStatistics"
                )) {
                    treatments <- .closedTestResults$.getHypothesisTreatmentArmVariants()[matrixRow]
                    paramCaption <- paste0(
                        "Treatment", ifelse(grepl(",", treatments), "s", ""), " ",
                        treatments, " vs. control"
                    )
                } else if (.isEnrichmentAnalysisResults(.self) || .isEnrichmentStageResults(.self) ||
                        (inherits(.self, "ClosedCombinationTestResults") && isTRUE(.self$.enrichment))) {
                    if (paramName %in% c(
                        "indices", "conditionalErrorRate", "secondStagePValues",
                        "adjustedStageWisePValues", "overallAdjustedTestStatistics",
                        "rejectedIntersections"
                    )) {
                        if (.isEnrichmentAnalysisResults(.self)) {
                            populations <- .closedTestResults$.getHypothesisPopulationVariants()[matrixRow]
                        } else if (inherits(.self, "ClosedCombinationTestResults")) {
                            populations <- .self$.getHypothesisPopulationVariants()[matrixRow]
                        } else {
                            stop(
                                C_EXCEPTION_TYPE_RUNTIME_ISSUE, "only ClosedCombinationTestResults ",
                                "supports function .getHypothesisPopulationVariants() (object is ",
                                .getClassName(.self), ")"
                            )
                        }
                        paramCaption <- paste0(paramCaption, " ", populations)
                    } else {
                        # "F" = full population, "S<i>" = sub-population i.
                        if (!is.na(numberOfCategories) && numberOfCategories == matrixRow) {
                            paramCaption <- paste0(paramCaption, " F")
                        } else {
                            paramCaption <- paste0(paramCaption, " S", matrixRow)
                        }
                    }
                } else if (.isMultiArmAnalysisResults(.self) ||
                        grepl("StageResultsMultiArm", .getClassName(.self)) ||
                        (inherits(.self, "SimulationResults") && paramName == "effectMatrix") ||
                        (inherits(.self, "ClosedCombinationTestResults") &&
                            paramName %in% c("rejected", "separatePValues"))) {
                    paramCaption <- paste0(paramCaption, " (", matrixRow, ")")
                } else {
                    paramCaption <- paste0(paramCaption, " [", matrixRow, "]")
                }
            }
            if (is.null(paramValueFormatted) || length(paramValueFormatted) == 0 ||
                    is.na(paramValueFormatted)) {
                paramValueFormatted <- paramValue
            }
            if (is.list(paramValueFormatted)) {
                paramValueFormatted <- .listToString(paramValueFormatted)
            }
            if (is.function(paramValue) || grepl("Function$", paramName)) {
                paramValueFormatted <- ifelse(
                    .getParameterType(paramName) == C_PARAM_USER_DEFINED,
                    ifelse(.isCppCode(paramValueFormatted), "user defined (C++)", "user defined"),
                    "default"
                )
            }
            prefix <- ifelse(showParameterType, .showParameterType(paramName), "")
            variableNameFormatted <- .getFormattedVariableName(
                name = paramCaption,
                n = .getNChar(),
                prefix = prefix
            )
            output <- paste(variableNameFormatted, paramValueFormatted, "\n")
            .cat(output, consoleOutputEnabled = consoleOutputEnabled)
            invisible(output)
        },
        # Caption column width: capped at 40 characters plus padding.
        .getNChar = function() {
            if (length(.parameterNames) == 0) {
                return(40)
            }
            return(min(40, max(nchar(.parameterNames))) + 4)
        },
        # Prints the legend explaining the parameter-type markers.
        .showParameterTypeDescription = function(consoleOutputEnabled = consoleOutputEnabled) {
            .cat("\n", consoleOutputEnabled = consoleOutputEnabled)
            .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
            .cat(" ", C_PARAM_USER_DEFINED, ": user defined\n", consoleOutputEnabled = consoleOutputEnabled)
            .cat(" ", C_PARAM_DERIVED, ": derived value\n", consoleOutputEnabled = consoleOutputEnabled)
            .cat(" ", C_PARAM_DEFAULT_VALUE, ": default value\n", consoleOutputEnabled = consoleOutputEnabled)
            .cat(" ", C_PARAM_GENERATED, ": generated/calculated value\n", consoleOutputEnabled = consoleOutputEnabled)
            .cat(" ", C_PARAM_NOT_APPLICABLE, ": not applicable or hidden\n", consoleOutputEnabled = consoleOutputEnabled)
        },
        # Prints the parameter set as a character matrix with row labels
        # ("Stage <k>" when a 'stages' column exists).
        .printAsDataFrame = function(parameterNames, niceColumnNamesEnabled = FALSE,
                includeAllParameters = FALSE, handleParameterNamesAsToBeExcluded = FALSE,
                lineBreakEnabled = FALSE) {
            if (.isTrialDesign(.self)) {
                tableColumnNames <- .getTableColumnNames(design = .self)
            } else {
                tableColumnNames <- C_TABLE_COLUMN_NAMES
            }
            if (.isTrialDesignPlan(.self)) {
                parameterNames <- NULL
            }
            dataFrame <- .getAsDataFrame(
                parameterSet = .self,
                parameterNames = parameterNames,
                niceColumnNamesEnabled = niceColumnNamesEnabled,
                includeAllParameters = includeAllParameters,
                handleParameterNamesAsToBeExcluded = handleParameterNamesAsToBeExcluded,
                returnParametersAsCharacter = TRUE,
                tableColumnNames = tableColumnNames
            )
            result <- as.matrix(dataFrame)
            if (.isTrialDesignPlan(.self)) {
                dimnames(result)[[1]] <- paste(" ", c(1:nrow(dataFrame)))
            } else if (!is.null(dataFrame[["stages"]])) {
                dimnames(result)[[1]] <- paste(" Stage", dataFrame$stages)
            }
            print(result, quote = FALSE, right = FALSE)
        },
        # Longest vector (or single-row/column matrix) among the parameters.
        .getNumberOfRows = function(parameterNames) {
            numberOfRows <- 1
            for (parameterName in parameterNames) {
                parameterValues <- .self[[parameterName]]
                if (is.vector(parameterValues) && length(parameterValues) > numberOfRows) {
                    numberOfRows <- length(parameterValues)
                } else if (is.matrix(parameterValues) &&
                        (nrow(parameterValues) == 1 || ncol(parameterValues) == 1) &&
                        length(parameterValues) > numberOfRows) {
                    numberOfRows <- length(parameterValues)
                }
            }
            return(numberOfRows)
        },
        # TRUE if any listed parameter is a non-empty matrix.
        .containsMultidimensionalParameters = function(parameterNames) {
            for (parameterName in parameterNames) {
                parameterValues <- .self[[parameterName]]
                if (!is.null(parameterValues) && is.matrix(parameterValues) &&
                        nrow(parameterValues) > 0 && ncol(parameterValues) > 0) {
                    return(TRUE)
                }
            }
            return(FALSE)
        },
        # Number of stages: taken from the attached design's kMax when
        # available, otherwise the largest matrix row count.
        .getMultidimensionalNumberOfStages = function(parameterNames) {
            if (!is.null(.self[[".design"]])) {
                return(.self$.design$kMax)
            }
            n <- 1
            for (parameterName in parameterNames) {
                parameterValues <- .self[[parameterName]]
                if (!is.null(parameterValues) && is.matrix(parameterValues) &&
                        ncol(parameterValues) > 0 && nrow(parameterValues) > n) {
                    n <- nrow(parameterValues)
                }
            }
            return(n)
        },
        # Finds the design-plan parameter that was varied over
        # 'numberOfVariants' values; user-defined parameters take
        # precedence over defaults. Returns NULL when none matches.
        .getVariedParameter = function(parameterNames, numberOfVariants) {
            # search for user defined parameters
            for (parameterName in parameterNames) {
                parameterValues <- .self[[parameterName]]
                if (!is.null(parameterValues) && !is.matrix(parameterValues) &&
                        length(parameterValues) == numberOfVariants &&
                        parameterName %in% C_VARIABLE_DESIGN_PLAN_PARAMETERS &&
                        .getParameterType(parameterName) == C_PARAM_USER_DEFINED) {
                    return(parameterName)
                }
            }
            # search for default values
            for (parameterName in parameterNames) {
                parameterValues <- .self[[parameterName]]
                if (!is.null(parameterValues) && !is.matrix(parameterValues) &&
                        length(parameterValues) == numberOfVariants &&
                        parameterName %in% C_VARIABLE_DESIGN_PLAN_PARAMETERS &&
                        .getParameterType(parameterName) == C_PARAM_DEFAULT_VALUE) {
                    return(parameterName)
                }
            }
            return(NULL)
        },
        # Column caption for data frame output: the "nice" table column
        # name when enabled and available, else the raw parameter name.
        .getDataFrameColumnCaption = function(parameterName, tableColumnNames, niceColumnNamesEnabled) {
            if (length(parameterName) == 0 || parameterName == "") {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'parameterName' must be a valid parameter name")
            }
            tableColumnName <- tableColumnNames[[parameterName]]
            return(ifelse(niceColumnNamesEnabled && !is.null(tableColumnName),
                tableColumnName, parameterName
            ))
        },
        # Number of stages for flat (non-matrix) parameter sets: kMax when
        # present (directly or via .design), else the longest vector length.
        .getUnidimensionalNumberOfStages = function(parameterNames) {
            kMax <- .self[["kMax"]]
            if (is.null(kMax) && !is.null(.self[[".design"]])) {
                kMax <- .self[[".design"]][["kMax"]]
            }
            if (!is.null(kMax) && length(kMax) == 1 && is.integer(kMax)) {
                return(kMax)
            }
            n <- 1
            for (parameterName in parameterNames) {
                parameterValues <- .self[[parameterName]]
                if (!is.null(parameterValues) && !is.matrix(parameterValues) &&
                        length(parameterValues) > n) {
                    n <- length(parameterValues)
                }
            }
            return(n)
        },
        # Formats one parameter column as character for data frame output,
        # applying the registered format function and blanking NA cells;
        # failures are logged, not raised.
        .formatDataFrameParametersAsCharacter = function(dataFrame, parameterName, parameterValues, parameterCaption) {
            tryCatch(
                {
                    formatFunctionName <- .parameterFormatFunctions[[parameterName]]
                    if (!is.null(formatFunctionName)) {
                        parameterValuesFormatted <- eval(call(formatFunctionName, parameterValues))
                    } else {
                        parameterValuesFormatted <- as.character(parameterValues)
                    }
                    if (parameterName == "sided") {
                        parameterValuesFormatted <- ifelse(parameterValues == 1,
                            "one-sided", "two-sided"
                        )
                    }
                    if (!is.null(dataFrame[[parameterCaption]])) {
                        parameterValuesFormatted[is.na(dataFrame[[parameterCaption]])] <- ""
                    }
                    parameterValuesFormatted[is.na(parameterValuesFormatted)] <- ""
                    parameterValuesFormatted[parameterValuesFormatted == "NA"] <- ""
                    if (is.null(dataFrame)) {
                        dataFrame <- data.frame(x = parameterValuesFormatted)
                        names(dataFrame) <- parameterCaption
                    } else {
                        dataFrame[[parameterCaption]] <- parameterValuesFormatted
                    }
                },
                error = function(e) {
                    .logError(paste0(
                        "Error in '.getAsDataFrame'. Failed to show parameter '%s' ",
                        "(class '%s'): %s"
                    ), parameterName, .getClassName(.self), e)
                }
            )
        },
        #
        # Returns a sub-list.
        #
        # @param x A list from which you would like to get a sub-list.
        # @param listEntryNames A vector of names which specify the entries of the sub-list to return.
        #
        .getSubListByNames = function(x, listEntryNames) {
            "Returns a sub-list."
            if (!is.list(x)) {
                stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'x' must be a list")
            }
            if (!is.character(listEntryNames)) {
                stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'listEntryNames' must be a character vector")
            }
            return(x[which(names(x) %in% listEntryNames)])
        },
        # TRUE for enrichment or multi-arm analysis/stage result objects.
        .isMultiHypothesesObject = function() {
            return(.isEnrichmentAnalysisResults(.self) || .isEnrichmentStageResults(.self) ||
                .isMultiArmAnalysisResults(.self) || .isMultiArmStageResults(.self))
        },
        # TRUE for enrichment analysis/stage result objects.
        .isEnrichmentObject = function() {
            return(.isEnrichmentAnalysisResults(.self) || .isEnrichmentStageResults(.self))
        }
    )
)

# Determines the number of variants (scenarios) represented in a parameter
# set: for enrichment simulations it is the effect matrix row count;
# otherwise the maximum over the non-stage-wise parameters (matrix columns,
# or rows for multi-hypotheses objects, or vector lengths).
.getMultidimensionalNumberOfVariants <- function(parameterSet, parameterNames) {
    if (!is.null(parameterSet[["effectList"]])) {
        effectMatrixName <- .getSimulationEnrichmentEffectMatrixName(parameterSet)
        return(nrow(parameterSet$effectList[[effectMatrixName]]))
    }
    # Stage-wise parameters must not drive the variant count.
    parameterNames <- parameterNames[!(parameterNames %in% c(
        "accrualTime", "accrualIntensity", "plannedSubjects", "plannedEvents",
        "minNumberOfSubjectsPerStage", "maxNumberOfSubjectsPerStage",
        "minNumberOfEventsPerStage", "maxNumberOfEventsPerStage",
        "piecewiseSurvivalTime", "lambda2", "adaptations",
        "adjustedStageWisePValues", "overallAdjustedTestStatistics"
    ))]
    if (!is.null(parameterSet[[".piecewiseSurvivalTime"]]) &&
            parameterSet$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) {
        parameterNames <- parameterNames[!(parameterNames %in% c("lambda1"))]
    }
    n <- 1
    for (parameterName in parameterNames) {
        parameterValues <- parameterSet[[parameterName]]
        if (!is.null(parameterValues) && (is.matrix(parameterValues) || !is.array(parameterValues))) {
            if (is.matrix(parameterValues)) {
                if (parameterSet$.isMultiHypothesesObject()) {
                    if (nrow(parameterValues) > n && ncol(parameterValues) > 0) {
                        n <- nrow(parameterValues)
                    }
                } else if (nrow(parameterValues) > 0 && ncol(parameterValues) > n) {
                    n <- ncol(parameterValues)
                }
            } else if (length(parameterValues) > n && !parameterSet$.isMultiHypothesesObject()) {
                n <- length(parameterValues)
            }
        }
    }
    return(n)
}

# Builds the value column for one parameter of a data frame with
# numberOfVariants x numberOfStages rows; returns NULL for parameters that
# are not representable (functions, 3D arrays, unknown/not-applicable types).
# NOTE: the definition continues beyond this excerpt; the visible part is
# reproduced unchanged.
.getDataFrameColumnValues <- function(parameterSet, parameterName, numberOfVariants,
        numberOfStages, includeAllParameters, mandatoryParameterNames) {
    if (parameterSet$.getParameterType(parameterName) == C_PARAM_TYPE_UNKNOWN &&
            parameterName != "futilityStop") {
        return(NULL)
    }
    if (!includeAllParameters &&
            parameterSet$.getParameterType(parameterName) == C_PARAM_NOT_APPLICABLE &&
            !(parameterName %in% mandatoryParameterNames)) {
        return(NULL)
    }
    parameterValues <- parameterSet[[parameterName]]
    if (is.null(parameterValues) || length(parameterValues) == 0) {
        return(NULL)
    }
    if (is.function(parameterValues)) {
        return(NULL)
    }
    if (is.array(parameterValues) && !is.matrix(parameterValues)) {
        return(NULL)
    }
    if (parameterName %in% c("adjustedStageWisePValues", "overallAdjustedTestStatistics")) {
        return(NULL)
    }
    if (!is.matrix(parameterValues)) {
        if (length(parameterValues) == 1) {
            return(rep(parameterValues, numberOfVariants * numberOfStages))
        }
        if (parameterSet$.isMultiHypothesesObject()) {
            if (length(parameterValues) == numberOfStages) {
                return(as.vector(sapply(FUN = rep, X = parameterValues, times = numberOfVariants)))
            }
        }
        if (length(parameterValues) == numberOfVariants) {
            return(rep(parameterValues, numberOfStages))
        }
        if (length(parameterValues) == numberOfStages && parameterName %in% c(
            "plannedEvents", "plannedSubjects",
            "minNumberOfEventsPerStage", "maxNumberOfEventsPerStage",
            "minNumberOfSubjectsPerStage", "maxNumberOfSubjectsPerStage",
            "allocationRatioPlanned"
        )) {
            values <- c()
            for (stage in 1:numberOfStages) {
                values <- c(values, rep(parameterValues[stage], numberOfVariants))
            }
            return(values)
        }
        if
(parameterName %in% c( "accrualTime", "accrualIntensity", "plannedEvents", "plannedSubjects", "minNumberOfEventsPerStage", "maxNumberOfEventsPerStage", "minNumberOfSubjectsPerStage", "maxNumberOfSubjectsPerStage", "piecewiseSurvivalTime", "lambda2" )) { return(NULL) } stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "parameter '", parameterName, "' has an invalid ", "dimension (length is ", length(parameterValues), ")" ) } else if (parameterName == "effectMatrix") { # return effect matrix row if 'effectMatrix' is user defined if (parameterSet$.getParameterType("effectMatrix") == C_PARAM_USER_DEFINED) { return(1:ncol(parameterValues)) } return(parameterValues[nrow(parameterValues), ]) } if (grepl("futility|alpha0Vec|earlyStop", parameterName) && nrow(parameterValues) == numberOfStages - 1) { parameterValues <- rbind(parameterValues, rep(NA_real_, ncol(parameterValues))) } if (nrow(parameterValues) == numberOfStages && ncol(parameterValues) == 1) { columnValues <- c() for (parameterValue in parameterValues) { columnValues <- c(columnValues, rep(parameterValue, numberOfVariants)) } return(columnValues) } if (nrow(parameterValues) == numberOfStages && ncol(parameterValues) == numberOfVariants) { columnValues <- c() for (i in 1:nrow(parameterValues)) { for (j in 1:ncol(parameterValues)) { columnValues <- c(columnValues, parameterValues[i, j]) } } return(columnValues) } # applicable for analysis enrichment if (parameterSet$.isMultiHypothesesObject()) { if (nrow(parameterValues) %in% c(1, numberOfVariants) && ncol(parameterValues) %in% c(1, numberOfStages)) { columnValues <- c() for (j in 1:ncol(parameterValues)) { for (i in 1:nrow(parameterValues)) { columnValues <- c(columnValues, parameterValues[i, j]) } } if (nrow(parameterValues) == 1) { columnValues <- as.vector(sapply(FUN = rep, X = columnValues, times = numberOfVariants)) } if (ncol(parameterValues) == 1) { columnValues <- rep(columnValues, numberOfStages) } return(columnValues) } } if (nrow(parameterValues) == 1 && 
ncol(parameterValues) == 1) { return(rep(parameterValues[1, 1], numberOfStages * numberOfVariants)) } if (nrow(parameterValues) == 1 && ncol(parameterValues) == numberOfVariants) { return(rep(parameterValues[1, ], numberOfStages)) } if (nrow(parameterValues) == numberOfStages && ncol(parameterValues) == 1) { return(rep(parameterValues[, 1], numberOfVariants)) } stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "parameter '", parameterName, "' has an invalid ", "dimension (", nrow(parameterValues), " x ", ncol(parameterValues), "); ", "expected was (", numberOfStages, " x ", numberOfVariants, ")" ) } .getAsDataFrameMultidimensional <- function(parameterSet, parameterNames, niceColumnNamesEnabled, includeAllParameters, returnParametersAsCharacter, tableColumnNames, mandatoryParameterNames) { numberOfVariants <- .getMultidimensionalNumberOfVariants(parameterSet, parameterNames) numberOfStages <- parameterSet$.getMultidimensionalNumberOfStages(parameterNames) stagesCaption <- parameterSet$.getDataFrameColumnCaption( "stages", tableColumnNames, niceColumnNamesEnabled ) dataFrame <- data.frame( stages = sort(rep(1:numberOfStages, numberOfVariants)) ) names(dataFrame) <- stagesCaption if (parameterSet$.isEnrichmentObject()) { populations <- character(0) for (i in 1:numberOfVariants) { populations <- c(populations, ifelse(i == numberOfVariants, "F", paste0("S", i))) } dataFrame$populations <- rep(populations, numberOfStages) populationsCaption <- parameterSet$.getDataFrameColumnCaption( "populations", tableColumnNames, niceColumnNamesEnabled ) names(dataFrame) <- c(stagesCaption, populationsCaption) } variedParameter <- parameterSet$.getVariedParameter(parameterNames, numberOfVariants) tryCatch( { if (!is.null(variedParameter) && variedParameter != "stages") { variedParameterCaption <- parameterSet$.getDataFrameColumnCaption( variedParameter, tableColumnNames, niceColumnNamesEnabled ) dataFrame[[variedParameterCaption]] <- rep(parameterSet[[variedParameter]], numberOfStages) } }, 
error = function(e) { warning( ".getAsDataFrameMultidimensional: ", "failed to add 'variedParameterCaption' to data.frame; ", e$message ) } ) usedParameterNames <- character(0) for (parameterName in parameterNames) { tryCatch( { if (!(parameterName %in% c("stages", "adaptations", "effectList")) && !grepl("Function$", parameterName) && (is.null(variedParameter) || parameterName != variedParameter)) { columnValues <- .getDataFrameColumnValues( parameterSet, parameterName, numberOfVariants, numberOfStages, includeAllParameters, mandatoryParameterNames ) if (!is.null(columnValues)) { columnCaption <- parameterSet$.getDataFrameColumnCaption( parameterName, tableColumnNames, niceColumnNamesEnabled ) dataFrame[[columnCaption]] <- columnValues if (returnParametersAsCharacter) { parameterSet$.formatDataFrameParametersAsCharacter( dataFrame, parameterName, columnValues, columnCaption ) } usedParameterNames <- c(usedParameterNames, parameterName) } } if (parameterName == "effectList") { effectMatrixName <- .getSimulationEnrichmentEffectMatrixName(parameterSet) effectMatrixNameSingular <- sub("s$", "", effectMatrixName) effectMatrix <- parameterSet$effectList[[effectMatrixName]] if (ncol(effectMatrix) == 1) { dataFrame[[effectMatrixNameSingular]] <- rep(effectMatrix, numberOfStages) } else { for (j in 1:ncol(effectMatrix)) { dataFrame[[paste0(effectMatrixNameSingular, j)]] <- rep(effectMatrix[, j], numberOfStages) } } dataFrame$situation <- rep(1:nrow(effectMatrix), numberOfStages) usedParameterNames <- c(usedParameterNames, parameterName) } }, error = function(e) { warning( ".getAsDataFrameMultidimensional: failed to add parameter ", sQuote(parameterName), " to data.frame; ", e$message ) } ) } if (includeAllParameters) { extraParameterNames <- names(parameterSet) extraParameterNames <- extraParameterNames[!grepl("^\\.", extraParameterNames)] extraParameterNames <- extraParameterNames[!(extraParameterNames %in% parameterNames)] extraParameterNames <- 
unique(c(parameterNames[!(parameterNames %in% usedParameterNames)], extraParameterNames)) for (extraParameter in extraParameterNames) { tryCatch( { if (parameterSet$.getParameterType(extraParameter) != C_PARAM_TYPE_UNKNOWN) { value <- parameterSet[[extraParameter]] if (!is.null(value) && length(value) > 0 && !is.matrix(value) && !is.array(value) && !is.data.frame(value) && (is.numeric(value) || is.character(value) || is.logical(value))) { columnCaption <- parameterSet$.getDataFrameColumnCaption( extraParameter, tableColumnNames, niceColumnNamesEnabled ) if (length(value) == 1) { dataFrame[[columnCaption]] <- rep(value, nrow(dataFrame)) } else { dataFrame[[columnCaption]] <- rep(.arrayToString(value, maxLength = 10), nrow(dataFrame)) } } } }, error = function(e) { warning( ".getAsDataFrameMultidimensional: failed to add extra parameter ", sQuote(parameterName), " to data.frame; ", e$message ) } ) } } return(dataFrame) } .getAsDataFrameUnidimensional <- function(parameterSet, parameterNames, niceColumnNamesEnabled, includeAllParameters, returnParametersAsCharacter, tableColumnNames) { numberOfStages <- parameterSet$.getUnidimensionalNumberOfStages(parameterNames) dataFrame <- NULL for (parameterName in parameterNames) { tryCatch( { parameterCaption <- ifelse(niceColumnNamesEnabled && !is.null(tableColumnNames[[parameterName]]), tableColumnNames[[parameterName]], parameterName ) parameterValues <- parameterSet[[parameterName]] if (parameterName == "futilityBounds") { parameterValues[parameterValues == C_FUTILITY_BOUNDS_DEFAULT] <- -Inf } if (length(parameterValues) == 1) { parameterValues <- rep(parameterValues, numberOfStages) } else { while (length(parameterValues) < numberOfStages) { parameterValues <- c(parameterValues, NA) } } if (includeAllParameters || ( parameterSet$.getParameterType(parameterName) != C_PARAM_NOT_APPLICABLE && sum(is.na(parameterValues)) < length(parameterValues))) { if (is.null(dataFrame)) { dataFrame <- data.frame(x = parameterValues) 
names(dataFrame) <- parameterCaption } else { dataFrame[[parameterCaption]] <- parameterValues } } if (returnParametersAsCharacter) { parameterSet$.formatDataFrameParametersAsCharacter( dataFrame, parameterName, parameterValues, parameterCaption ) } }, error = function(e) { .logError("Failed to add parameter '%s' to data.frame: %s", parameterName, e) } ) } return(dataFrame) } .getAsDataFrame <- function(..., parameterSet, parameterNames, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, handleParameterNamesAsToBeExcluded = FALSE, returnParametersAsCharacter = FALSE, tableColumnNames = C_TABLE_COLUMN_NAMES, mandatoryParameterNames = character(0)) { parameterNamesToBeExcluded <- c() if (handleParameterNamesAsToBeExcluded) { parameterNamesToBeExcluded <- parameterNames parameterNames <- parameterSet$.getVisibleFieldNamesOrdered() if (!is.null(parameterNamesToBeExcluded) && length(parameterNamesToBeExcluded) > 0) { parameterNames <- parameterNames[!(parameterNames %in% parameterNamesToBeExcluded)] } } else if (is.null(parameterNames)) { parameterNames <- parameterSet$.getVisibleFieldNamesOrdered() } parameterNames <- parameterNames[!grepl("^\\.", parameterNames)] parametersToIgnore <- character(0) if (!is.null(parameterSet[[".piecewiseSurvivalTime"]]) && parameterSet$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) { parametersToIgnore <- c( parametersToIgnore, "lambda1", "lambda2", "median1", "median2", "pi1", "pi2", "piecewiseSurvivalTime" ) parametersToIgnore <- intersect(parametersToIgnore, parameterNames) } if (parameterSet$.getParameterType("hazardRatio") == C_PARAM_GENERATED && !is.null(parameterSet[[".piecewiseSurvivalTime"]]) && isTRUE(parameterSet$.piecewiseSurvivalTime$piecewiseSurvivalEnabled)) { parametersToIgnore <- c(parametersToIgnore, "hazardRatio") } if (!inherits(parameterSet, "AccrualTime")) { accrualTime <- parameterSet[["accrualTime"]] if (!is.null(accrualTime) && length(accrualTime) > 1) { parametersToIgnore <- c(parametersToIgnore, 
c("accrualTime", "accrualIntensity")) } } if (length(parametersToIgnore) > 0) { parameterNames <- parameterNames[!(parameterNames %in% parametersToIgnore)] } if (parameterSet$.containsMultidimensionalParameters(parameterNames)) { return(.addDelayedInformationRates(.getAsDataFrameMultidimensional( parameterSet, parameterNames, niceColumnNamesEnabled, includeAllParameters, returnParametersAsCharacter, tableColumnNames, mandatoryParameterNames ))) } # remove matrices for (parameterName in parameterNames) { parameterValues <- parameterSet[[parameterName]] if (is.matrix(parameterValues) && nrow(parameterValues) != 1 && ncol(parameterValues) != 1) { parameterNames <- parameterNames[parameterNames != parameterName] } } if (length(parameterNames) == 0) { return(data.frame()) } return(.addDelayedInformationRates(.getAsDataFrameUnidimensional( parameterSet, parameterNames, niceColumnNamesEnabled, includeAllParameters, returnParametersAsCharacter, tableColumnNames ))) } .getCategoryCaptionEnrichment <- function(parameterSet, parameterName, categoryNumber) { categoryCaption <- categoryNumber if (parameterName %in% c("sampleSizes", "singleNumberOfEventsPerStage")) { categoryCaption <- parameterSet$effectList$subGroups[categoryNumber] maxNumberOfDigits <- max(nchar(sub("\\D*", "", parameterSet$effectList$subGroups))) if (parameterSet$populations > 2 && grepl(paste0("^S\\d{1,", maxNumberOfDigits - 1, "}$"), categoryCaption)) { categoryCaption <- paste0(categoryCaption, " only") } } else { if (parameterSet$populations <= 2) { categoryCaption <- ifelse(categoryNumber == parameterSet$populations, "F", "S") } else { categoryCaption <- ifelse(categoryNumber == parameterSet$populations, "F", paste0("S", categoryNumber)) } } return(categoryCaption) } #' #' @title #' Names of a Field Set Object #' #' @description #' Function to get the names of a \code{\link{FieldSet}} object. #' #' @param x A \code{\link{FieldSet}} object. 
#' #' @details #' Returns the names of a field set that can be accessed by the user. #' #' @template return_names #' #' @export #' #' @keywords internal #' names.FieldSet <- function(x) { return(x$.getVisibleFieldNames()) } #' #' @title #' Print Field Set Values #' #' @description #' \code{print} prints its \code{\link{FieldSet}} argument and returns it invisibly (via \code{invisible(x)}). #' #' @param x A \code{\link{FieldSet}} object. #' @inheritParams param_three_dots #' #' @details #' Prints the field set. #' #' @export #' #' @keywords internal #' print.FieldSet <- function(x, ...) { x$show() invisible(x) } #' #' @title #' Coerce Parameter Set to a Data Frame #' #' @description #' Returns the \code{ParameterSet} as data frame. #' #' @param x A \code{\link{FieldSet}} object. #' @inheritParams param_niceColumnNamesEnabled #' @inheritParams param_includeAllParameters #' @inheritParams param_three_dots #' #' @details #' Coerces the parameter set to a data frame. #' #' @template return_dataframe #' #' @export #' #' @keywords internal #' as.data.frame.ParameterSet <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) { .warnInCaseOfUnknownArguments(functionName = "as.data.frame", ...) return(.getAsDataFrame( parameterSet = x, parameterNames = NULL, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters )) } #' #' @title #' Field Set Transpose #' #' @description #' Given a \code{FieldSet} \code{x}, t returns the transpose of \code{x}. #' #' @param x A \code{FieldSet}. #' #' @details #' Implementation of the base R generic function \code{\link[base]{t}} #' #' @keywords internal #' #' @export #' setMethod( "t", "FieldSet", function(x) { x <- as.matrix(x, niceColumnNamesEnabled = TRUE) return(t(x)) } ) #' #' @title #' Coerce Field Set to a Matrix #' #' @description #' Returns the \code{FrameSet} as matrix. #' #' @param x A \code{\link{FieldSet}} object. 
#' @param enforceRowNames If \code{TRUE}, row names will be created #' depending on the object type, default is \code{TRUE}. #' @inheritParams param_niceColumnNamesEnabled #' @inheritParams param_three_dots #' #' @details #' Coerces the frame set to a matrix. #' #' @template return_matrix #' #' @export #' #' @keywords internal #' as.matrix.FieldSet <- function(x, ..., enforceRowNames = TRUE, niceColumnNamesEnabled = TRUE) { dataFrame <- as.data.frame(x, niceColumnNamesEnabled = niceColumnNamesEnabled) dataFrame <- .setStagesAsFirstColumn(dataFrame) result <- as.matrix(dataFrame) if (nrow(result) == 0) { return(result) } if (inherits(x, "PowerAndAverageSampleNumberResult")) { dimnames(result)[[1]] <- rep("", nrow(result)) return(result) } if (inherits(x, "AnalysisResults")) { dfDesign <- as.data.frame(x$.design, niceColumnNamesEnabled = niceColumnNamesEnabled) dfStageResults <- as.data.frame(x$.stageResults, niceColumnNamesEnabled = niceColumnNamesEnabled) dfStageResults <- dfStageResults[!is.na(dfStageResults[, grep("(test statistic)|(testStatistics)", colnames(dfStageResults))]), ] if (length(intersect(names(dfDesign), names(dfStageResults))) == 1) { dfTemp <- merge(dfDesign, dfStageResults) if (length(intersect(names(dfTemp), names(dataFrame))) >= 1) { dataFrame <- merge(dfTemp, dataFrame, all.x = FALSE, all.y = TRUE) dataFrame <- .setStagesAsFirstColumn(dataFrame) result <- as.matrix(dataFrame) } } else if (length(intersect(names(dfStageResults), names(dataFrame))) >= 1) { dataFrame <- merge(dfStageResults, dataFrame, all.x = FALSE, all.y = TRUE) dataFrame <- .setStagesAsFirstColumn(dataFrame) result <- as.matrix(dataFrame) } } if (any(grepl("^(S|s)tages?$", colnames(result)))) { dimnames(result)[[1]] <- rep("", nrow(result)) } return(result) } .setStagesAsFirstColumn <- function(data) { columnNames <- colnames(data) index <- grep("^(S|s)tages?$", columnNames) if (length(index) == 0 || index == 1) { return(data) } stageName <- columnNames[index[1]] stageNumbers 
<- data[, stageName] if (is.null(stageNumbers) || length(stageNumbers) == 0) { return(data) } data <- data[, c(stageName, columnNames[columnNames != stageName])] return(data) } #' #' @title #' Parameter Set Summary #' #' @description #' Displays a summary of \code{\link{ParameterSet}} object. #' #' @param object A \code{\link{ParameterSet}} object. #' @inheritParams param_digits #' @inheritParams param_three_dots #' #' @details #' Summarizes the parameters and results of a parameter set. #' #' @template details_summary #' #' @template return_object_summary #' @template how_to_get_help_for_generics #' #' @export #' #' @keywords internal #' summary.ParameterSet <- function(object, ..., type = 1, digits = NA_integer_, output = c("all", "title", "overview", "body")) { .warnInCaseOfUnknownArguments(functionName = "summary", ...) if (type == 1 && inherits(object, "SummaryFactory")) { return(object) } if (type == 1 && (inherits(object, "TrialDesign") || inherits(object, "TrialDesignPlan") || inherits(object, "SimulationResults") || inherits(object, "AnalysisResults") || inherits(object, "TrialDesignCharacteristics") || inherits(object, "PerformanceScore"))) { output <- match.arg(output) return(.createSummary(object, digits = digits, output = output)) } # create technical summary object$show(showType = 2) object$.cat("\n") if (!is.null(object[[".piecewiseSurvivalTim"]])) { object$.piecewiseSurvivalTime$show() object$.cat("\n") } if (!is.null(object[[".accrualTime"]])) { object$.accrualTime$show() object$.cat("\n") } object$.cat(object$.toString(startWithUpperCase = TRUE), " table:\n", heading = 1) parametersToShow <- object$.getParametersToShow() for (parameter in parametersToShow) { if (length(object[[parameter]]) == 1) { parametersToShow <- parametersToShow[parametersToShow != parameter] } } object$.printAsDataFrame(parameterNames = parametersToShow, niceColumnNamesEnabled = TRUE) invisible(object) } #' #' @title #' Print Parameter Set Values #' #' @description #' 
\code{print} prints its \code{ParameterSet} argument and returns it invisibly (via \code{invisible(x)}). #' #' @param x The \code{\link{ParameterSet}} object to print. #' @param markdown If \code{TRUE}, the object \code{x} will be printed using markdown syntax; #' normal representation will be used otherwise (default is \code{FALSE}) #' @inheritParams param_three_dots #' #' @details #' Prints the parameters and results of a parameter set. #' #' @export #' #' @keywords internal #' print.ParameterSet <- function(x, ..., markdown = FALSE) { if (markdown) { x$.catMarkdownText() return(invisible(x)) } x$show() invisible(x) } #' #' @title #' Parameter Set Plotting #' #' @description #' Plots an object that inherits from class \code{\link{ParameterSet}}. #' #' @param x The object that inherits from \code{\link{ParameterSet}}. #' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function). #' @param main The main title. #' @param xlab The x-axis label. #' @param ylab The y-axis label. #' @param type The plot type (default = 1). #' @inheritParams param_palette #' @inheritParams param_showSource #' @inheritParams param_plotSettings #' @inheritParams param_legendPosition #' @inheritParams param_three_dots_plot #' #' @details #' Generic function to plot a parameter set. #' #' @template return_object_ggplot #' #' @export #' plot.ParameterSet <- function(x, y, ..., main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1L, palette = "Set1", legendPosition = NA_integer_, showSource = FALSE, plotSettings = NULL) { .assertGgplotIsInstalled() stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "sorry, function 'plot' is not implemented yet for class '", .getClassName(x), "'" ) } .getKnitPrintVersion <- function(x, ...) 
{ fCall <- match.call(expand.dots = FALSE) .assertPackageIsInstalled("knitr") args <- list(x = x, markdown = TRUE) if (.isSimulationResults(x)) { showStatistics <- .getOptionalArgument("showStatistics", optionalArgumentDefaultValue = FALSE, ...) if (isTRUE(showStatistics)) { args$showStatistics <- TRUE } } if (inherits(x, "SummaryFactory") || .isSummaryPipe(fCall)) { args$showSummary <- TRUE } return(do.call(what = print, args = args)) } #' #' @title #' Print Parameter Set in Markdown Code Chunks #' #' @description #' The function `knit_print.ParameterSet` is the default printing function for rpact result objects in knitr. #' The chunk option `render` uses this function by default. #' To fall back to the normal printing behavior set the chunk option `render = normal_print`. #' For more information see \code{\link[knitr]{knit_print}}. #' #' @param x A \code{ParameterSet}. #' @param ... Other arguments (see \code{\link[knitr]{knit_print}}). #' #' @details #' Generic function to print a parameter set in Markdown. #' Use \code{options("rpact.print.heading.base.number" = "NUMBER")} (where \code{NUMBER} is an integer value >= -1) to #' specify the heading level. The default is \code{options("rpact.print.heading.base.number" = "0")}, i.e., the #' top headings start with \code{##} in Markdown. \code{options("rpact.print.heading.base.number" = "-1")} means #' that all headings will be written bold but are not explicit defined as header. #' #' @export #' knit_print.ParameterSet <- function(x, ...) { result <- paste0(utils::capture.output(.getKnitPrintVersion(x = x, ...)), collapse = "\n") return(knitr::asis_output(result)) } #' #' @title #' Create output in Markdown #' #' @description #' The \code{kable()} function returns the output of the specified object formatted in Markdown. #' #' @param x A \code{ParameterSet}. If x does not inherit from class \code{\link{ParameterSet}}, #' \code{knitr::kable(x)} will be returned. #' @param ... 
Other arguments (see \code{\link[knitr]{kable}}). #' #' @details #' Generic function to represent a parameter set in Markdown. #' Use \code{options("rpact.print.heading.base.number" = "NUMBER")} (where \code{NUMBER} is an integer value >= -1) to #' specify the heading level. The default is \code{options("rpact.print.heading.base.number" = "0")}, i.e., the #' top headings start with \code{##} in Markdown. \code{options("rpact.print.heading.base.number" = "-1")} means #' that all headings will be written bold but are not explicit defined as header. #' #' @export #' kable.ParameterSet <- function(x, ...) { fCall <- match.call(expand.dots = FALSE) if (inherits(x, "ParameterSet")) { objName <- deparse(fCall$x) if (length(objName) > 0) { if (length(objName) > 1) { objName <- paste0(objName[1], "...") } if (grepl("^ *print\\(", objName)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "kable(", objName, ") does not work correctly. ", "Use ", sub("print", "kable", objName), " without 'print' instead or ", sub("\\)", ", markdown = TRUE)", objName) ) } } return(knit_print.ParameterSet(x = x, ...)) } .assertPackageIsInstalled("knitr") knitr::kable(x, ...) } #' #' @title #' Create tables in Markdown #' #' @description #' The \code{kable()} function returns a single table for a single object that inherits from class \code{\link{ParameterSet}}. #' #' @details #' Generic to represent a parameter set in Markdown. #' #' @param x The object that inherits from \code{\link{ParameterSet}}. #' @param ... Other arguments (see \code{\link[knitr]{kable}}). 
#' #' @export #' setGeneric("kable", kable.ParameterSet) rpact/R/f_analysis_multiarm_means.R0000644000176200001440000021113414445307575017154 0ustar liggesusers## | ## | *Analysis of means in multi-arm designs with adaptive test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_logger.R NULL .getAnalysisResultsMeansMultiArm <- function(..., design, dataInput) { if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsMeansInverseNormalMultiArm(design = design, dataInput = dataInput, ...)) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsMeansFisherMultiArm(design = design, dataInput = dataInput, ...)) } if (.isTrialDesignConditionalDunnett(design)) { return(.getAnalysisResultsMeansConditionalDunnettMultiArm(design = design, dataInput = dataInput, ...)) } .stopWithWrongDesignMessage(design) } .getAnalysisResultsMeansInverseNormalMultiArm <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, varianceOption = C_VARIANCE_OPTION_MULTIARMED_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, assumedStDevs = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, calculateSingleStepAdjusted = FALSE, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { 
.assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsMeansInverseNormalMultiArm", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) results <- AnalysisResultsMultiArmInverseNormal(design = design, dataInput = dataInput) results <- .getAnalysisResultsMeansMultiArmAll( results = results, design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, varianceOption = varianceOption, thetaH0 = thetaH0, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, calculateSingleStepAdjusted = calculateSingleStepAdjusted, tolerance = tolerance ) return(results) } .getAnalysisResultsMeansFisherMultiArm <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, varianceOption = C_VARIANCE_OPTION_MULTIARMED_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, assumedStDevs = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, calculateSingleStepAdjusted = FALSE, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsMeansFisherMultiArm", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
)

# Create the Fisher combination test result container and delegate all
# calculations to the common worker .getAnalysisResultsMeansMultiArmAll().
results <- AnalysisResultsMultiArmFisher(design = design, dataInput = dataInput)
results <- .getAnalysisResultsMeansMultiArmAll(
    results = results, design = design, dataInput = dataInput,
    intersectionTest = intersectionTest, stage = stage,
    directionUpper = directionUpper,
    normalApproximation = normalApproximation, varianceOption = varianceOption,
    thetaH0 = thetaH0, thetaH1 = thetaH1, assumedStDevs = assumedStDevs,
    nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
    calculateSingleStepAdjusted = calculateSingleStepAdjusted,
    tolerance = tolerance,
    iterations = iterations, seed = seed
)
return(results)
}

#
# Analysis results for multi-arm datasets of means based on the conditional
# Dunnett test design. Delegates to the common worker
# .getAnalysisResultsMeansMultiArmAll().
#
# Fix: 'iterations' and 'seed' were forwarded to the worker below without
# being parameters of this function; the unbound names only avoided an error
# through R's lazy evaluation because the non-Fisher code path of the worker
# never forces these promises. They are now declared with the same defaults
# as in the Fisher sibling function (backward compatible, unused for the
# conditional Dunnett design).
#
.getAnalysisResultsMeansConditionalDunnettMultiArm <- function(..., design, dataInput,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT,
        varianceOption = C_VARIANCE_OPTION_MULTIARMED_DEFAULT,
        thetaH0 = C_THETA_H0_MEANS_DEFAULT,
        thetaH1 = NA_real_,
        assumedStDevs = NA_real_,
        nPlanned = NA_real_,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        calculateSingleStepAdjusted = FALSE,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT,
        iterations = C_ITERATIONS_DEFAULT,
        seed = NA_real_) {
    .assertIsTrialDesignConditionalDunnett(design)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    .warnInCaseOfUnknownArguments(
        functionName = ".getAnalysisResultsMeansConditionalDunnettMultiArm",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )

    results <- AnalysisResultsConditionalDunnett(design = design, dataInput = dataInput)
    results <- .getAnalysisResultsMeansMultiArmAll(
        results = results, design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        directionUpper = directionUpper,
        normalApproximation = normalApproximation, varianceOption = varianceOption,
        thetaH0 = thetaH0, thetaH1 = thetaH1, assumedStDevs = assumedStDevs,
        nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
        calculateSingleStepAdjusted = calculateSingleStepAdjusted,
        tolerance = tolerance,
        iterations = iterations, seed = seed
    )
    return(results)
}

#
# Common worker for all multi-arm means analyses: computes stage results,
# the closed test, conditional power, conditional rejection probabilities,
# repeated confidence intervals, and repeated p-values, and stores them in
# the given 'results' container.
#
# 'iterations' and 'seed' now default to the package defaults so that
# callers that do not simulate conditional power (non-Fisher designs) may
# safely omit them instead of relying on never-forced promises.
#
.getAnalysisResultsMeansMultiArmAll <- function(..., results, design, dataInput,
        intersectionTest, stage, directionUpper, normalApproximation,
        varianceOption, thetaH0, thetaH1, assumedStDevs, nPlanned,
        allocationRatioPlanned, calculateSingleStepAdjusted, tolerance,
        iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    startTime <- Sys.time()

    intersectionTest <- .getCorrectedIntersectionTestMultiArmIfNecessary(design, intersectionTest)
    stageResults <- .getStageResultsMeansMultiArm(
        design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        thetaH0 = thetaH0, directionUpper = directionUpper,
        normalApproximation = normalApproximation,
        varianceOption = varianceOption,
        calculateSingleStepAdjusted = calculateSingleStepAdjusted,
        userFunctionCallEnabled = TRUE
    )
    # The stage-results calculation may have corrected both settings
    # (e.g. normal approximation is enforced for conditional Dunnett).
    normalApproximation <- stageResults$normalApproximation
    intersectionTest <- stageResults$intersectionTest
    results$.setStageResults(stageResults)
    .logProgress("Stage results calculated", startTime = startTime)

    numberOfGroups <- dataInput$getNumberOfGroups()
    thetaH1 <- .assertIsValidThetaH1ForMultiArm(thetaH1, stageResults, stage, results = results)
    assumedStDevs <- .assertIsValidAssumedStDevForMultiHypotheses(
        assumedStDevs, stageResults, stage,
        results = results
    )

    .setValueAndParameterType(
        results, "intersectionTest", intersectionTest,
        C_INTERSECTION_TEST_MULTIARMED_DEFAULT
    )
# Record user-visible parameters and whether they equal the defaults.
.setValueAndParameterType(
    results, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT
)
.setValueAndParameterType(
    results, "normalApproximation",
    normalApproximation, C_NORMAL_APPROXIMATION_MEANS_DEFAULT
)
.setValueAndParameterType(
    results, "varianceOption", varianceOption, C_VARIANCE_OPTION_MULTIARMED_DEFAULT
)
.setValueAndParameterType(results, "thetaH0", thetaH0, C_THETA_H0_MEANS_DEFAULT)
.setConditionalPowerArguments(results, dataInput, nPlanned, allocationRatioPlanned)
.setNPlannedAndThetaH1AndAssumedStDevs(results, nPlanned, thetaH1, assumedStDevs)

# Closed test: conditional Dunnett designs use their own test procedure,
# all other designs use the closed combination test.
startTime <- Sys.time()
if (!.isTrialDesignConditionalDunnett(design)) {
    results$.closedTestResults <- getClosedCombinationTestResults(stageResults = stageResults)
} else {
    results$.closedTestResults <- getClosedConditionalDunnettTestResults(
        stageResults = stageResults, design = design, stage = stage
    )
}
.logProgress("Closed test calculated", startTime = startTime)

results$.setParameterType("seed", C_PARAM_NOT_APPLICABLE)
results$.setParameterType("iterations", C_PARAM_NOT_APPLICABLE)

if (design$kMax > 1) {
    # conditional power: simulated (iterations/seed) for Fisher designs,
    # computed analytically otherwise
    startTime <- Sys.time()
    if (.isTrialDesignFisher(design)) {
        results$.conditionalPowerResults <- .getConditionalPowerMeansMultiArm(
            stageResults = stageResults, stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned,
            thetaH1 = thetaH1, assumedStDevs = assumedStDevs,
            iterations = iterations, seed = seed
        )
        .synchronizeIterationsAndSeed(results)
    } else {
        results$.conditionalPowerResults <- .getConditionalPowerMeansMultiArm(
            stageResults = stageResults, stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned,
            thetaH1 = thetaH1, assumedStDevs = assumedStDevs
        )
        results$conditionalPower <- results$.conditionalPowerResults$conditionalPower
        results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
    }
    # thetaH1 / assumedStDevs as returned by the conditional power
    # calculation, stored as one-column matrices
    results$thetaH1 <- matrix(results$.conditionalPowerResults$thetaH1, ncol = 1)
    results$assumedStDevs <-
        matrix(results$.conditionalPowerResults$assumedStDevs, ncol = 1)
    .logProgress("Conditional power calculated", startTime = startTime)

    # CRP - conditional rejection probabilities
    startTime <- Sys.time()
    results$conditionalRejectionProbabilities <- .getConditionalRejectionProbabilitiesMultiArm(
        stageResults = stageResults, stage = stage
    )
    results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_GENERATED)
    .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime)
} else {
    # single-stage design: nothing conditional to compute
    results$.setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE)
    results$.setParameterType("conditionalPowerSimulated", C_PARAM_NOT_APPLICABLE)
    results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_NOT_APPLICABLE)
}

# RCI - repeated confidence interval; result is a gMax x 2 x kMax array
# which is unpacked into lower/upper bound matrices
repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsMeansMultiArm(
    design = design, dataInput = dataInput,
    intersectionTest = intersectionTest, stage = stage,
    normalApproximation = normalApproximation,
    varianceOption = varianceOption, tolerance = tolerance
)
gMax <- stageResults$getGMax()
results$repeatedConfidenceIntervalLowerBounds <-
    matrix(rep(NA_real_, gMax * design$kMax), nrow = gMax, ncol = design$kMax)
results$repeatedConfidenceIntervalUpperBounds <-
    results$repeatedConfidenceIntervalLowerBounds
for (k in 1:design$kMax) {
    for (treatmentArm in 1:gMax) {
        results$repeatedConfidenceIntervalLowerBounds[treatmentArm, k] <-
            repeatedConfidenceIntervals[treatmentArm, 1, k]
        results$repeatedConfidenceIntervalUpperBounds[treatmentArm, k] <-
            repeatedConfidenceIntervals[treatmentArm, 2, k]
    }
}
results$.setParameterType("repeatedConfidenceIntervalLowerBounds", C_PARAM_GENERATED)
results$.setParameterType("repeatedConfidenceIntervalUpperBounds", C_PARAM_GENERATED)

# repeated p-value
results$repeatedPValues <- .getRepeatedPValuesMultiArm(
    stageResults = stageResults,
    tolerance = tolerance
)
results$.setParameterType("repeatedPValues", C_PARAM_GENERATED)

return(results)
}
#
# Stage results for multi-arm datasets of means: per-arm test statistics,
# stage-wise and overall p-values, effect sizes and standard deviations,
# and (optionally) single-step adjusted p-values and combination test
# statistics used for the repeated confidence intervals.
#
.getStageResultsMeansMultiArm <- function(..., design, dataInput,
        thetaH0 = C_THETA_H0_MEANS_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT,
        varianceOption = C_VARIANCE_OPTION_MULTIARMED_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        calculateSingleStepAdjusted = FALSE,
        userFunctionCallEnabled = FALSE) {
    .assertIsTrialDesign(design)
    .assertIsDatasetMeans(dataInput)
    .assertIsValidThetaH0DataInput(thetaH0, dataInput)
    .assertIsValidDirectionUpper(directionUpper, design$sided)
    .assertIsSingleLogical(normalApproximation, "normalApproximation")
    .assertIsValidVarianceOptionMultiArmed(design, varianceOption)
    .warnInCaseOfUnknownArguments(
        functionName = ".getStageResultsMeansMultiArm",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design,
            powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )

    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    # gMax: number of treatment arms; group gMax + 1 is the control arm
    gMax <- dataInput$getNumberOfGroups() - 1
    kMax <- design$kMax

    # The conditional Dunnett test is defined on the normal approximation;
    # silently enforce it (with a warning only on user-level calls).
    if (.isTrialDesignConditionalDunnett(design)) {
        if (normalApproximation == FALSE) {
            if (userFunctionCallEnabled) {
                warning("'normalApproximation' was set to TRUE ",
                    "because conditional Dunnett test was specified as design",
                    call. = FALSE
                )
            }
            normalApproximation <- TRUE
        }
    }

    intersectionTest <- .getCorrectedIntersectionTestMultiArmIfNecessary(
        design, intersectionTest, userFunctionCallEnabled
    )
    .assertIsValidIntersectionTestMultiArm(design, intersectionTest)

    if (intersectionTest == "Dunnett" &&
            varianceOption != "overallPooled" && !normalApproximation) {
        stop("Dunnett t test can only be performed with overall variance estimation, select 'varianceOption' = \"overallPooled\"",
            call. = FALSE
        )
    }

    stageResults <- StageResultsMultiArmMeans(
        design = design,
        dataInput = dataInput,
        thetaH0 = thetaH0,
        direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER),
        normalApproximation = normalApproximation,
        directionUpper = directionUpper,
        varianceOption = varianceOption,
        stage = stage
    )
    .setValueAndParameterType(
        stageResults, "intersectionTest",
        intersectionTest, C_INTERSECTION_TEST_MULTIARMED_DEFAULT
    )

    # One row per treatment arm, one column per stage.
    effectSizes <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallStDevs <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallPooledStDevs <- matrix(rep(NA_real_, kMax), 1, kMax)
    testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    dimnames(testStatistics) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = ""))
    dimnames(overallTestStatistics) <- list(
        paste("arm ", 1:gMax, sep = ""),
        paste("stage ", (1:kMax), sep = "")
    )
    dimnames(separatePValues) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = ""))
    dimnames(overallPValues) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = ""))

    for (k in 1:stage) {
        # Pooled standard deviation over all groups (cumulative data).
        overallPooledStDevs[1, k] <- sqrt(sum((dataInput$getOverallSampleSizes(stage = k) - 1) *
            dataInput$getOverallStDevs(stage = k)^2, na.rm = TRUE) /
            sum(dataInput$getOverallSampleSizes(stage = k) - 1, na.rm = TRUE))

        if (varianceOption == "overallPooled") {
            # stage-wise pooled sd over all groups; the cumulative pooled sd
            # is used for the overall test statistic
            stDev <- sqrt(sum((dataInput$getSampleSizes(stage = k) - 1) *
                dataInput$getStDevs(stage = k)^2, na.rm = TRUE) /
                sum(dataInput$getSampleSizes(stage = k) - 1, na.rm = TRUE))
            overallStDevForTest <- overallPooledStDevs[1, k]
        }

        for (treatmentArm in 1:gMax) {
            # effect size: cumulative mean difference of arm vs. control
            effectSizes[treatmentArm, k] <- dataInput$getOverallMeans(stage = k, group = treatmentArm) -
                dataInput$getOverallMeans(stage = k, group = gMax + 1)

            # pairwise pooled cumulative sd of arm and control
            # NOTE(review): the singular accessors getOverallSampleSize /
            # getOverallStDev differ from the plural forms used elsewhere in
            # this function — verify both exist on the dataset class.
            overallStDevs[treatmentArm, k] <-
                sqrt(sum((
                    dataInput$getOverallSampleSize(stage = k, group = c(treatmentArm, gMax + 1)) - 1) *
                    dataInput$getOverallStDev(stage = k, group = c(treatmentArm, gMax + 1))^2, na.rm = TRUE) /
                    sum(dataInput$getOverallSampleSize(stage = k, group = c(treatmentArm, gMax + 1)) - 1))

            if (varianceOption == "pairwisePooled") {
                stDev <- sqrt(sum((dataInput$getSampleSizes(stage = k, group = c(treatmentArm, gMax + 1)) - 1) *
                    dataInput$getStDevs(stage = k, group = c(treatmentArm, gMax + 1))^2, na.rm = TRUE) /
                    sum(dataInput$getSampleSizes(stage = k, group = c(treatmentArm, gMax + 1)) - 1))
                overallStDevForTest <- overallStDevs[treatmentArm, k]
            }

            if (varianceOption == "notPooled") {
                # unpooled variances (Welch-type statistic)
                testStatistics[treatmentArm, k] <- (dataInput$getMeans(stage = k, group = treatmentArm) -
                    dataInput$getMeans(stage = k, group = gMax + 1) - thetaH0) /
                    sqrt(dataInput$getStDevs(stage = k, group = treatmentArm)^2 /
                        dataInput$getSampleSizes(stage = k, group = treatmentArm) +
                        dataInput$getStDevs(stage = k, group = gMax + 1)^2 /
                            dataInput$getSampleSizes(stage = k, group = gMax + 1))
                overallTestStatistics[treatmentArm, k] <- (
                    dataInput$getOverallMeans(stage = k, group = treatmentArm) -
                        dataInput$getOverallMeans(stage = k, group = gMax + 1) - thetaH0) /
                    sqrt(dataInput$getOverallStDevs(stage = k, group = treatmentArm)^2 /
                        dataInput$getOverallSampleSizes(stage = k, group = treatmentArm) +
                        dataInput$getOverallStDevs(stage = k, group = gMax + 1)^2 /
                            dataInput$getOverallSampleSizes(stage = k, group = gMax + 1))
            } else {
                # pooled variance (overall or pairwise, selected above)
                testStatistics[treatmentArm, k] <- (dataInput$getMeans(stage = k, group = treatmentArm) -
                    dataInput$getMeans(stage = k, group = gMax + 1) - thetaH0) / stDev /
                    sqrt(1 / dataInput$getSampleSizes(stage = k, group = treatmentArm) +
                        1 / dataInput$getSampleSizes(stage = k, group = gMax + 1))
                overallTestStatistics[treatmentArm, k] <- (
                    dataInput$getOverallMeans(stage = k, group = treatmentArm) -
                        dataInput$getOverallMeans(stage = k, group = gMax + 1) - thetaH0) /
                    overallStDevForTest /
                    sqrt(1 / dataInput$getOverallSampleSizes(stage = k, group = treatmentArm) +
                        1 / dataInput$getOverallSampleSizes(stage = k, group = gMax + 1))
            }

            # One-sided (upper) p-values; flipped below if !directionUpper.
            if (normalApproximation) {
                separatePValues[treatmentArm, k] <- 1 - stats::pnorm(testStatistics[treatmentArm, k])
                overallPValues[treatmentArm, k] <- 1 - stats::pnorm(overallTestStatistics[treatmentArm, k])
            } else {
                if (varianceOption == "overallPooled") {
                    separatePValues[treatmentArm, k] <- 1 - stats::pt(
                        testStatistics[treatmentArm, k],
                        sum(dataInput$getSampleSizes(stage = k) - 1, na.rm = TRUE)
                    )
                    overallPValues[treatmentArm, k] <- 1 - stats::pt(
                        overallTestStatistics[treatmentArm, k],
                        sum(dataInput$getOverallSampleSizes(stage = k) - 1, na.rm = TRUE)
                    )
                } else if (varianceOption == "pairwisePooled") {
                    separatePValues[treatmentArm, k] <- 1 - stats::pt(
                        testStatistics[treatmentArm, k],
                        sum(dataInput$getSampleSizes(stage = k, group = c(treatmentArm, gMax + 1)) - 1)
                    )
                    overallPValues[treatmentArm, k] <- 1 - stats::pt(
                        overallTestStatistics[treatmentArm, k],
                        sum(dataInput$getOverallSampleSizes(stage = k, group = c(treatmentArm, gMax + 1)) - 1)
                    )
                } else if (varianceOption == "notPooled") {
                    # Satterthwaite-type degrees of freedom via weight u
                    u <- dataInput$getStDevs(stage = k, group = treatmentArm)^2 /
                        dataInput$getSampleSizes(stage = k, group = treatmentArm) /
                        (dataInput$getStDevs(stage = k, group = treatmentArm)^2 /
                            dataInput$getSampleSizes(stage = k, group = treatmentArm) +
                            dataInput$getStDevs(stage = k, group = gMax + 1)^2 /
                                dataInput$getSampleSizes(stage = k, group = gMax + 1))
                    separatePValues[treatmentArm, k] <- 1 - stats::pt(
                        testStatistics[treatmentArm, k],
                        1 / (u^2 / (dataInput$getSampleSizes(stage = k, group = treatmentArm) - 1) +
                            (1 - u)^2 / (dataInput$getSampleSizes(stage = k, group = gMax + 1) - 1))
                    )
                    u <- dataInput$getOverallStDevs(stage = k, group = treatmentArm)^2 /
                        dataInput$getOverallSampleSizes(stage = k, group = treatmentArm) /
                        (dataInput$getOverallStDevs(stage = k, group = treatmentArm)^2 /
                            dataInput$getOverallSampleSizes(stage = k, group = treatmentArm) +
                            dataInput$getOverallStDevs(stage = k, group = gMax + 1)^2 /
                                dataInput$getOverallSampleSizes(stage = k, group = gMax + 1))
                    overallPValues[treatmentArm, k] <- 1 - stats::pt(
                        overallTestStatistics[treatmentArm, k],
                        1 / (u^2 / (dataInput$getOverallSampleSizes(stage = k, group = treatmentArm) - 1) +
                            (1 - u)^2 / (dataInput$getOverallSampleSizes(stage = k, group = gMax + 1) - 1))
                    )
                }
            }
            if (!directionUpper) {
                separatePValues[treatmentArm, k] <- 1 - separatePValues[treatmentArm, k]
                overallPValues[treatmentArm, k] <- 1 - overallPValues[treatmentArm, k]
                # testStatistics[treatmentArm, k] <- -testStatistics[treatmentArm, k]
                # overallTestStatistics[treatmentArm, k] <- -overallTestStatistics[treatmentArm, k]
            }
        }
    }

    .setWeightsToStageResults(design, stageResults)

    # Calculation of single stage adjusted p-Values and overall test statistics
    # for determination of RCIs
    if (calculateSingleStepAdjusted) {
        singleStepAdjustedPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
        combInverseNormal <- matrix(NA_real_, nrow = gMax, ncol = kMax)
        combFisher <- matrix(NA_real_, nrow = gMax, ncol = kMax)

        if (.isTrialDesignInverseNormal(design)) {
            weightsInverseNormal <- stageResults$weightsInverseNormal
        } else if (.isTrialDesignFisher(design)) {
            weightsFisher <- stageResults$weightsFisher
        }

        for (k in 1:stage) {
            # number of arms still providing data at stage k
            selected <- sum(!is.na(separatePValues[, k]))
            sampleSizesSelected <- as.numeric(na.omit(
                dataInput$getSampleSizes(stage = k, group = -(gMax + 1))
            ))
            # correlation matrix of the Dunnett-type statistics
            sigma <- sqrt(sampleSizesSelected / (sampleSizesSelected +
                dataInput$getSampleSize(k, gMax + 1))) %*%
                sqrt(t(sampleSizesSelected / (sampleSizesSelected +
                    dataInput$getSampleSize(k, gMax + 1))))
            diag(sigma) <- 1

            for (treatmentArm in 1:gMax) {
                if (intersectionTest == "Bonferroni" || intersectionTest == "Simes") {
                    # group sequential designs adjust the overall p-value,
                    # combination test designs adjust the stage-wise p-value
                    if (.isTrialDesignGroupSequential(design)) {
                        overallPValues[treatmentArm, k] <-
                            min(1, overallPValues[treatmentArm, k] * selected)
                    } else {
                        singleStepAdjustedPValues[treatmentArm, k] <- min(
                            1,
                            separatePValues[treatmentArm, k] * selected
                        )
                    }
                } else if (intersectionTest == "Sidak") {
                    if (.isTrialDesignGroupSequential(design)) {
                        overallPValues[treatmentArm, k] <-
                            1 - (1 - overallPValues[treatmentArm, k])^selected
                    } else {
                        singleStepAdjustedPValues[treatmentArm, k] <-
                            1 - (1 - separatePValues[treatmentArm, k])^selected
                    }
                } else if (intersectionTest == "Dunnett") {
                    if (!is.na(testStatistics[treatmentArm, k])) {
                        df <- NA_real_
                        if (!normalApproximation) {
                            df <- sum(dataInput$getSampleSizes(stage = k) - 1, na.rm = TRUE)
                        }
                        singleStepAdjustedPValues[treatmentArm, k] <- 1 - .getMultivariateDistribution(
                            type = ifelse(normalApproximation, "normal", "t"),
                            upper = ifelse(directionUpper, testStatistics[treatmentArm, k],
                                -testStatistics[treatmentArm, k]
                            ),
                            sigma = sigma, df = df
                        )
                    }
                }
                # combination test statistics over stages 1..k
                if (.isTrialDesignInverseNormal(design)) {
                    combInverseNormal[treatmentArm, k] <- (weightsInverseNormal[1:k] %*%
                        .getOneMinusQNorm(singleStepAdjustedPValues[treatmentArm, 1:k])) /
                        sqrt(sum(weightsInverseNormal[1:k]^2))
                } else if (.isTrialDesignFisher(design)) {
                    combFisher[treatmentArm, k] <- prod(
                        singleStepAdjustedPValues[treatmentArm, 1:k]^weightsFisher[1:k]
                    )
                }
            }
        }

        # NOTE(review): unlike the else branch below, this branch does not
        # store overallPooledStDevs on the stage results — confirm intentional.
        stageResults$overallTestStatistics <- overallTestStatistics
        stageResults$overallPValues <- overallPValues
        stageResults$effectSizes <- effectSizes
        stageResults$overallStDevs <- overallStDevs
        stageResults$testStatistics <- testStatistics
        stageResults$separatePValues <- separatePValues
        stageResults$singleStepAdjustedPValues <- singleStepAdjustedPValues
        stageResults$.setParameterType("singleStepAdjustedPValues", C_PARAM_GENERATED)
        if (.isTrialDesignFisher(design)) {
            stageResults$combFisher <- combFisher
            stageResults$.setParameterType("combFisher", C_PARAM_GENERATED)
        } else if (.isTrialDesignInverseNormal(design)) {
            stageResults$combInverseNormal <- combInverseNormal
            stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED)
        }
    } else {
        stageResults$overallTestStatistics <- overallTestStatistics
        stageResults$overallPValues <- overallPValues
        stageResults$effectSizes <- effectSizes
        stageResults$overallStDevs <- overallStDevs
        stageResults$overallPooledStDevs <- overallPooledStDevs
        stageResults$testStatistics <- testStatistics
        stageResults$separatePValues <- separatePValues
    }

    return(stageResults)
}

#
# Finds the value of thetaH0 in [thetaLow, thetaUp] for which the statistic
# stageResults[[firstParameterName]][treatmentArm, stage] (transformed via
# .getOneMinusQNorm for group sequential designs) equals 'secondValue'.
# Used to invert the test for repeated confidence interval limits.
#
.getRootThetaMeansMultiArm <- function(..., design, dataInput, treatmentArm, stage,
        directionUpper, normalApproximation, varianceOption,
        intersectionTest, thetaLow, thetaUp,
        firstParameterName, secondValue, tolerance) {
    result <- .getOneDimensionalRoot(
        function(theta) {
            # recompute the stage results under the shifted null hypothesis
            stageResults <- .getStageResultsMeansMultiArm(
                design = design, dataInput = dataInput,
                stage = stage, thetaH0 = theta,
                directionUpper = directionUpper,
                intersectionTest = intersectionTest,
                normalApproximation = normalApproximation,
                varianceOption = varianceOption,
                calculateSingleStepAdjusted = TRUE
            )
            firstValue <- stageResults[[firstParameterName]][treatmentArm, stage]
            if (.isTrialDesignGroupSequential(design)) {
                firstValue <- .getOneMinusQNorm(firstValue)
            }
            return(firstValue - secondValue)
        },
        lower = thetaLow, upper = thetaUp, tolerance = tolerance,
        callingFunctionInformation = ".getRootThetaMeansMultiArm"
    )
    return(result)
}

#
# Expands 'theta' (by repeated doubling, at most 30 times) until
# conditionFunction(secondValue, firstValue) no longer holds, i.e. until the
# search interval for the root finder above brackets the solution.
#
.getUpperLowerThetaMeansMultiArm <- function(..., design, dataInput, theta, treatmentArm, stage,
        directionUpper, normalApproximation, varianceOption,
        conditionFunction, intersectionTest, firstParameterName, secondValue) {
    stageResults <- .getStageResultsMeansMultiArm(
        design = design, dataInput = dataInput,
        stage = stage, thetaH0 = theta,
        directionUpper = directionUpper,
        intersectionTest = intersectionTest,
        normalApproximation = normalApproximation,
        varianceOption = varianceOption,
        calculateSingleStepAdjusted = TRUE
    )
    firstValue <- stageResults[[firstParameterName]][treatmentArm, stage]
    maxSearchIterations <- 30
    while (conditionFunction(secondValue, firstValue)) {
        theta <- 2 * theta
        stageResults <- .getStageResultsMeansMultiArm(
            design = design, dataInput = dataInput,
            stage = stage, thetaH0 = theta,
            directionUpper = directionUpper,
            intersectionTest = intersectionTest,
            normalApproximation = normalApproximation,
            varianceOption = varianceOption,
            calculateSingleStepAdjusted = TRUE
        )
        firstValue <- stageResults[[firstParameterName]][treatmentArm, stage]
        maxSearchIterations <- maxSearchIterations - 1
        if (maxSearchIterations < 0) {
            stop(
                C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                sprintf(
                    paste0(
                        "failed to find theta (k = %s, firstValue = %s, ",
                        "secondValue = %s, levels(firstValue) = %s, theta = %s)"
                    ),
                    stage, stageResults[[firstParameterName]][treatmentArm, stage],
                    secondValue, firstValue, theta
                )
            )
        }
    }
    return(theta)
}

#
# Common worker computing repeated confidence intervals (RCIs) for all
# multi-arm means designs; returns a gMax x 2 x kMax array with lower
# (index 1) and upper (index 2) limits per treatment arm and stage.
#
.getRepeatedConfidenceIntervalsMeansMultiArmAll <- function(..., design, dataInput,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT,
        varianceOption = C_VARIANCE_OPTION_MULTIARMED_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) {
    .assertIsValidIntersectionTestMultiArm(design, intersectionTest)

    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    stageResults <- .getStageResultsMeansMultiArm(
        design = design, dataInput = dataInput,
        stage = stage, thetaH0 = 0,
        directionUpper = directionUpper,
        intersectionTest = intersectionTest,
        normalApproximation = normalApproximation,
        varianceOption = varianceOption,
        calculateSingleStepAdjusted = FALSE
    )

    gMax <- dataInput$getNumberOfGroups() - 1
    repeatedConfidenceIntervals <- array(NA_real_, dim = c(gMax, 2, design$kMax))

    # Confidence interval for second stage when using conditional Dunnett test
    if (.isTrialDesignConditionalDunnett(design)) {
        startTime <- Sys.time()
        for (treatmentArm in 1:gMax) {
            if (!is.na(stageResults$testStatistics[treatmentArm, 2])) {
                # widen the lower search limit by doubling until the
                # one-sided test (directionUpper = TRUE) rejects
                thetaLowLimit <- -1
                iteration <- 30
                rejected <- FALSE
                while (!rejected && iteration >= 0) {
                    stageResults <- .getStageResultsMeansMultiArm(
                        design = design, dataInput = dataInput,
                        stage = stage, thetaH0 = thetaLowLimit,
                        directionUpper = TRUE,
                        intersectionTest = intersectionTest,
                        normalApproximation = normalApproximation,
                        varianceOption = varianceOption,
                        calculateSingleStepAdjusted = FALSE
                    )
                    rejected <- .getConditionalDunnettTestForCI(
                        design = design,
                        stageResults = stageResults,
                        treatmentArm = treatmentArm
                    )
                    iteration <- iteration - 1
                    thetaLowLimit <- 2 * thetaLowLimit
                }
                # widen the upper search limit analogously
                # (directionUpper = FALSE)
                iteration <- 30
                thetaUpLimit <- 1
                rejected <- FALSE
                while (!rejected && iteration >= 0) {
                    stageResults <- .getStageResultsMeansMultiArm(
                        design = design, dataInput = dataInput,
                        stage = stage, thetaH0 = thetaUpLimit,
                        directionUpper = FALSE,
                        intersectionTest = intersectionTest,
                        normalApproximation = normalApproximation,
                        varianceOption = varianceOption,
                        calculateSingleStepAdjusted = FALSE
                    )
                    rejected <- .getConditionalDunnettTestForCI(
                        design = design,
                        stageResults = stageResults,
                        treatmentArm = treatmentArm
                    )
                    iteration <- iteration - 1
                    thetaUpLimit <- 2 * thetaUpLimit
                }

                # bisection for the lower confidence limit
                # NOTE(review): ifelse() is used here purely for its side
                # effect of evaluating one of the two assignment arguments —
                # works for scalar conditions but is fragile; consider if/else.
                thetaLow <- thetaLowLimit
                thetaUp <- thetaUpLimit
                iteration <- 30
                prec <- 1
                while (prec > tolerance) {
                    theta <- (thetaLow + thetaUp) / 2
                    stageResults <- .getStageResultsMeansMultiArm(
                        design = design, dataInput = dataInput,
                        stage = stage, thetaH0 = theta,
                        directionUpper = TRUE,
                        intersectionTest = intersectionTest,
                        normalApproximation = normalApproximation,
                        varianceOption = varianceOption,
                        calculateSingleStepAdjusted = FALSE
                    )
                    conditionalDunnettSingleStepRejected <- .getConditionalDunnettTestForCI(
                        design = design,
                        stageResults = stageResults,
                        treatmentArm = treatmentArm
                    )
                    ifelse(conditionalDunnettSingleStepRejected,
                        thetaLow <- theta, thetaUp <- theta)
                    ifelse(iteration > 0, prec <- thetaUp - thetaLow, prec <- 0)
                    iteration <- iteration - 1
                }
                repeatedConfidenceIntervals[treatmentArm, 1, 2] <- theta

                # bisection for the upper confidence limit
                thetaLow <- thetaLowLimit
                thetaUp <- thetaUpLimit
                iteration <- 30
                prec <- 1
                while (prec > tolerance) {
                    theta <- (thetaLow + thetaUp) / 2
                    stageResults <- .getStageResultsMeansMultiArm(
                        design = design, dataInput = dataInput,
                        stage = stage, thetaH0 = theta,
                        directionUpper = FALSE,
                        intersectionTest = intersectionTest,
                        normalApproximation = normalApproximation,
                        varianceOption = varianceOption,
                        calculateSingleStepAdjusted = FALSE
                    )
                    conditionalDunnettSingleStepRejected <- .getConditionalDunnettTestForCI(
                        design = design,
                        stageResults = stageResults,
                        treatmentArm = treatmentArm
                    )
                    ifelse(conditionalDunnettSingleStepRejected,
                        thetaUp <- theta, thetaLow <- theta)
                    ifelse(iteration > 0, prec <- thetaUp - thetaLow, prec <- 0)
                    iteration <- iteration - 1
                }
                repeatedConfidenceIntervals[treatmentArm, 2, 2] <- theta

                # invalidate crossed intervals (lower > upper)
                if (!is.na(repeatedConfidenceIntervals[treatmentArm, 1, 2]) &&
                        !is.na(repeatedConfidenceIntervals[treatmentArm, 2, 2]) &&
                        repeatedConfidenceIntervals[treatmentArm, 1, 2] >
                            repeatedConfidenceIntervals[treatmentArm, 2, 2]) {
                    repeatedConfidenceIntervals[treatmentArm, , 2] <- rep(NA_real_, 2)
                }
            }
        }
        .logProgress("Confidence intervals for final stage calculated", startTime = startTime)
    } else {
        # Repeated confidence intervals when using combination tests
        if (intersectionTest == "Hierarchical") {
            warning("Repeated confidence intervals not available for ",
                "'intersectionTest' = \"Hierarchical\"",
                call.
                = FALSE
            )
            return(repeatedConfidenceIntervals)
        }

        # design-specific boundaries and comparison direction for the search
        if (.isTrialDesignFisher(design)) {
            bounds <- design$alpha0Vec
            border <- C_ALPHA_0_VEC_DEFAULT
            criticalValues <- design$criticalValues
            conditionFunction <- .isFirstValueSmallerThanSecondValue
        } else if (.isTrialDesignInverseNormal(design)) {
            bounds <- design$futilityBounds
            border <- C_FUTILITY_BOUNDS_DEFAULT
            criticalValues <- design$criticalValues
            # clamp infinite boundaries to finite extremes for the search
            criticalValues[is.infinite(criticalValues) & criticalValues > 0] <- C_QNORM_MAXIMUM
            criticalValues[is.infinite(criticalValues) & criticalValues < 0] <- C_QNORM_MINIMUM
            conditionFunction <- .isFirstValueGreaterThanSecondValue
        }

        # Necessary for adjustment for binding futility boundaries
        futilityCorr <- rep(NA_real_, design$kMax)

        stages <- (1:stage)
        for (k in stages) {
            startTime <- Sys.time()
            for (treatmentArm in 1:gMax) {
                if (!is.na(stageResults$testStatistics[treatmentArm, k]) &&
                        criticalValues[k] < C_QNORM_MAXIMUM) {
                    # finding maximum upper and minimum lower bounds for RCIs
                    thetaLow <- .getUpperLowerThetaMeansMultiArm(
                        design = design, dataInput = dataInput,
                        theta = -1, treatmentArm = treatmentArm,
                        stage = k, directionUpper = TRUE,
                        normalApproximation = normalApproximation,
                        varianceOption = varianceOption,
                        conditionFunction = conditionFunction,
                        intersectionTest = intersectionTest,
                        firstParameterName = firstParameterName,
                        secondValue = criticalValues[k]
                    )
                    thetaUp <- .getUpperLowerThetaMeansMultiArm(
                        design = design, dataInput = dataInput,
                        theta = 1, treatmentArm = treatmentArm,
                        stage = k, directionUpper = FALSE,
                        normalApproximation = normalApproximation,
                        varianceOption = varianceOption,
                        conditionFunction = conditionFunction,
                        intersectionTest = intersectionTest,
                        firstParameterName = firstParameterName,
                        secondValue = criticalValues[k]
                    )

                    # finding upper and lower RCI limits through root function
                    repeatedConfidenceIntervals[treatmentArm, 1, k] <- .getRootThetaMeansMultiArm(
                        design = design, dataInput = dataInput,
                        treatmentArm = treatmentArm,
                        stage = k, directionUpper = TRUE,
                        normalApproximation = normalApproximation,
                        varianceOption = varianceOption,
                        thetaLow = thetaLow, thetaUp = thetaUp,
                        intersectionTest = intersectionTest,
                        firstParameterName = firstParameterName,
                        secondValue = criticalValues[k],
                        tolerance = tolerance
                    )
                    repeatedConfidenceIntervals[treatmentArm, 2, k] <- .getRootThetaMeansMultiArm(
                        design = design, dataInput = dataInput,
                        treatmentArm = treatmentArm,
                        stage = k, directionUpper = FALSE,
                        normalApproximation = normalApproximation,
                        varianceOption = varianceOption,
                        thetaLow = thetaLow, thetaUp = thetaUp,
                        intersectionTest = intersectionTest,
                        firstParameterName = firstParameterName,
                        secondValue = criticalValues[k],
                        tolerance = tolerance
                    )

                    # adjustment for binding futility bounds
                    if (k > 1 && !is.na(bounds[k - 1]) &&
                            conditionFunction(bounds[k - 1], border) &&
                            design$bindingFutility) {
                        parameterName <- ifelse(.isTrialDesignFisher(design),
                            "singleStepAdjustedPValues",
                            firstParameterName
                        )

                        # Calculate new lower and upper bounds
                        if (directionUpper) {
                            thetaLow <- .getUpperLowerThetaMeansMultiArm(
                                design = design, dataInput = dataInput,
                                theta = -1, treatmentArm = treatmentArm,
                                stage = k - 1, directionUpper = TRUE,
                                normalApproximation = normalApproximation,
                                varianceOption = varianceOption,
                                conditionFunction = conditionFunction,
                                intersectionTest = intersectionTest,
                                firstParameterName = parameterName,
                                secondValue = bounds[k - 1]
                            )
                        } else {
                            thetaUp <- .getUpperLowerThetaMeansMultiArm(
                                design = design, dataInput = dataInput,
                                theta = 1, treatmentArm = treatmentArm,
                                stage = k - 1, directionUpper = FALSE,
                                normalApproximation = normalApproximation,
                                varianceOption = varianceOption,
                                conditionFunction = conditionFunction,
                                intersectionTest = intersectionTest,
                                firstParameterName = parameterName,
                                secondValue = bounds[k - 1]
                            )
                        }

                        futilityCorr[k] <- .getRootThetaMeansMultiArm(
                            design = design, dataInput = dataInput,
                            treatmentArm = treatmentArm,
                            stage = k - 1, directionUpper = directionUpper,
                            normalApproximation = normalApproximation,
                            varianceOption = varianceOption,
                            thetaLow = thetaLow, thetaUp = thetaUp,
                            intersectionTest = intersectionTest,
                            firstParameterName = parameterName,
                            secondValue = bounds[k - 1],
                            tolerance = tolerance
                        )

                        # tighten the RCI limit by the futility correction
                        if (directionUpper) {
                            repeatedConfidenceIntervals[treatmentArm, 1, k] <- min(
                                min(futilityCorr[2:k]),
                                repeatedConfidenceIntervals[treatmentArm, 1, k]
                            )
                        } else {
                            repeatedConfidenceIntervals[treatmentArm, 2, k] <- max(
                                max(futilityCorr[2:k]),
                                repeatedConfidenceIntervals[treatmentArm, 2, k]
                            )
                        }
                    }

                    # invalidate crossed intervals (lower > upper)
                    if (!is.na(repeatedConfidenceIntervals[treatmentArm, 1, k]) &&
                            !is.na(repeatedConfidenceIntervals[treatmentArm, 2, k]) &&
                            repeatedConfidenceIntervals[treatmentArm, 1, k] >
                                repeatedConfidenceIntervals[treatmentArm, 2, k]) {
                        repeatedConfidenceIntervals[treatmentArm, , k] <- rep(NA_real_, 2)
                    }
                }
            }
            .logProgress("Repeated confidence intervals for stage %s calculated", startTime = startTime, k)
        }
    }

    return(repeatedConfidenceIntervals)
}

#'
#' RCIs based on inverse normal combination test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsMeansMultiArmInverseNormal <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT,
        varianceOption = C_VARIANCE_OPTION_MULTIARMED_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsMeansMultiArmInverseNormal",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design,
            powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )
    # the inverse normal combination statistic drives the RCI search
    return(.getRepeatedConfidenceIntervalsMeansMultiArmAll(
        design = design, dataInput = dataInput,
        normalApproximation = normalApproximation,
        varianceOption = varianceOption,
        directionUpper = directionUpper,
        intersectionTest = intersectionTest,
        tolerance = tolerance,
        firstParameterName = "combInverseNormal",
        ...
    ))
}

#'
#' RCIs based on Fisher's combination test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsMeansMultiArmFisher <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT,
        varianceOption = C_VARIANCE_OPTION_MULTIARMED_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsMeansMultiArmFisher",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design,
            powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )
    # Fisher's combination statistic drives the RCI search
    return(.getRepeatedConfidenceIntervalsMeansMultiArmAll(
        design = design, dataInput = dataInput,
        normalApproximation = normalApproximation,
        varianceOption = varianceOption,
        directionUpper = directionUpper,
        intersectionTest = intersectionTest,
        tolerance = tolerance,
        firstParameterName = "combFisher",
        ...
    ))
}

#'
#' CIs based on conditional Dunnett test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsMeansMultiArmConditionalDunnett <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT,
        varianceOption = C_VARIANCE_OPTION_MULTIARMED_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsMeansMultiArmConditionalDunnett",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )
    # the conditional Dunnett branch of the worker does not use a
    # combination statistic, hence firstParameterName = NA
    return(.getRepeatedConfidenceIntervalsMeansMultiArmAll(
        design = design, dataInput = dataInput,
        normalApproximation = normalApproximation,
        varianceOption = varianceOption,
        directionUpper = directionUpper,
        intersectionTest = intersectionTest,
        tolerance = tolerance,
        firstParameterName = NA,
        ...
    ))
}

#'
#' Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Means
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsMeansMultiArm <- function(..., design) {
    # dispatch on the design class to the matching RCI implementation
    if (.isTrialDesignInverseNormal(design)) {
        return(.getRepeatedConfidenceIntervalsMeansMultiArmInverseNormal(design = design, ...))
    }

    if (.isTrialDesignFisher(design)) {
        return(.getRepeatedConfidenceIntervalsMeansMultiArmFisher(design = design, ...))
    }

    if (.isTrialDesignConditionalDunnett(design)) {
        return(.getRepeatedConfidenceIntervalsMeansMultiArmConditionalDunnett(design = design, ...))
    }

    .stopWithWrongDesignMessage(design)
}

#'
#' Calculation of conditional power for Means
#'
#' @noRd
#'
.getConditionalPowerMeansMultiArm <- function(..., stageResults, stage = stageResults$stage,
        nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        thetaH1 = NA_real_, assumedStDevs = NA_real_,
        iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    # 'stDevsH1' is an accepted alias for 'assumedStDevs' and takes precedence
    stDevsH1 <- .getOptionalArgument("stDevsH1", ...)
    if (!is.null(stDevsH1) && !is.na(stDevsH1)) {
        if (!is.na(assumedStDevs)) {
            warning(sQuote("assumedStDevs"), " will be ignored because ",
                sQuote("stDevsH1"), " is defined",
                call.
= FALSE ) } assumedStDevs <- stDevsH1 } design <- stageResults$.design gMax <- stageResults$getGMax() kMax <- design$kMax results <- ConditionalPowerResultsMultiArmMeans( .design = design, .stageResults = stageResults, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned ) if (any(is.na(nPlanned))) { return(results) } .assertIsValidStage(stage, kMax) if (stage == kMax) { .logDebug( "Conditional power will be calculated only for subsequent stages ", "(stage = ", stage, ", kMax = ", kMax, ")" ) return(results) } if (!.isValidNPlanned(nPlanned = nPlanned, kMax = kMax, stage = stage)) { return(results) } .assertIsValidNPlanned(nPlanned, kMax, stage) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) .setValueAndParameterType(results, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT) assumedStDevs <- .assertIsValidAssumedStDevForMultiHypotheses( assumedStDevs, stageResults, stage, results = results ) .assertIsValidAssumedStDevs(assumedStDevs, gMax) thetaH1 <- .assertIsValidThetaH1ForMultiArm(thetaH1, stageResults, stage, results = results) results$.setParameterType("nPlanned", C_PARAM_USER_DEFINED) if (length(thetaH1) != 1 && length(thetaH1) != gMax) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sprintf(paste0( "length of 'thetaH1' (%s) ", "must be equal to 'gMax' (%s) or 1" ), .arrayToString(thetaH1), gMax) ) } if (length(assumedStDevs) == 1) { results$assumedStDevs <- rep(assumedStDevs, gMax) results$.setParameterType("assumedStDevs", C_PARAM_GENERATED) } if (.isTrialDesignInverseNormal(design)) { return(.getConditionalPowerMeansMultiArmInverseNormal( results = results, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, ... 
)) } else if (.isTrialDesignFisher(design)) { return(.getConditionalPowerMeansMultiArmFisher( results = results, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, iterations = iterations, seed = seed, ... )) } else if (.isTrialDesignConditionalDunnett(design)) { return(.getConditionalPowerMeansMultiArmConditionalDunnett( results = results, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, ... )) } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of TrialDesignInverseNormal, TrialDesignFisher, or ", "TrialDesignConditionalDunnett" ) } #' #' Calculation of conditional power based on inverse normal method #' #' @noRd #' .getConditionalPowerMeansMultiArmInverseNormal <- function(..., results, stageResults, stage, allocationRatioPlanned, nPlanned, thetaH1, assumedStDevs) { design <- stageResults$.design .assertIsTrialDesignInverseNormal(design) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerMeansMultiArmInverseNormal", ignore = c("stage", "design", "stDevsH1"), ... 
) kMax <- design$kMax gMax <- stageResults$getGMax() # results$conditionalPower <- matrix(NA_real_, nrow = gMax, ncol = kMax) weights <- .getWeightsInverseNormal(design) informationRates <- design$informationRates nPlanned <- c(rep(NA_real_, stage), nPlanned) nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned if (length(thetaH1) == 1) { thetaH1 <- rep(thetaH1, gMax) results$.setParameterType("thetaH1", C_PARAM_GENERATED) } else { results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE) } results$.setParameterType("assumedStDevs", C_PARAM_DEFAULT_VALUE) if (stageResults$directionUpper) { standardizedEffect <- (thetaH1 - stageResults$thetaH0) / assumedStDevs } else { standardizedEffect <- -(thetaH1 - stageResults$thetaH0) / assumedStDevs } ctr <- .performClosedCombinationTest(stageResults = stageResults) criticalValues <- design$criticalValues for (treatmentArm in 1:gMax) { if (!is.na(ctr$separatePValues[treatmentArm, stage])) { # shifted decision region for use in getGroupSeqProbs # Inverse Normal Method shiftedDecisionRegionUpper <- criticalValues[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - min(ctr$overallAdjustedTestStatistics[ctr$indices[, treatmentArm] == 1, stage], na.rm = TRUE) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - standardizedEffect[treatmentArm] * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - min(ctr$overallAdjustedTestStatistics[ctr$indices[, treatmentArm] == 1, stage], na.rm = TRUE) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - 
standardizedEffect[treatmentArm] * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - informationRates[stage]) decisionMatrix <- matrix(c( shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper ), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities( decisionMatrix = decisionMatrix, informationRates = scaledInformation ) results$conditionalPower[treatmentArm, (stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned results$nPlanned <- nPlanned results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$thetaH1 <- thetaH1 results$assumedStDevs <- assumedStDevs return(results) } #' #' Calculation of conditional power based on Fisher's combination test #' #' @noRd #' .getConditionalPowerMeansMultiArmFisher <- function(..., results, stageResults, stage, allocationRatioPlanned, nPlanned, thetaH1, assumedStDevs, iterations, seed) { design <- stageResults$.design .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerMeansMultiArmFisher", ignore = c("stage", "design", "stDevsH1"), ... 
) kMax <- design$kMax gMax <- stageResults$getGMax() criticalValues <- design$criticalValues weightsFisher <- .getWeightsFisher(design) # results$conditionalPower <- matrix(NA_real_, nrow = gMax, ncol = kMax) results$iterations <- as.integer(iterations) results$.setParameterType("iterations", C_PARAM_USER_DEFINED) results$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) results$seed <- .setSeed(seed) results$simulated <- FALSE results$.setParameterType("simulated", C_PARAM_DEFAULT_VALUE) if (length(thetaH1) == 1) { thetaH1 <- rep(thetaH1, gMax) results$.setParameterType("thetaH1", C_PARAM_GENERATED) } else { results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE) } results$.setParameterType("assumedStDevs", C_PARAM_DEFAULT_VALUE) if (stageResults$directionUpper) { standardizedEffect <- (thetaH1 - stageResults$thetaH0) / assumedStDevs } else { standardizedEffect <- -(thetaH1 - stageResults$thetaH0) / assumedStDevs } nPlanned <- c(rep(NA_real_, stage), nPlanned) nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned ctr <- .performClosedCombinationTest(stageResults = stageResults) for (treatmentArm in 1:gMax) { if (!is.na(ctr$separatePValues[treatmentArm, stage])) { if (gMax == 1) { pValues <- ctr$adjustedStageWisePValues[ctr$indices[, treatmentArm] == 1, ][1:stage] } else { pValues <- ctr$adjustedStageWisePValues[ctr$indices[, treatmentArm] == 1, ][which.max( ctr$overallAdjustedTestStatistics[ctr$indices[, treatmentArm] == 1, stage] ), 1:stage] } if (stage < kMax - 1) { for (k in (stage + 1):kMax) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueConditionalPowerFisher( kMax = kMax, alpha0Vec = design$alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, pValues = pValues, currentKMax = k, thetaH1 = standardizedEffect[treatmentArm], stage = stage, nPlanned = nPlanned ) } results$conditionalPower[treatmentArm, k] <- reject / iterations } 
results$simulated <- TRUE results$.setParameterType("simulated", C_PARAM_GENERATED) } else if (stage == kMax - 1) { divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)]) result <- 1 - (criticalValues[kMax] / divisor)^(1 / weightsFisher[kMax]) if (result <= 0 || result >= 1) { warning("Calculation not possible: could not calculate ", "conditional power for stage ", kMax, call. = FALSE ) results$conditionalPower[treatmentArm, kMax] <- NA_real_ } else { results$conditionalPower[treatmentArm, kMax] <- 1 - stats::pnorm(.getQNorm(result) - standardizedEffect[treatmentArm] * sqrt(nPlanned[kMax])) } } } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned results$nPlanned <- nPlanned results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$thetaH1 <- thetaH1 results$assumedStDevs <- assumedStDevs if (!results$simulated) { results$iterations <- NA_integer_ results$seed <- NA_real_ results$.setParameterType("iterations", C_PARAM_NOT_APPLICABLE) results$.setParameterType("seed", C_PARAM_NOT_APPLICABLE) } return(results) } #' #' Calculation of conditional power based on conditional Dunnett test #' #' @noRd #' .getConditionalPowerMeansMultiArmConditionalDunnett <- function(..., results, stageResults, stage, allocationRatioPlanned, nPlanned, thetaH1, assumedStDevs) { design <- stageResults$.design .assertIsTrialDesignConditionalDunnett(design) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerMeansMultiArmConditionalDunnett", ignore = c("stage", "intersectionTest", "design", "stDevsH1"), ... ) if (stage > 1) { warning("Conditional power is only calculated for the first (interim) stage", call. 
= FALSE) } kMax <- 2 gMax <- stageResults$getGMax() nPlanned <- c(rep(NA_real_, stage), nPlanned) nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned if (length(thetaH1) == 1) { thetaH1 <- rep(thetaH1, gMax) results$.setParameterType("thetaH1", C_PARAM_GENERATED) } else { results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE) } results$.setParameterType("assumedStDevs", C_PARAM_DEFAULT_VALUE) if (stageResults$directionUpper) { standardizedEffect <- (thetaH1 - stageResults$thetaH0) / assumedStDevs } else { standardizedEffect <- -(thetaH1 - stageResults$thetaH0) / assumedStDevs } ctr <- .getClosedConditionalDunnettTestResults(stageResults = stageResults, design = design, stage = stage) for (treatmentArm in 1:gMax) { if (!is.na(ctr$separatePValues[treatmentArm, stage])) { results$conditionalPower[treatmentArm, 2] <- 1 - stats::pnorm(.getOneMinusQNorm(min(ctr$conditionalErrorRate[ ctr$indices[, treatmentArm] == 1, stage ], na.rm = TRUE)) - standardizedEffect[treatmentArm] * sqrt(nPlanned[2])) } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned results$nPlanned <- nPlanned results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$thetaH1 <- thetaH1 results$assumedStDevs <- assumedStDevs return(results) } #' #' Calculation of conditional power and likelihood values for plotting the graph #' #' @noRd #' .getConditionalPowerLikelihoodMeansMultiArm <- function(..., stageResults, stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaRange, assumedStDevs = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .associatedArgumentsAreDefined(nPlanned = nPlanned, thetaRange = thetaRange) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) design <- stageResults$.design kMax <- design$kMax gMax 
<- stageResults$getGMax() intersectionTest <- stageResults$intersectionTest assumedStDevs <- .assertIsValidAssumedStDevForMultiHypotheses(assumedStDevs, stageResults, stage) if (length(assumedStDevs) == 1) { assumedStDevs <- rep(assumedStDevs, gMax) } thetaRange <- .assertIsValidThetaRange(thetaRange = thetaRange) treatmentArms <- numeric(gMax * length(thetaRange)) effectValues <- numeric(gMax * length(thetaRange)) condPowerValues <- numeric(gMax * length(thetaRange)) likelihoodValues <- numeric(gMax * length(thetaRange)) stdErr <- stageResults$overallStDevs[, stage] * sqrt(1 / stageResults$.dataInput$getOverallSampleSizes(stage = stage, group = gMax + 1) + 1 / stageResults$.dataInput$getOverallSampleSizes(stage = stage, group = (1:gMax))) results <- ConditionalPowerResultsMultiArmMeans( .design = design, .stageResults = stageResults, assumedStDevs = assumedStDevs, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned ) j <- 1 for (i in seq(along = thetaRange)) { for (treatmentArm in 1:gMax) { treatmentArms[j] <- treatmentArm effectValues[j] <- thetaRange[i] if (.isTrialDesignInverseNormal(design)) { condPowerValues[j] <- .getConditionalPowerMeansMultiArmInverseNormal( results = results, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDevs = assumedStDevs )$conditionalPower[treatmentArm, kMax] } else if (.isTrialDesignFisher(design)) { condPowerValues[j] <- .getConditionalPowerMeansMultiArmFisher( results = results, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDevs = assumedStDevs, iterations = iterations, seed = seed )$conditionalPower[treatmentArm, kMax] } else if (.isTrialDesignConditionalDunnett(design)) { condPowerValues[j] <- .getConditionalPowerMeansMultiArmConditionalDunnett( results = results, stageResults = stageResults, stage = stage, 
nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDevs = assumedStDevs )$conditionalPower[treatmentArm, 2] } likelihoodValues[j] <- stats::dnorm( thetaRange[i], stageResults$effectSizes[treatmentArm, stage], stdErr[treatmentArm] ) / stats::dnorm(0, 0, stdErr[treatmentArm]) j <- j + 1 } } subtitle <- paste0( "Intersection test = ", intersectionTest, ", stage = ", stage, ", # of remaining subjects = ", sum(nPlanned), ", sd = ", .formatSubTitleValue(assumedStDevs, "assumedStDevs"), ", allocation ratio = ", .formatSubTitleValue(allocationRatioPlanned, "allocationRatioPlanned") ) return(list( treatmentArms = treatmentArms, xValues = effectValues, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main = C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, xlab = "Effect size", ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD, sub = subtitle )) } rpact/R/class_design_plan.R0000644000176200001440000027331314445307575015406 0ustar liggesusers## | ## | *Trial design plan classes* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_constants.R #' @include f_design_utilities.R NULL C_VARIABLE_DESIGN_PLAN_PARAMETERS <- c("lambda1", "pi1", "median1", "alternative", "hazardRatio") C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS <- list( normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = 0, 
alternative = seq(0.2, 1, 0.2), stDev = 1, groups = 2L, allocationRatioPlanned = 1 ) C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES <- list( normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = 0, pi1 = C_PI_1_SAMPLE_SIZE_DEFAULT, pi2 = C_PI_2_DEFAULT, groups = 2L, allocationRatioPlanned = 1 ) C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_SURVIVAL <- list( typeOfComputation = "Schoenfeld", thetaH0 = 1, pi2 = C_PI_2_DEFAULT, pi1 = C_PI_1_SAMPLE_SIZE_DEFAULT, allocationRatioPlanned = 1, accountForObservationTimes = NA, eventTime = 12, accrualTime = C_ACCRUAL_TIME_DEFAULT, accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT, kappa = 1, piecewiseSurvivalTime = NA_real_, lambda2 = NA_real_, lambda1 = NA_real_, followUpTime = C_FOLLOW_UP_TIME_DEFAULT, maxNumberOfSubjects = 0, dropoutRate1 = 0, dropoutRate2 = 0, dropoutTime = 12 ) #' #' @name TrialDesignPlan #' #' @title #' Basic Trial Design Plan #' #' @description #' Basic class for trial design plans. #' #' @details #' \code{TrialDesignPlan} is the basic class for #' \itemize{ #' \item \code{\link{TrialDesignPlanMeans}}, #' \item \code{\link{TrialDesignPlanRates}}, and #' \item \code{\link{TrialDesignPlanSurvival}}. #' } #' #' @include f_core_constants.R #' @include f_core_utilities.R #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_design.R #' @include class_design_set.R #' @include f_core_plot.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignPlan <- setRefClass("TrialDesignPlan", contains = "ParameterSet", fields = list( .plotSettings = "PlotSettings", .design = "TrialDesign", .objectType = "character" # "sampleSize" or "power" ), methods = list( initialize = function(design, ...) { callSuper(.design = design, ...) 
.plotSettings <<- PlotSettings() .parameterNames <<- .getParameterNames(design = design, designPlan = .self) .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS if (.isTrialDesignPlanMeans(.self)) { defaultValueList <- C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS } else if (.isTrialDesignPlanRates(.self)) { defaultValueList <- C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES } else if (.isTrialDesignPlanSurvival(.self)) { defaultValueList <- C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_SURVIVAL } for (parameterName in .getVisibleFieldNames()) { defaultValue <- defaultValueList[[parameterName]] existingValue <- .self[[parameterName]] if (all(is.na(existingValue))) { .setParameterType(parameterName, C_PARAM_DEFAULT_VALUE) } else if (!is.null(defaultValue) && length(defaultValue) == length(existingValue) && !any(is.na(defaultValue)) && !any(is.na(existingValue)) && sum(defaultValue == existingValue) == length(defaultValue)) { .setParameterType(parameterName, C_PARAM_DEFAULT_VALUE) } else { .setParameterType(parameterName, C_PARAM_USER_DEFINED) } } .setParameterType("optimumAllocationRatio", C_PARAM_NOT_APPLICABLE) }, .setSampleSizeObject = function(objectType) { if (length(objectType) == 0 || !(objectType %in% c("sampleSize", "power"))) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'.objectType' (", objectType, ") must be specified as 'sampleSize' or 'power'" ) } .objectType <<- objectType }, .isSampleSizeObject = function() { if (length(.objectType) == 0 || !(.objectType %in% c("sampleSize", "power"))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'.objectType' must be specified as 'sampleSize' or 'power'") } return(.objectType == "sampleSize") }, .isPowerObject = function() { if (length(.objectType) == 0 || !(.objectType %in% c("sampleSize", "power"))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'.objectType' must be specified as 'sampleSize' or 'power'") } return(.objectType == "power") }, getPlotSettings = function() { return(.plotSettings) }, show = function(showType = 1, digits = 
NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { "Method for automatically printing trial plan objects" .resetCat() if (showType == 3) { .createSummary(.self, digits = digits)$.show( showType = 1, digits = digits, consoleOutputEnabled = consoleOutputEnabled ) } else if (showType == 2) { callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Design plan parameters and output for ", .toString(), ":\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getDesignParametersToShow(.self), "Design parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getGeneratedParameters(), "Sample size and output", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) if (inherits(.self, "TrialDesignPlanSurvival") || groups == 2 || .design$kMax > 1) { .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled ) if (inherits(.self, "TrialDesignPlanSurvival") || groups == 2) { .cat(" (i): values of treatment arm i\n", consoleOutputEnabled = consoleOutputEnabled ) } if (.design$kMax > 1) { .cat(" [k]: values at stage k\n", consoleOutputEnabled = consoleOutputEnabled) } } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } }, getAlpha = function() { return(.design$alpha) }, getBeta = function() { if (.isTrialDesignInverseNormalOrGroupSequential(.design)) { return(.design$beta) } return(NA_real_) }, 
getSided = function() { return(.design$sided) }, getTwoSidedPower = function() { if (.isTrialDesignInverseNormalOrGroupSequential(.design)) { return(.design$twoSidedPower) } return(NA) }, .toString = function(startWithUpperCase = FALSE) { if (.isTrialDesignPlanMeans(.self)) { s <- "means" } else if (.isTrialDesignPlanRates(.self)) { s <- "rates" } else if (.isTrialDesignPlanSurvival(.self)) { s <- "survival data" } else { s <- paste0("unknown data class '", .getClassName(.self), "'") } return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) } ) ) #' #' @title #' Coerce Trial Design Plan to a Data Frame #' #' @description #' Returns the \code{\link{TrialDesignPlan}} as data frame. #' #' @param x A \code{\link{TrialDesignPlan}} object. #' @inheritParams param_niceColumnNamesEnabled #' @inheritParams param_includeAllParameters #' @inheritParams param_three_dots #' #' @details #' Coerces the design plan to a data frame. #' #' @template return_dataframe #' #' @examples #' as.data.frame(getSampleSizeMeans()) #' #' @export #' #' @keywords internal #' as.data.frame.TrialDesignPlan <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) { return(.getAsDataFrame( parameterSet = x, parameterNames = NULL, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters )) } #' #' @name TrialDesignPlanMeans #' #' @title #' Trial Design Plan Means #' #' @description #' Trial design plan for means. 
#' #' @template field_meanRatio #' @template field_thetaH0 #' @template field_normalApproximation #' @template field_alternative #' @template field_stDev #' @template field_groups #' @template field_allocationRatioPlanned #' @template field_optimumAllocationRatio #' @template field_directionUpper #' @template field_effect #' @template field_overallReject #' @template field_rejectPerStage #' @template field_futilityStop #' @template field_futilityPerStage #' @template field_earlyStop #' @template field_expectedNumberOfSubjects #' @template field_nFixed #' @template field_nFixed1 #' @template field_nFixed2 #' @template field_informationRates #' @template field_maxNumberOfSubjects #' @template field_maxNumberOfSubjects1 #' @template field_maxNumberOfSubjects2 #' @template field_numberOfSubjects #' @template field_numberOfSubjects1 #' @template field_numberOfSubjects2 #' @template field_expectedNumberOfSubjectsH0 #' @template field_expectedNumberOfSubjectsH01 #' @template field_expectedNumberOfSubjectsH1 #' @template field_criticalValuesEffectScale #' @template field_criticalValuesEffectScaleLower #' @template field_criticalValuesEffectScaleUpper #' @template field_criticalValuesPValueScale #' @template field_futilityBoundsEffectScale #' @template field_futilityBoundsEffectScaleLower #' @template field_futilityBoundsEffectScaleUpper #' @template field_futilityBoundsPValueScale #' #' @details #' This object cannot be created directly; use \code{\link[=getSampleSizeMeans]{getSampleSizeMeans()}} #' with suitable arguments to create a design plan for a dataset of means. 
#' #' @include class_core_parameter_set.R #' @include class_design.R #' @include class_design_set.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignPlanMeans <- setRefClass("TrialDesignPlanMeans", contains = "TrialDesignPlan", fields = list( meanRatio = "logical", thetaH0 = "numeric", normalApproximation = "logical", alternative = "numeric", stDev = "numeric", groups = "numeric", allocationRatioPlanned = "numeric", optimumAllocationRatio = "logical", directionUpper = "logical", effect = "numeric", overallReject = "numeric", rejectPerStage = "matrix", futilityStop = "numeric", futilityPerStage = "matrix", earlyStop = "numeric", expectedNumberOfSubjects = "numeric", nFixed = "numeric", nFixed1 = "numeric", nFixed2 = "numeric", informationRates = "matrix", maxNumberOfSubjects = "numeric", maxNumberOfSubjects1 = "numeric", maxNumberOfSubjects2 = "numeric", numberOfSubjects = "matrix", numberOfSubjects1 = "matrix", numberOfSubjects2 = "matrix", expectedNumberOfSubjectsH0 = "numeric", expectedNumberOfSubjectsH01 = "numeric", expectedNumberOfSubjectsH1 = "numeric", criticalValuesEffectScale = "matrix", criticalValuesEffectScaleLower = "matrix", criticalValuesEffectScaleUpper = "matrix", criticalValuesPValueScale = "matrix", futilityBoundsEffectScale = "matrix", futilityBoundsEffectScaleLower = "matrix", futilityBoundsEffectScaleUpper = "matrix", futilityBoundsPValueScale = "matrix" ), methods = list( initialize = function(..., normalApproximation = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["normalApproximation"]], meanRatio = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["meanRatio"]], thetaH0 = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["thetaH0"]], alternative = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["alternative"]], stDev = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["stDev"]], groups = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["groups"]], allocationRatioPlanned = 
C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_MEANS[["allocationRatioPlanned"]]) { callSuper(..., normalApproximation = normalApproximation, meanRatio = meanRatio, thetaH0 = thetaH0, alternative = alternative, stDev = stDev, groups = groups, allocationRatioPlanned = allocationRatioPlanned ) optimumAllocationRatio <<- FALSE visibleFieldNames <- .getVisibleFieldNames() startIndex <- which(visibleFieldNames == "directionUpper") for (i in startIndex:length(visibleFieldNames)) { .setParameterType(visibleFieldNames[i], C_PARAM_NOT_APPLICABLE) } if (groups == 1) { .setParameterType("meanRatio", C_PARAM_NOT_APPLICABLE) .setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE) } .setParameterType("maxNumberOfSubjects1", C_PARAM_NOT_APPLICABLE) .setParameterType("maxNumberOfSubjects2", C_PARAM_NOT_APPLICABLE) .setParameterType("criticalValuesEffectScale", C_PARAM_NOT_APPLICABLE) .setParameterType("criticalValuesEffectScaleLower", C_PARAM_NOT_APPLICABLE) .setParameterType("criticalValuesEffectScaleUpper", C_PARAM_NOT_APPLICABLE) .setParameterType("futilityBoundsEffectScale", C_PARAM_NOT_APPLICABLE) .setParameterType("futilityBoundsEffectScaleLower", C_PARAM_NOT_APPLICABLE) .setParameterType("futilityBoundsEffectScaleUpper", C_PARAM_NOT_APPLICABLE) }, clone = function(alternative = NA_real_) { alternativeTemp <- alternative if (any(is.na(alternative))) { alternativeTemp <- .self$alternative } if (.objectType == "sampleSize") { result <- getSampleSizeMeans( design = .self$.design, normalApproximation = .self$.getParameterValueIfUserDefinedOrDefault("normalApproximation"), meanRatio = .self$meanRatio, # .getParameterValueIfUserDefinedOrDefault("meanRatio"), thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"), alternative = alternativeTemp, stDev = .self$.getParameterValueIfUserDefinedOrDefault("stDev"), groups = .self$.getParameterValueIfUserDefinedOrDefault("groups"), allocationRatioPlanned = 
.self$.getParameterValueIfUserDefinedOrDefault("allocationRatioPlanned") ) } else { result <- getPowerMeans( design = .self$.design, normalApproximation = .self$.getParameterValueIfUserDefinedOrDefault("normalApproximation"), meanRatio = .self$meanRatio, # .getParameterValueIfUserDefinedOrDefault("meanRatio"), thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"), alternative = alternativeTemp, stDev = .self$.getParameterValueIfUserDefinedOrDefault("stDev"), directionUpper = .self$.getParameterValueIfUserDefinedOrDefault("directionUpper"), maxNumberOfSubjects = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfSubjects"), groups = .self$.getParameterValueIfUserDefinedOrDefault("groups"), allocationRatioPlanned = .self$.getParameterValueIfUserDefinedOrDefault("allocationRatioPlanned") ) } result$.plotSettings <- .self$.plotSettings return(result) }, show = function(showType = 1, digits = NA_integer_) { "Method for automatically printing trial plan objects" callSuper(showType = showType, digits = digits) } ) ) #' #' @name TrialDesignPlanRates #' #' @title #' Trial Design Plan Rates #' #' @description #' Trial design plan for rates. 
#'
#' @template field_riskRatio
#' @template field_thetaH0
#' @template field_normalApproximation
#' @template field_pi1
#' @template field_pi2
#' @template field_groups
#' @template field_allocationRatioPlanned
#' @template field_optimumAllocationRatio
#' @template field_directionUpper
#' @template field_effect
#' @template field_overallReject
#' @template field_rejectPerStage
#' @template field_futilityStop
#' @template field_futilityPerStage
#' @template field_earlyStop
#' @template field_expectedNumberOfSubjects
#' @template field_nFixed
#' @template field_nFixed1
#' @template field_nFixed2
#' @template field_informationRates
#' @template field_maxNumberOfSubjects
#' @template field_maxNumberOfSubjects1
#' @template field_maxNumberOfSubjects2
#' @template field_numberOfSubjects
#' @template field_numberOfSubjects1
#' @template field_numberOfSubjects2
#' @template field_expectedNumberOfSubjectsH0
#' @template field_expectedNumberOfSubjectsH01
#' @template field_expectedNumberOfSubjectsH1
#' @template field_criticalValuesEffectScale
#' @template field_criticalValuesEffectScaleLower
#' @template field_criticalValuesEffectScaleUpper
#' @template field_criticalValuesPValueScale
#' @template field_futilityBoundsEffectScale
#' @template field_futilityBoundsEffectScaleLower
#' @template field_futilityBoundsEffectScaleUpper
#' @template field_futilityBoundsPValueScale
#'
#' @details
#' This object cannot be created directly; use \code{\link[=getSampleSizeRates]{getSampleSizeRates()}}
#' with suitable arguments to create a design plan for a dataset of rates.
#'
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_design_set.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
TrialDesignPlanRates <- setRefClass("TrialDesignPlanRates",
    contains = "TrialDesignPlan",
    fields = list(
        riskRatio = "logical",
        thetaH0 = "numeric",
        normalApproximation = "logical",
        pi1 = "numeric",
        pi2 = "numeric",
        groups = "numeric",
        allocationRatioPlanned = "numeric",
        optimumAllocationRatio = "logical",
        directionUpper = "logical",
        effect = "numeric",
        expectedNumberOfSubjects = "numeric",
        nFixed = "numeric",
        nFixed1 = "numeric",
        nFixed2 = "numeric",
        overallReject = "numeric",
        rejectPerStage = "matrix",
        futilityStop = "numeric",
        futilityPerStage = "matrix",
        earlyStop = "numeric",
        informationRates = "matrix",
        maxNumberOfSubjects = "numeric",
        maxNumberOfSubjects1 = "numeric",
        maxNumberOfSubjects2 = "numeric",
        numberOfSubjects = "matrix",
        numberOfSubjects1 = "matrix",
        numberOfSubjects2 = "matrix",
        expectedNumberOfSubjectsH0 = "numeric",
        expectedNumberOfSubjectsH01 = "numeric",
        expectedNumberOfSubjectsH1 = "numeric",
        criticalValuesEffectScale = "matrix",
        criticalValuesEffectScaleLower = "matrix",
        criticalValuesEffectScaleUpper = "matrix",
        criticalValuesPValueScale = "matrix",
        futilityBoundsEffectScale = "matrix",
        futilityBoundsEffectScaleLower = "matrix",
        futilityBoundsEffectScaleUpper = "matrix",
        futilityBoundsPValueScale = "matrix"
    ),
    methods = list(
        # Initializes the plan with the rates-specific design parameters
        # (defaults taken from C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES)
        # and marks all not-yet-computed result fields as "not applicable".
        initialize = function(...,
                normalApproximation = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["normalApproximation"]],
                riskRatio = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["riskRatio"]],
                thetaH0 = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["thetaH0"]],
                pi1 = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["pi1"]],
                pi2 = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["pi2"]],
                groups = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["groups"]],
                allocationRatioPlanned = C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_RATES[["allocationRatioPlanned"]]) {
            callSuper(...,
                normalApproximation = normalApproximation,
                riskRatio = riskRatio,
                thetaH0 = thetaH0,
                pi1 = pi1,
                pi2 = pi2,
                groups = groups,
                allocationRatioPlanned = allocationRatioPlanned
            )
            optimumAllocationRatio <<- FALSE
            # All visible fields from 'directionUpper' onwards are result fields
            # that get filled in later by the sample size / power calculation.
            visibleFieldNames <- .getVisibleFieldNames()
            startIndex <- which(visibleFieldNames == "directionUpper")
            for (i in startIndex:length(visibleFieldNames)) {
                .setParameterType(visibleFieldNames[i], C_PARAM_NOT_APPLICABLE)
            }
            if (groups == 1) {
                # NOTE(review): "meanRatio" is a field of the means design plan,
                # not of this rates class - presumably a copy-paste leftover
                # (cf. TrialDesignPlanMeans$initialize); verify intent.
                .setParameterType("meanRatio", C_PARAM_NOT_APPLICABLE)
                .setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE)
            }
            .setParameterType("maxNumberOfSubjects1", C_PARAM_NOT_APPLICABLE)
            .setParameterType("maxNumberOfSubjects2", C_PARAM_NOT_APPLICABLE)
            .setParameterType("criticalValuesEffectScale", C_PARAM_NOT_APPLICABLE)
            .setParameterType("criticalValuesEffectScaleLower", C_PARAM_NOT_APPLICABLE)
            .setParameterType("criticalValuesEffectScaleUpper", C_PARAM_NOT_APPLICABLE)
            .setParameterType("futilityBoundsEffectScale", C_PARAM_NOT_APPLICABLE)
            .setParameterType("futilityBoundsEffectScaleLower", C_PARAM_NOT_APPLICABLE)
            .setParameterType("futilityBoundsEffectScaleUpper", C_PARAM_NOT_APPLICABLE)
        },
        # Re-runs the originating calculation (getSampleSizeRates or
        # getPowerRates, depending on .objectType) with this object's
        # user-defined/default parameters; 'pi1' may be overridden
        # (NA keeps the current value).
        clone = function(pi1 = NA_real_) {
            pi1Temp <- pi1
            if (any(is.na(pi1))) {
                pi1Temp <- .self$pi1
            }
            if (.objectType == "sampleSize") {
                return(getSampleSizeRates(
                    design = .self$.design,
                    normalApproximation = .self$.getParameterValueIfUserDefinedOrDefault("normalApproximation"),
                    riskRatio = .self$riskRatio, # .getParameterValueIfUserDefinedOrDefault("riskRatio"),
                    thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"),
                    pi1 = pi1Temp,
                    pi2 = .self$.getParameterValueIfUserDefinedOrDefault("pi2"),
                    groups = .self$.getParameterValueIfUserDefinedOrDefault("groups"),
                    allocationRatioPlanned = .self$.getParameterValueIfUserDefinedOrDefault("allocationRatioPlanned")
                ))
            } else {
                return(getPowerRates(
                    design = .self$.design,
                    riskRatio = .self$riskRatio, # .getParameterValueIfUserDefinedOrDefault("riskRatio"),
                    thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"),
                    pi1 = pi1Temp,
                    pi2 = .self$.getParameterValueIfUserDefinedOrDefault("pi2"),
                    directionUpper = .self$.getParameterValueIfUserDefinedOrDefault("directionUpper"),
                    maxNumberOfSubjects = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfSubjects"),
                    groups = .self$.getParameterValueIfUserDefinedOrDefault("groups"),
                    allocationRatioPlanned = .self$.getParameterValueIfUserDefinedOrDefault("allocationRatioPlanned")
                ))
            }
        },
        show = function(showType = 1, digits = NA_integer_) {
            "Method for automatically printing trial plan objects"
            callSuper(showType = showType, digits = digits)
        }
    )
)

#'
#' @name TrialDesignPlanSurvival
#'
#' @title
#' Trial Design Plan Survival
#'
#' @description
#' Trial design plan for survival data.
#'
#' @template field_thetaH0
#' @template field_typeOfComputation
#' @template field_directionUpper
#' @template field_pi1_survival
#' @template field_pi2_survival
#' @template field_median1
#' @template field_median2
#' @template field_lambda1
#' @template field_lambda2
#' @template field_hazardRatio
#' @template field_maxNumberOfSubjects
#' @template field_maxNumberOfSubjects1
#' @template field_maxNumberOfSubjects2
#' @template field_maxNumberOfEvents
#' @template field_allocationRatioPlanned
#' @template field_optimumAllocationRatio
#' @template field_accountForObservationTimes
#' @template field_eventTime
#' @template field_accrualTime
#' @template field_totalAccrualTime
#' @template field_accrualIntensity
#' @template field_accrualIntensityRelative
#' @template field_kappa
#' @template field_piecewiseSurvivalTime
#' @template field_followUpTime
#' @template field_dropoutRate1
#' @template field_dropoutRate2
#' @template field_dropoutTime
#' @template field_chi
#' @template field_expectedNumberOfEvents
#' @template field_eventsFixed
#' @template field_nFixed
#' @template field_nFixed1
#' @template field_nFixed2
#' @template field_overallReject
#' @template field_rejectPerStage
#' @template field_futilityStop
#' @template field_futilityPerStage
#' @template field_earlyStop
#' @template field_informationRates
#' @template field_analysisTime
#' @template field_studyDurationH1
#' @template field_studyDuration
#' @template field_maxStudyDuration
#' @template field_eventsPerStage
#' @template field_expectedEventsH0
#' @template field_expectedEventsH01
#' @template field_expectedEventsH1
#' @template field_numberOfSubjects
#' @template field_numberOfSubjects1
#' @template field_numberOfSubjects2
#' @template field_expectedNumberOfSubjectsH1
#' @template field_expectedNumberOfSubjects
#' @template field_criticalValuesEffectScale
#' @template field_criticalValuesEffectScaleLower
#' @template field_criticalValuesEffectScaleUpper
#' @template field_criticalValuesPValueScale
#' @template field_futilityBoundsEffectScale
#' @template field_futilityBoundsEffectScaleLower
#' @template field_futilityBoundsEffectScaleUpper
#' @template field_futilityBoundsPValueScale
#'
#' @details
#' This object cannot be created directly; use \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}}
#' with suitable arguments to create a design plan for a dataset of survival data.
#'
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_design_set.R
#' @include class_time.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
TrialDesignPlanSurvival <- setRefClass("TrialDesignPlanSurvival",
    contains = "TrialDesignPlan",
    fields = list(
        .piecewiseSurvivalTime = "PiecewiseSurvivalTime",
        .accrualTime = "AccrualTime",
        .calculateFollowUpTime = "logical",
        thetaH0 = "numeric",
        typeOfComputation = "character",
        directionUpper = "logical",
        pi1 = "numeric",
        pi2 = "numeric",
        median1 = "numeric",
        median2 = "numeric",
        lambda1 = "numeric",
        lambda2 = "numeric",
        hazardRatio = "numeric",
        maxNumberOfSubjects = "numeric",
        maxNumberOfSubjects1 = "numeric",
        maxNumberOfSubjects2 = "numeric",
        maxNumberOfEvents = "numeric",
        allocationRatioPlanned = "numeric",
        optimumAllocationRatio = "logical",
        accountForObservationTimes = "logical",
        eventTime = "numeric",
        accrualTime = "numeric",
        totalAccrualTime = "numeric",
        accrualIntensity = "numeric",
        accrualIntensityRelative = "numeric",
        kappa = "numeric",
        piecewiseSurvivalTime = "numeric",
        followUpTime = "numeric",
        dropoutRate1 = "numeric",
        dropoutRate2 = "numeric",
        dropoutTime = "numeric",
        chi = "numeric",
        expectedNumberOfEvents = "numeric",
        eventsFixed = "numeric",
        nFixed = "numeric",
        nFixed1 = "numeric",
        nFixed2 = "numeric",
        overallReject = "numeric",
        rejectPerStage = "matrix",
        futilityStop = "numeric",
        futilityPerStage = "matrix",
        earlyStop = "numeric",
        informationRates = "matrix",
        analysisTime = "matrix",
        studyDurationH1 = "numeric",
        studyDuration = "numeric",
        maxStudyDuration = "numeric",
        eventsPerStage = "matrix",
        expectedEventsH0 = "numeric",
        expectedEventsH01 = "numeric",
        expectedEventsH1 = "numeric",
        numberOfSubjects = "matrix",
        numberOfSubjects1 = "matrix",
        numberOfSubjects2 = "matrix",
        expectedNumberOfSubjectsH1 = "numeric",
        expectedNumberOfSubjects = "numeric",
        criticalValuesEffectScale = "matrix",
        criticalValuesEffectScaleLower = "matrix",
        criticalValuesEffectScaleUpper = "matrix",
        criticalValuesPValueScale = "matrix",
        futilityBoundsEffectScale = "matrix",
        futilityBoundsEffectScaleLower = "matrix",
        futilityBoundsEffectScaleUpper = "matrix",
        futilityBoundsPValueScale = "matrix"
    ),
    methods = list(
        # Marks all not-yet-computed result fields as "not applicable" and
        # fills survival-specific parameters with their package defaults.
        initialize = function(...) {
            callSuper(...)
            optimumAllocationRatio <<- FALSE
            # All visible fields from 'hazardRatio' onwards are result fields
            # that get filled in later by the sample size / power calculation.
            visibleFieldNames <- .getVisibleFieldNames()
            startIndex <- which(visibleFieldNames == "hazardRatio")
            for (i in startIndex:length(visibleFieldNames)) {
                .setParameterType(visibleFieldNames[i], C_PARAM_NOT_APPLICABLE)
            }
            .setParameterType("maxNumberOfSubjects", C_PARAM_NOT_APPLICABLE)
            .setParameterType("maxNumberOfSubjects1", C_PARAM_NOT_APPLICABLE)
            .setParameterType("maxNumberOfSubjects2", C_PARAM_NOT_APPLICABLE)
            .setParameterType("median1", C_PARAM_NOT_APPLICABLE)
            .setParameterType("median2", C_PARAM_NOT_APPLICABLE)
            .setParameterType("accountForObservationTimes", C_PARAM_NOT_APPLICABLE)
            .setParameterType("chi", C_PARAM_NOT_APPLICABLE)
            .setParameterType("maxStudyDuration", C_PARAM_NOT_APPLICABLE)
            .setParameterType("accrualIntensityRelative", C_PARAM_NOT_APPLICABLE)
            .setParameterType("criticalValuesEffectScale", C_PARAM_NOT_APPLICABLE)
            .setParameterType("criticalValuesEffectScaleLower", C_PARAM_NOT_APPLICABLE)
            .setParameterType("criticalValuesEffectScaleUpper", C_PARAM_NOT_APPLICABLE)
            .setParameterType("futilityBoundsEffectScale", C_PARAM_NOT_APPLICABLE)
            .setParameterType("futilityBoundsEffectScaleLower", C_PARAM_NOT_APPLICABLE)
            .setParameterType("futilityBoundsEffectScaleUpper", C_PARAM_NOT_APPLICABLE)

            # set default values
            for (parameterName in c(
                "eventTime", "accrualTime", "accrualIntensity",
                "kappa", "piecewiseSurvivalTime", "lambda1", "lambda2",
                "followUpTime", "dropoutTime"
            )) {
                .setDefaultValue(parameterName)
            }
        },
        # Re-runs the originating calculation (getSampleSizeSurvival or
        # getPowerSurvival, depending on .objectType). 'hazardRatio' and 'pi1'
        # may be overridden, but are only forwarded if they were user-defined
        # on this object (NA keeps the current value).
        clone = function(hazardRatio = NA_real_, pi1 = NA_real_) {
            hr <- NA_real_
            if (.getParameterType("hazardRatio") == C_PARAM_USER_DEFINED) {
                hr <- hazardRatio
                if (any(is.na(hazardRatio))) {
                    hr <- .self$hazardRatio
                }
            }
            pi1Temp <- NA_real_
            if (.getParameterType("pi1") == C_PARAM_USER_DEFINED) {
                pi1Temp <- pi1
                if (any(is.na(pi1))) {
                    pi1Temp <- .self$pi1
                }
            }
            # Accrual time vectors are expected to start at 0; prepend it if missing.
            accrualTimeTemp <- .self$.getParameterValueIfUserDefinedOrDefault("accrualTime")
            if (!is.null(accrualTimeTemp) && length(accrualTimeTemp) > 0 &&
                    !all(is.na(accrualTimeTemp)) && accrualTimeTemp[1] != 0) {
                accrualTimeTemp <- c(0, accrualTimeTemp)
            }
            accrualIntensityTemp <- .self$.getParameterValueIfUserDefinedOrDefault("accrualIntensity")
            if (all(is.na(accrualIntensityTemp))) {
                accrualIntensityTemp <- C_ACCRUAL_INTENSITY_DEFAULT
            }
            if (.objectType == "sampleSize") {
                return(getSampleSizeSurvival(
                    design = .self$.design,
                    typeOfComputation = .self$.getParameterValueIfUserDefinedOrDefault("typeOfComputation"),
                    thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"),
                    pi1 = pi1Temp,
                    pi2 = .self$.getParameterValueIfUserDefinedOrDefault("pi2"),
                    allocationRatioPlanned = .self$allocationRatioPlanned,
                    accountForObservationTimes = .self$.getParameterValueIfUserDefinedOrDefault("accountForObservationTimes"),
                    eventTime = .self$eventTime,
                    accrualTime = accrualTimeTemp,
                    accrualIntensity = accrualIntensityTemp,
                    kappa = .self$kappa,
                    piecewiseSurvivalTime = .self$.getParameterValueIfUserDefinedOrDefault("piecewiseSurvivalTime"),
                    lambda2 = .self$.getParameterValueIfUserDefinedOrDefault("lambda2"),
                    lambda1 = .self$.getParameterValueIfUserDefinedOrDefault("lambda1"),
                    followUpTime = .self$.getParameterValueIfUserDefinedOrDefault("followUpTime"),
                    maxNumberOfSubjects = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfSubjects"),
                    dropoutRate1 = .self$dropoutRate1,
                    dropoutRate2 = .self$dropoutRate2,
                    dropoutTime = .self$dropoutTime,
                    hazardRatio = hr
                ))
            } else {
                # getPowerSurvival() expects a single direction; use the first element.
                directionUpperTemp <- directionUpper
                if (length(directionUpperTemp) > 1) {
                    directionUpperTemp <- directionUpperTemp[1]
                }
                return(getPowerSurvival(
                    design = .self$.design,
                    typeOfComputation = .self$.getParameterValueIfUserDefinedOrDefault("typeOfComputation"),
                    thetaH0 = .self$.getParameterValueIfUserDefinedOrDefault("thetaH0"),
                    pi1 = pi1Temp,
                    pi2 = .self$.getParameterValueIfUserDefinedOrDefault("pi2"),
                    directionUpper = directionUpperTemp,
                    allocationRatioPlanned = .self$allocationRatioPlanned,
                    eventTime = .self$eventTime,
                    accrualTime = accrualTimeTemp,
                    accrualIntensity = accrualIntensityTemp,
                    kappa = .self$kappa,
                    piecewiseSurvivalTime = .self$.getParameterValueIfUserDefinedOrDefault("piecewiseSurvivalTime"),
                    lambda2 = .self$.getParameterValueIfUserDefinedOrDefault("lambda2"),
                    lambda1 = .self$.getParameterValueIfUserDefinedOrDefault("lambda1"),
                    hazardRatio = hr,
                    maxNumberOfSubjects = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfSubjects"),
                    maxNumberOfEvents = .self$.getParameterValueIfUserDefinedOrDefault("maxNumberOfEvents"),
                    dropoutRate1 = .self$dropoutRate1,
                    dropoutRate2 = .self$dropoutRate2,
                    dropoutTime = .self$dropoutTime
                ))
            }
        },
        # Replaces a NULL/all-NA field value with the package default and
        # records its parameter type as "default".
        .setDefaultValue = function(argumentName) {
            if (is.null(.self[[argumentName]]) || all(is.na(.self[[argumentName]]))) {
                .self[[argumentName]] <<- C_TRIAL_DESIGN_PLAN_DEFAULT_VALUES_SURVIVAL[[argumentName]]
                .setParameterType(argumentName, C_PARAM_DEFAULT_VALUE)
            }
        },
        show = function(showType = 1, digits = NA_integer_) {
            "Method for automatically printing trial plan objects"
            callSuper(showType = showType, digits = digits)
        },
        # Emits a warning if a user supplied an argument that the current
        # computation path ignores (non-NA and positive).
        .warnInCaseArgumentExists = function(argument, argumentName) {
            if (!all(is.na(argument)) && any(argument > 0)) {
                warning(sprintf(
                    "Specified '%s' (%s) not taken into account",
                    argumentName, .arrayToString(argument)
                ), call. = FALSE)
            }
        }
    )
)

# Adds design-plan-specific items (N max, standard deviation / rates / hazard
# ratio, H0 value, allocation ratio, ...) to the plot subtitle 'items'
# collector, depending on the plan class and the plot 'type'.
# Plot types 1, 3 and 4 get no subtitle items.
.addPlotSubTitleItems <- function(designPlan, designMaster, items, type) {
    if (type %in% c(1, 3, 4)) {
        return(invisible())
    }
    if (.isTrialDesignPlanMeans(designPlan)) {
        nMax <- designPlan$maxNumberOfSubjects[1] # use first value for plotting
        if (!(type %in% c(5))) {
            items$add("N", round(nMax, 1), "max")
        }
        if ((type %in% c(5)) && !(items$title == "Sample Size")) {
            items$add("N", round(nMax, 1), "max")
        }
        if (designPlan$meanRatio) {
            items$add("coefficient of variation", designPlan$stDev)
        } else {
            items$add("standard deviation", designPlan$stDev)
        }
        if (designPlan$groups == 1) {
            if (type %in% c(2, (5:9))) {
                items$add("H0: mu", designPlan$thetaH0)
                items$add("allocation ratio", round(designPlan$allocationRatioPlanned, 2))
            }
        } else {
            if (type %in% c(2, (5:9))) {
                if (designPlan$meanRatio) {
                    items$add("H0: mean ratio", designPlan$thetaH0)
                } else {
                    items$add("H0: mean difference", designPlan$thetaH0)
                }
                items$add("allocation ratio", round(designPlan$allocationRatioPlanned, 2))
            }
        }
    } else if (.isTrialDesignPlanRates(designPlan)) {
        nMax <- designPlan$maxNumberOfSubjects[1] # use first value for plotting
        if (!(type %in% c(5))) {
            items$add("N", round(nMax, 1), "max")
        }
        if ((type %in% c(5)) && !(items$title == "Sample Size")) {
            items$add("N", round(nMax, 1), "max")
        }
        if (designPlan$groups == 2 && !(type %in% c(3, 4)) &&
                length(designPlan$pi2) == 1 && !is.na(designPlan$pi2)) {
            items$add("pi", designPlan$pi2, 2)
        }
        if (designPlan$groups == 1) {
            if (type %in% c(2, (5:9))) {
                items$add("H0: pi", designPlan$thetaH0)
            }
        } else {
            if (type %in% c(2, (5:9))) {
                if (designPlan$riskRatio) {
                    items$add("H0: risk ratio", designPlan$thetaH0)
                } else {
                    items$add("H0: risk difference", designPlan$thetaH0)
                }
                items$add("allocation ratio", round(designPlan$allocationRatioPlanned, 2))
            }
        }
    } else if (.isTrialDesignPlanSurvival(designPlan)) {
        if (designPlan$.isPowerObject() && !(type %in% (13:14))) {
            items$add("maximum number of events", designPlan$maxNumberOfEvents[1])
        }
        if (type %in% (10:12)) {
            items$add("maximum number of subjects", designPlan$maxNumberOfSubjects[1])
        }
        if (type %in% c(2, (5:12))) {
            items$add("H0: hazard ratio", designPlan$thetaH0)
            items$add("allocation ratio", round(designPlan$allocationRatioPlanned, 2))
        }
    }
}

# Stops with an "illegal argument" error unless the plan's varied parameter
# (alternative / pi1 / hazardRatio, depending on the plan class) is defined
# with length > 1, which the given plot type requires.
.assertIsValidVariedParameterVectorForPlotting <- function(designPlan, plotType) {
    if (.isTrialDesignPlanMeans(designPlan)) {
        if (is.null(designPlan$alternative) || any(is.na(designPlan$alternative)) ||
                length(designPlan$alternative) <= 1) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "plot type ", plotType,
                " is only available if 'alternative' with length > 1 is defined"
            )
        }
    } else if (.isTrialDesignPlanRates(designPlan)) {
        if (is.null(designPlan$pi1) || any(is.na(designPlan$pi1)) ||
                length(designPlan$pi1) <= 1) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "plot type ", plotType,
                " is only available if 'pi1' with length > 1 is defined"
            )
        }
    } else if (.isTrialDesignPlanSurvival(designPlan)) {
        if (is.null(designPlan$hazardRatio) || any(is.na(designPlan$hazardRatio)) ||
                length(designPlan$hazardRatio) <= 1) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "plot type ", plotType,
                " is only available if 'hazardRatio' with length > 1 is defined"
            )
        }
    }
}

# Central plotting dispatcher for trial design plan objects
# (body continues below).
.plotTrialDesignPlan <- function(designPlan, type = 1L, main = NA_character_,
        xlab = NA_character_, ylab = NA_character_, palette = "Set1",
        theta = seq(-1, 1, 0.02), plotPointsEnabled = NA,
        legendPosition = NA_integer_, showSource = FALSE,
        designPlanName = NA_character_, plotSettings = NULL, ...)
{ .assertGgplotIsInstalled() .assertIsTrialDesignPlan(designPlan) .assertIsValidLegendPosition(legendPosition) .assertIsSingleInteger(type, "type", naAllowed = FALSE, validateType = FALSE) theta <- .assertIsValidThetaRange(thetaRange = theta) survivalDesignPlanEnabled <- .isTrialDesignPlanSurvival(designPlan) nMax <- ifelse(survivalDesignPlanEnabled, designPlan$maxNumberOfEvents[1], designPlan$maxNumberOfSubjects[1] ) # use first value for plotting if (is.null(plotSettings)) { plotSettings <- designPlan$.plotSettings } designMaster <- designPlan$.design if (designMaster$kMax == 1 && (type %in% c(1:4))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not available for 'kMax' = 1" ) } if (designPlan$.isSampleSizeObject()) { if (survivalDesignPlanEnabled) { if (!(type %in% c(1:5, 13, 14))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, 3, 4, 5, 13 or 14" ) } } else { if (!(type %in% c(1:5))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, 3, 4, 5" ) } } } if (is.na(plotPointsEnabled)) { plotPointsEnabled <- type < 4 } ratioEnabled <- (survivalDesignPlanEnabled || (.isTrialDesignPlanMeans(designPlan) && designPlan$meanRatio) || (.isTrialDesignPlanRates(designPlan) && designPlan$riskRatio)) variedParameters <- logical(0) showSourceHint <- "" if (type %in% c(5:12)) { if (.isTrialDesignPlanMeans(designPlan) && length(designPlan$alternative) == 2 && designPlan$.getParameterType("alternative") == C_PARAM_USER_DEFINED) { if (!is.logical(showSource) || isTRUE(showSource)) { showSourceHint <- .getVariedParameterHint(designPlan$alternative, "alternative") } designPlan <- designPlan$clone( alternative = .getVariedParameterVector(designPlan$alternative, "alternative") ) } else if ((.isTrialDesignPlanRates(designPlan) || survivalDesignPlanEnabled) && length(designPlan$pi1) == 2 && designPlan$.getParameterType("pi1") == C_PARAM_USER_DEFINED) { if (!is.logical(showSource) 
|| isTRUE(showSource)) { showSourceHint <- .getVariedParameterHint(designPlan$pi1, "pi1") } designPlan <- designPlan$clone( pi1 = .getVariedParameterVector(designPlan$pi1, "pi1") ) } else if (survivalDesignPlanEnabled && length(designPlan$hazardRatio) == 2 && designPlan$.getParameterType("hazardRatio") == C_PARAM_USER_DEFINED) { if (!is.logical(showSource) || isTRUE(showSource)) { showSourceHint <- .getVariedParameterHint(designPlan$hazardRatio, "hazardRatio") } designPlan <- designPlan$clone( hazardRatio = .getVariedParameterVector(designPlan$hazardRatio, "hazardRatio") ) } } srcCmd <- NULL reducedParam <- NULL if (type %in% c(1:4)) { reducedParam <- .warnInCaseOfUnusedValuesForPlotting(designPlan) } if (type == 1) { # Boundary plot if (survivalDesignPlanEnabled) { if (is.na(main)) { main <- PlotSubTitleItems(title = "Boundaries Z Scale") .addPlotSubTitleItems(designPlan, designMaster, main, type) if (!is.null(reducedParam)) { main$add(reducedParam$title, reducedParam$value, reducedParam$subscript) } } if (designMaster$sided == 1) { designPlan <- data.frame( eventsPerStage = designPlan$eventsPerStage[, 1], criticalValues = designMaster$criticalValues, futilityBounds = c(designMaster$futilityBounds, designMaster$criticalValues[designMaster$kMax]) ) } else { designPlan <- data.frame( eventsPerStage = designPlan$eventsPerStage[, 1], criticalValues = designMaster$criticalValues, criticalValuesMirrored = -designMaster$criticalValues ) } xParameterName <- "eventsPerStage" if (designMaster$sided == 1) { if (any(designMaster$futilityBounds > C_FUTILITY_BOUNDS_DEFAULT)) { yParameterNames <- c("futilityBounds", "criticalValues") } else { yParameterNames <- "criticalValues" } yParameterNamesSrc <- yParameterNames } else { yParameterNames <- c("criticalValues", "criticalValuesMirrored") yParameterNamesSrc <- c("criticalValues", paste0("-", designPlanName, "$.design$criticalValues")) } if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } srcCmd <- 
.showPlotSourceInformation( objectName = paste0(designPlanName, "$.design"), xParameterName = paste0(designPlanName, "$", xParameterName, "[, 1]"), yParameterNames = yParameterNamesSrc, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) } else { if (is.na(main)) { main <- PlotSubTitleItems(title = "Boundaries") .addPlotSubTitleItems(designPlan, designMaster, main, type) if (!is.null(reducedParam)) { main$add(reducedParam$title, reducedParam$value, reducedParam$subscript) } } designSet <- TrialDesignSet(design = designMaster, singleDesign = TRUE) designSet$.plotSettings <- designPlan$.plotSettings designPlanName <- paste0(designPlanName, "$.design") return(.plotTrialDesignSet( x = designSet, y = NULL, main = main, xlab = xlab, ylab = ylab, type = type, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, designSetName = designPlanName, showSource = showSource, plotSettings = plotSettings # , ... )) } } else if (type == 2) { # Effect Scale Boundary plot if (is.na(main)) { main <- PlotSubTitleItems(title = "Boundaries Effect Scale") .addPlotSubTitleItems(designPlan, designMaster, main, type) if (!is.null(reducedParam)) { main$add(reducedParam$title, reducedParam$value, reducedParam$subscript) } } if (is.na(ylab)) { if (.isTrialDesignPlanMeans(designPlan)) { if (designPlan$groups == 1) { ylab <- "Mean" } else if (!designPlan$meanRatio) { ylab <- "Mean Difference" } else { ylab <- "Mean Ratio" } } else if (.isTrialDesignPlanRates(designPlan)) { if (designPlan$groups == 1) { ylab <- "Rate" } else if (!designPlan$riskRatio) { ylab <- "Rate Difference" } else { ylab <- "Risk Ratio" } } else if (survivalDesignPlanEnabled) { ylab <- "Hazard Ratio" } } groupedPlotEnabled <- FALSE yParameterNamesSrc <- c() if (designMaster$sided == 1) { if (any(designMaster$futilityBounds > C_FUTILITY_BOUNDS_DEFAULT)) { data <- data.frame( criticalValuesEffectScale = designPlan$criticalValuesEffectScale[, 
1], futilityBoundsEffectScale = c( designPlan$futilityBoundsEffectScale[, 1], designPlan$criticalValuesEffectScale[designMaster$kMax, 1] ) ) yParameterNamesSrc <- c(yParameterNamesSrc, "criticalValuesEffectScale[, 1]") yParameterNamesSrc <- c(yParameterNamesSrc, paste0( "c(", designPlanName, "$futilityBoundsEffectScale[, 1], ", designPlanName, "$criticalValuesEffectScale[nrow(", designPlanName, "$criticalValuesEffectScale), 1])" )) } else { data <- data.frame( criticalValuesEffectScale = designPlan$criticalValuesEffectScale[, 1] ) yParameterNamesSrc <- c(yParameterNamesSrc, "criticalValuesEffectScale[, 1]") } } else if (designMaster$typeOfDesign == C_TYPE_OF_DESIGN_PT) { data <- data.frame( criticalValues = designPlan$criticalValuesEffectScaleUpper[, 1], criticalValuesMirrored = designPlan$criticalValuesEffectScaleLower[, 1], futilityBounds = c( designPlan$futilityBoundsEffectScaleUpper[, 1], designPlan$criticalValuesEffectScaleUpper[designMaster$kMax, 1] ), futilityBoundsMirrored = c( designPlan$futilityBoundsEffectScaleLower[, 1], designPlan$criticalValuesEffectScaleLower[designMaster$kMax, 1] ) ) yParameterNamesSrc <- c(yParameterNamesSrc, "criticalValuesEffectScaleUpper[, 1]") yParameterNamesSrc <- c(yParameterNamesSrc, "criticalValuesEffectScaleLower[, 1]") yParameterNamesSrc <- c(yParameterNamesSrc, paste0( "c(", designPlanName, "$futilityBoundsEffectScaleUpper[, 1], ", designPlanName, "$criticalValuesEffectScaleUpper[nrow(", designPlanName, "$criticalValuesEffectScaleUpper), 1])" )) yParameterNamesSrc <- c(yParameterNamesSrc, paste0( "c(", designPlanName, "$futilityBoundsEffectScaleLower[, 1], ", designPlanName, "$criticalValuesEffectScaleLower[nrow(", designPlanName, "$criticalValuesEffectScaleLower), 1])" )) groupedPlotEnabled <- TRUE } else { data <- data.frame( criticalValuesEffectScale = designPlan$criticalValuesEffectScaleUpper[, 1], criticalValuesEffectScaleMirrored = designPlan$criticalValuesEffectScaleLower[, 1] ) yParameterNamesSrc <- 
c(yParameterNamesSrc, "criticalValuesEffectScaleUpper[, 1]") yParameterNamesSrc <- c(yParameterNamesSrc, "criticalValuesEffectScaleLower[, 1]") } if (survivalDesignPlanEnabled) { xParameterName <- "eventsPerStage" xParameterNameSrc <- paste0(designPlanName, "$", xParameterName, "[, 1]") data <- cbind(data.frame(eventsPerStage = designPlan$eventsPerStage[, 1]), data) } else { xParameterName <- "informationRates" xParameterNameSrc <- paste0(designPlanName, "$.design$", xParameterName) data <- cbind(data.frame(informationRates = designMaster$informationRates), data) } if (designMaster$sided == 1 || designMaster$typeOfDesign == C_TYPE_OF_DESIGN_PT) { if (any(designMaster$futilityBounds > C_FUTILITY_BOUNDS_DEFAULT)) { yParameterNames <- c("futilityBoundsEffectScale", "criticalValuesEffectScale") } else { yParameterNames <- "criticalValuesEffectScale" } } else { yParameterNames <- c("criticalValuesEffectScale", "criticalValuesEffectScaleMirrored") } if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterNameSrc, yParameterNames = yParameterNamesSrc, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) if (groupedPlotEnabled) { tableColumnNames <- C_TABLE_COLUMN_NAMES criticalValuesName <- designPlan$.getDataFrameColumnCaption("criticalValuesEffectScale", tableColumnNames, TRUE) futilityBoundsName <- designPlan$.getDataFrameColumnCaption("futilityBoundsEffectScale", tableColumnNames, TRUE) designPlan <- data.frame( xValues = rep(data[[xParameterName]], 4), yValues = c( data$criticalValues, data$criticalValuesMirrored, data$futilityBounds, data$futilityBoundsMirrored ), categories = c( rep(criticalValuesName, nrow(data)), rep("criticalValuesMirrored", nrow(data)), rep(futilityBoundsName, nrow(data)), rep("futilityBoundsMirrored", nrow(data)) ), groups = 
c(rep(criticalValuesName, 2 * nrow(data)), rep(futilityBoundsName, 2 * nrow(data))) ) } else { designPlan <- data } } else if (type == 3) { # Stage Levels if (is.na(main)) { main <- PlotSubTitleItems(title = "Boundaries p Values Scale") .addPlotSubTitleItems(designPlan, designMaster, main, type) if (!is.null(reducedParam)) { main$add(reducedParam$title, reducedParam$value, reducedParam$subscript) } } if (survivalDesignPlanEnabled) { xParameterName <- "eventsPerStage" yParameterNames <- "stageLevels" designPlan <- data.frame( eventsPerStage = designPlan$eventsPerStage[, 1], stageLevels = designMaster$stageLevels ) xParameterNameSrc <- "eventsPerStage[, 1]" yParameterNamesSrc <- ".design$stageLevels" } else { xParameterName <- "informationRates" yParameterNames <- "stageLevels" designPlan <- TrialDesignSet(design = designMaster, singleDesign = TRUE) xParameterNameSrc <- ".design$informationRates" yParameterNamesSrc <- ".design$stageLevels" } srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterNameSrc, yParameterNames = yParameterNamesSrc, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) } else if (type == 4) { # Alpha Spending if (is.na(main)) { main <- PlotSubTitleItems(title = "Error Spending") .addPlotSubTitleItems(designPlan, designMaster, main, type) if (!is.null(reducedParam)) { main$add(reducedParam$title, reducedParam$value, reducedParam$subscript) } } if (survivalDesignPlanEnabled) { xParameterName <- "eventsPerStage" yParameterNames <- "alphaSpent" designPlan <- data.frame( eventsPerStage = designPlan$eventsPerStage[, 1], alphaSpent = designMaster$alphaSpent ) xParameterNameSrc <- "eventsPerStage[, 1]" yParameterNamesSrc <- ".design$alphaSpent" } else { xParameterName <- "informationRates" yParameterNames <- "alphaSpent" designPlan <- TrialDesignSet(design = designMaster, singleDesign = TRUE) xParameterNameSrc <- ".design$informationRates" yParameterNamesSrc <- ".design$alphaSpent" } 
plotPointsEnabled <- ifelse(is.na(plotPointsEnabled), FALSE, plotPointsEnabled) srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterNameSrc, yParameterNames = yParameterNamesSrc, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) } else if (type == 5) { # Power and Stopping Probabilities .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (designPlan$.isSampleSizeObject()) { if (is.na(main)) { main <- PlotSubTitleItems(title = "Sample Size") .addPlotSubTitleItems(designPlan, designMaster, main, type) } yAxisScalingEnabled <- TRUE if (.isTrialDesignPlanMeans(designPlan)) { xParameterName <- "alternative" yParameterNames <- c("nFixed") if (designMaster$kMax > 1) { yParameterNames <- c(yParameterNames, "maxNumberOfSubjects", "expectedNumberOfSubjectsH1") } if (is.na(ylab)) { ylab <- "Sample Size" } yAxisScalingEnabled <- FALSE if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } yParameterNamesSrc <- yParameterNames } else if (.isTrialDesignPlanRates(designPlan)) { xParameterName <- "pi1" yParameterNames <- c("nFixed") if (designMaster$kMax > 1) { yParameterNames <- c(yParameterNames, "maxNumberOfSubjects", "expectedNumberOfSubjectsH1") } if (is.na(ylab)) { ylab <- "Sample Size" } yAxisScalingEnabled <- FALSE if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } yParameterNamesSrc <- yParameterNames } else if (survivalDesignPlanEnabled) { designPlan <- data.frame( hazardRatio = designPlan$hazardRatio, eventsFixed = designPlan$eventsFixed, maxNumberOfEvents = designPlan$eventsPerStage[designMaster$kMax, ], expectedEventsH1 = designPlan$expectedEventsH1 ) xParameterName <- "hazardRatio" yParameterNames <- c("eventsFixed") if (designMaster$kMax > 1) { yParameterNames <- c(yParameterNames, "maxNumberOfEvents", "expectedEventsH1") } if (is.na(ylab)) { ylab <- "# Events" } if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_TOP } yParameterNamesSrc 
<- c( "eventsFixed", paste0("eventsPerStage[", designMaster$kMax, ", ]"), "expectedEventsH1" ) } srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNamesSrc, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) if (!is.null(srcCmd)) { if (.isSpecialPlotShowSourceArgument(showSource)) { return(invisible(srcCmd)) } return(srcCmd) } return(.plotParameterSet( parameterSet = designPlan, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, variedParameters = variedParameters, qnormAlphaLineEnabled = FALSE, yAxisScalingEnabled = FALSE, plotSettings = plotSettings # , ... )) } else { if (is.na(main)) { main <- PlotSubTitleItems(title = "Overall Power and Early Stopping") .addPlotSubTitleItems(designPlan, designMaster, main, type) } if (survivalDesignPlanEnabled) { xParameterName <- "hazardRatio" } else { xParameterName <- "effect" } yParameterNames <- c("overallReject", "futilityStop", "earlyStop") if (is.na(ylab)) { ylab <- "" } if (is.na(legendPosition)) { legendPosition <- C_POSITION_LEFT_TOP } srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) if (!is.null(srcCmd)) { if (.isSpecialPlotShowSourceArgument(showSource)) { return(invisible(srcCmd)) } return(srcCmd) } if (is.null(list(...)[["ylim"]])) { ylim <- c(0, 1) return(.plotParameterSet( parameterSet = designPlan, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = 
legendPosition, variedParameters = variedParameters, qnormAlphaLineEnabled = FALSE, yAxisScalingEnabled = FALSE, plotSettings = plotSettings, ylim = ylim # , ... )) } else { return(.plotParameterSet( parameterSet = designPlan, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, variedParameters = variedParameters, qnormAlphaLineEnabled = FALSE, yAxisScalingEnabled = FALSE, plotSettings = plotSettings # , ... )) } } } else if (type == 6) { # Average Sample Size / Average Event Number .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { titlePart <- ifelse(survivalDesignPlanEnabled, "Number of Events", "Sample Size") main <- PlotSubTitleItems(title = paste0("Expected ", titlePart, " and Power / Early Stop")) .addPlotSubTitleItems(designPlan, designMaster, main, type) } if (survivalDesignPlanEnabled) { xParameterName <- "hazardRatio" yParameterNames <- "expectedNumberOfEvents" expectedNumberOfEvents <- designPlan[["expectedNumberOfEvents"]] if (is.null(expectedNumberOfEvents) || length(expectedNumberOfEvents) == 0) { yParameterNames <- "expectedEventsH1" } yParameterNames <- c(yParameterNames, "overallReject", "earlyStop") # overallReject = power if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_CENTER } } else { xParameterName <- "effect" yParameterNames <- c("expectedNumberOfSubjects", "overallReject", "earlyStop") # overallReject = power } srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) } else if (type == 7) { .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { main <- PlotSubTitleItems(title = "Overall Power") 
.addPlotSubTitleItems(designPlan, designMaster, main, type) } if (survivalDesignPlanEnabled) { xParameterName <- "hazardRatio" } else { xParameterName <- "effect" } yParameterNames <- "overallReject" if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_CENTER } srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) } else if (type == 8) { .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { main <- PlotSubTitleItems(title = "Overall Early Stopping") .addPlotSubTitleItems(designPlan, designMaster, main, type) } if (survivalDesignPlanEnabled) { xParameterName <- "hazardRatio" } else { xParameterName <- "effect" } yParameterNames <- c("earlyStop", "futilityStop") if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_CENTER } srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) } else if (type == 9) { .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { if (survivalDesignPlanEnabled) { main <- PlotSubTitleItems(title = "Expected Number of Events") } else { main <- PlotSubTitleItems(title = "Expected Sample Size") } .addPlotSubTitleItems(designPlan, designMaster, main, type) } if (survivalDesignPlanEnabled) { xParameterName <- "hazardRatio" yParameterNames <- "expectedNumberOfEvents" expectedNumberOfEvents <- designPlan[["expectedNumberOfEvents"]] if (is.null(expectedNumberOfEvents) || length(expectedNumberOfEvents) == 0) { yParameterNames <- c("expectedEventsH0", "expectedEventsH1") if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_CENTER } } } else { xParameterName <- "effect" yParameterNames <- "expectedNumberOfSubjects" } srcCmd <- .showPlotSourceInformation( 
objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) } else if (survivalDesignPlanEnabled) { if (type == 10) { # Study Duration .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { main <- PlotSubTitleItems(title = "Study Duration") .addPlotSubTitleItems(designPlan, designMaster, main, type) } xParameterName <- "hazardRatio" yParameterNames <- "studyDuration" srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) } else if (type == 11) { .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { main <- PlotSubTitleItems(title = "Expected Number of Subjects") .addPlotSubTitleItems(designPlan, designMaster, main, type) } xParameterName <- "hazardRatio" yParameterNames <- "expectedNumberOfSubjects" srcCmd <- .showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNames, hint = showSourceHint, nMax = nMax, type = type, showSource = showSource ) } else if (type == 12) { # Analysis Time .assertIsValidVariedParameterVectorForPlotting(designPlan, type) if (is.na(main)) { main <- PlotSubTitleItems(title = "Analysis Time") .addPlotSubTitleItems(designPlan, designMaster, main, type) } xParameterName <- "hazardRatio" yParameterNames <- "analysisTime" yParameterNamesSrc <- c() for (i in 1:nrow(designPlan[["analysisTime"]])) { yParameterNamesSrc <- c(yParameterNamesSrc, paste0("analysisTime[", i, ", ]")) } data <- NULL for (k in 1:designMaster$kMax) { part <- data.frame( categories = rep(k, length(designPlan$hazardRatio)), xValues = designPlan$hazardRatio, yValues = designPlan$analysisTime[k, ] ) if (is.null(data)) { data <- part } else { data <- rbind(data, part) } } srcCmd <- 
.showPlotSourceInformation( objectName = designPlanName, xParameterName = xParameterName, yParameterNames = yParameterNamesSrc, hint = showSourceHint, type = type, showSource = showSource ) if (!is.null(srcCmd)) { if (.isSpecialPlotShowSourceArgument(showSource)) { return(invisible(srcCmd)) } return(srcCmd) } return(.plotDataFrame(data, mainTitle = main, xlab = NA_character_, ylab = NA_character_, xAxisLabel = "Hazard Ratio", yAxisLabel1 = "Analysis Time", yAxisLabel2 = NA_character_, plotPointsEnabled = TRUE, legendTitle = "Stage", legendPosition = legendPosition, sided = designMaster$sided, plotSettings = plotSettings, ... )) } else if (type == 13 || type == 14) { # Cumulative Distribution Function / Survival function return(.plotSurvivalFunction(designPlan, designMaster = designMaster, type = type, main = main, xlab = xlab, ylab = ylab, palette = palette, legendPosition = legendPosition, showSource = showSource, designPlanName = designPlanName, plotSettings = plotSettings, ... )) } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, ..., 14") } } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, ..., 9") } if (!is.null(srcCmd)) { if (.isSpecialPlotShowSourceArgument(showSource)) { return(invisible(srcCmd)) } return(srcCmd) } p <- .plotParameterSet( parameterSet = designPlan, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, variedParameters = variedParameters, qnormAlphaLineEnabled = (type != 2), ratioEnabled = ratioEnabled, plotSettings = plotSettings # , ... 
)
    # Boundary plots for survival designs additionally show the decision critical values.
    if (type == 1 && survivalDesignPlanEnabled) {
        p <- .addDecistionCriticalValuesToPlot(p = p, designMaster = designMaster, type = type, nMax = nMax)
    }
    return(p)
}

# Builds the R command string that is shown to the user (via 'showSource') for
# survival-function plots (types 13/14).
#
# functionType             - "pwExpDist" builds a getPiecewiseExponentialDistribution() call,
#                            "lambdaStep" builds a getLambdaStepFunction() call.
# timeValues               - time points; rendered as a seq() command by .reconstructSequenceCommand().
# lambda                   - lambda vector inserted as the 'piecewiseLambda' argument.
# designPlan               - provides piecewiseSurvivalTime, kappa, and hazardRatio for the command text.
# type                     - plot type; 13 (CDF) keeps the command positive, otherwise a "-" prefix is used.
# piecewiseSurvivalEnabled - if TRUE, 'piecewiseSurvivalTime = ...' is appended to the command.
# multiplyByHazardRatio    - if TRUE, " * <hazardRatio[1]>" is appended to the command.
#
# Returns a single character string containing the command.
.getSurvivalFunctionPlotCommand <- function(functionType = c("pwExpDist", "lambdaStep"),
        timeValues, lambda, designPlan, type,
        piecewiseSurvivalEnabled, multiplyByHazardRatio = FALSE) {
    functionType <- match.arg(functionType)
    # type 13 = cumulative distribution function (positive), type 14 = survival function (negated)
    signPrefix <- ifelse(type == 13, "", "-")
    if (functionType == "pwExpDist") {
        functionName <- "getPiecewiseExponentialDistribution"
    } else {
        functionName <- "getLambdaStepFunction"
    }
    cmd <- paste0(
        signPrefix, functionName, "(", .reconstructSequenceCommand(timeValues),
        ", piecewiseLambda = ", .arrayToString(lambda, vectorLookAndFeelEnabled = TRUE)
    )
    if (piecewiseSurvivalEnabled) {
        cmd <- paste0(
            cmd, ", piecewiseSurvivalTime = ",
            .arrayToString(designPlan$piecewiseSurvivalTime, vectorLookAndFeelEnabled = TRUE)
        )
    }
    if (functionType == "pwExpDist") {
        cmd <- paste0(cmd, ", kappa = ", designPlan$kappa)
    }
    cmd <- paste0(cmd, ")")
    if (multiplyByHazardRatio) {
        cmd <- paste0(cmd, " * ", designPlan$hazardRatio[1])
    }
    return(cmd)
}

# Cumulative Distribution Function / Survival function
#
# Creates the plot for types 13 (CDF) and 14 (survival function) of a survival
# design plan: evaluates the piecewise exponential distribution for both groups
# (1 = treatment, 2 = control), overlays the lambda step functions (rescaled to
# the survival axis), and delegates the actual drawing to .plotDataFrame().
# If 'showSource' requests it, the reconstructed source command is returned
# instead of a plot.
#
# Stops with a missing-argument exception when 'piecewiseSurvivalTime',
# 'lambda2', 'kappa', or 'hazardRatio' are absent from the design plan.
.plotSurvivalFunction <- function(designPlan, ..., designMaster, type = c(13, 14),
        main = NA_character_, xlab = NA_character_, ylab = NA_character_,
        palette = "Set1", legendPosition = NA_integer_, showSource = FALSE,
        designPlanName = NA_character_, plotSettings = NULL) {
    startTime <- Sys.time() # NOTE(review): 'startTime' is never read below — dead local
    if (is.null(designPlan$piecewiseSurvivalTime) || length(designPlan$piecewiseSurvivalTime) == 0) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'piecewiseSurvivalTime' must be specified")
    }
    type <- type[1]
    if (!(type %in% c(13, 14))) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' must be 13 or 14")
    }
    lambda1 <- designPlan[["lambda1"]]
    lambda2 <- designPlan[["lambda2"]]
    if (is.null(lambda2) || length(lambda2) == 0) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'lambda2' must be specified")
    }
    if (is.null(designPlan$kappa) || length(designPlan$kappa) == 0) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'kappa' must be specified")
    }
    if (is.null(designPlan$hazardRatio) || length(designPlan$hazardRatio) == 0) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'hazardRatio' must be specified")
    }
    piecewiseSurvivalEnabled <- designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled
    # Build the default title (plus subtitle items) only if the caller did not set one.
    if (is.na(main)) {
        if (type == 13) {
            main <- PlotSubTitleItems(title = "Cumulative Distribution Function")
        } else {
            main <- PlotSubTitleItems(title = "Survival Function")
        }
        .addPlotSubTitleItems(designPlan, designMaster, main, type)
        if (!piecewiseSurvivalEnabled) {
            if (designPlan$.piecewiseSurvivalTime$.isLambdaBased(minNumberOfLambdas = 1)) {
                main$add("lambda", round(designPlan$lambda1[1], 4), 1)
                main$add("lambda", round(designPlan$lambda2, 4), 2)
            } else {
                main$add("pi", round(designPlan$pi1[1], 3), 1)
                main$add("pi", round(designPlan$pi2, 3), 2)
            }
        } else if (length(designPlan$hazardRatio) == 1) {
            main$add("Hazard Ratio", round(designPlan$hazardRatio[1], 3))
        }
    }
    # Determine the upper end of the time axis: last analysis time for the
    # non-piecewise (or trivial piecewise) case, otherwise the largest
    # piecewise survival time.
    if (!piecewiseSurvivalEnabled || (length(designPlan$piecewiseSurvivalTime) == 1 &&
            designPlan$piecewiseSurvivalTime[1] == 0)) {
        timeTo <- max(designPlan$analysisTime[designMaster$kMax, ])
    } else {
        timeTo <- max(designPlan$piecewiseSurvivalTime)
    }
    if (is.na(timeTo) || !is.numeric(timeTo) || is.infinite(timeTo)) {
        # warning("Unable to determine upper bound of time values", call. = FALSE)
        timeTo <- 0
    }
    timeTo <- timeTo + 10
    by <- timeTo / 1000 # 1000 sampling steps over the time axis
    timeValues <- seq(from = 0, to = timeTo, by = by)
    # Placeholder columns (-1) are overwritten below; survivalGroup1/2 appear unused afterwards.
    data <- data.frame(
        time = timeValues,
        lambdaGroup1 = rep(-1, length(timeValues)),
        lambdaGroup2 = rep(-1, length(timeValues)),
        survival1 = rep(-1, length(timeValues)),
        survival2 = rep(-1, length(timeValues)),
        survivalGroup1 = rep(-1, length(timeValues)),
        survivalGroup2 = rep(-1, length(timeValues))
    )
    signPrefix <- ifelse(type == 13, "", "-") # NOTE(review): not used in this function
    if (piecewiseSurvivalEnabled) {
        # Control group from the piecewise exponential distribution; treatment
        # group either from lambda1 or derived via hazardRatio[1].
        data$survival2 <- .getPiecewiseExponentialDistribution(
            timeValues, lambda2, designPlan$piecewiseSurvivalTime, designPlan$kappa
        )
        yParameterNames <- .getSurvivalFunctionPlotCommand(
            "pwExpDist", timeValues, lambda2, designPlan, type, piecewiseSurvivalEnabled
        )
        if (!is.null(lambda1) && !is.na(lambda1) && length(lambda1) == length(lambda2)) {
            data$survival1 <- .getPiecewiseExponentialDistribution(
                timeValues, lambda1, designPlan$piecewiseSurvivalTime, designPlan$kappa
            )
            yParameterNames <- c(
                yParameterNames,
                .getSurvivalFunctionPlotCommand(
                    "pwExpDist", timeValues, lambda1, designPlan, type, piecewiseSurvivalEnabled
                )
            )
        } else {
            .warnInCaseOfUnusedValuesForPlottingSurvival(designPlan$hazardRatio)
            data$survival1 <- data$survival2 * designPlan$hazardRatio[1]
            yParameterNames <- c(
                yParameterNames,
                .getSurvivalFunctionPlotCommand("pwExpDist", timeValues, lambda2, designPlan,
                    type, piecewiseSurvivalEnabled,
                    multiplyByHazardRatio = TRUE
                )
            )
        }
        # Source commands for the lambda step functions (same lambda1/lambda2 logic).
        yParameterNames <- c(
            yParameterNames,
            .getSurvivalFunctionPlotCommand(
                "lambdaStep", timeValues, lambda2, designPlan, type, piecewiseSurvivalEnabled
            )
        )
        if (!is.null(lambda1) && !is.na(lambda1) && length(lambda1) == length(lambda2)) {
            yParameterNames <- c(
                yParameterNames,
                .getSurvivalFunctionPlotCommand(
                    "lambdaStep", timeValues, lambda1, designPlan, type, piecewiseSurvivalEnabled
                )
            )
        } else {
            yParameterNames <- c(
                yParameterNames,
                .getSurvivalFunctionPlotCommand("lambdaStep", timeValues, lambda2, designPlan,
                    type, piecewiseSurvivalEnabled,
                    multiplyByHazardRatio = TRUE
                )
            )
        }
    } else {
        if (designPlan$.piecewiseSurvivalTime$.isLambdaBased(minNumberOfLambdas = 1)) {
            if (length(designPlan$lambda1) > 1) {
                lambda1 <- designPlan$lambda1[1]
                warning("Only the first 'lambda1' (", round(lambda1, 4),
                    ") was used for plotting",
                    call. = FALSE
                )
            }
        } else {
            .warnInCaseOfUnusedValuesForPlottingRates(designPlan$pi1)
        }
        # Derive lambdas from the event rates when pi1/pi2/eventTime are all given.
        if (!is.na(designPlan$pi1[1]) && !is.na(designPlan$pi2) && !is.na(designPlan$eventTime)) {
            lambda2 <- (-log(1 - designPlan$pi2))^(1 / designPlan$kappa) / designPlan$eventTime
            lambda1 <- (-log(1 - designPlan$pi1[1]))^(1 / designPlan$kappa) / designPlan$eventTime
        }
        data$survival2 <- .getPiecewiseExponentialDistribution(
            timeValues, lambda2, 0, designPlan$kappa
        )
        data$survival1 <- .getPiecewiseExponentialDistribution(
            timeValues, lambda1, 0, designPlan$kappa
        )
        yParameterNames <- .getSurvivalFunctionPlotCommand(
            "pwExpDist", timeValues, lambda2, designPlan, type, piecewiseSurvivalEnabled
        )
        yParameterNames <- c(
            yParameterNames,
            .getSurvivalFunctionPlotCommand(
                "pwExpDist", timeValues, lambda1, designPlan, type, piecewiseSurvivalEnabled
            )
        )
        yParameterNames <- c(
            yParameterNames,
            .getSurvivalFunctionPlotCommand(
                "lambdaStep", timeValues, lambda2, designPlan, type, piecewiseSurvivalEnabled
            )
        )
        yParameterNames <- c(
            yParameterNames,
            .getSurvivalFunctionPlotCommand(
                "lambdaStep", timeValues, lambda1, designPlan, type, piecewiseSurvivalEnabled
            )
        )
    }
    # two groups: 1 = treatment, 2 = control
    # Type 14 plots the survival function S(t) = 1 - F(t).
    if (type == 14) {
        data$survival1 <- 1 - data$survival1
        data$survival2 <- 1 - data$survival2
    }
    # Lambda step functions for the secondary axis.
    if (piecewiseSurvivalEnabled) {
        data$lambdaGroup2 <- .getLambdaStepFunction(
            timeValues, designPlan$piecewiseSurvivalTime, lambda2
        )
        if (length(lambda1) == 1) {
            if (!is.na(lambda1)) {
                data$lambdaGroup1 <- rep(lambda1, length(data$lambdaGroup2))
            } else {
                data$lambdaGroup1 <- data$lambdaGroup2 * designPlan$hazardRatio[1]
            }
        } else {
            data$lambdaGroup1 <- .getLambdaStepFunction(
                timeValues, designPlan$piecewiseSurvivalTime, lambda1
            )
        }
    } else {
        data$lambdaGroup2 <- .getLambdaStepFunction(timeValues, 0, lambda2)
        data$lambdaGroup1 <- .getLambdaStepFunction(timeValues, 0, lambda1)
    }
    # Rescale the lambda curves so they share the survival axis.
    scalingBaseValues1 <- na.omit(c(data$survival1, data$survival2))
    scalingBaseValues2 <- na.omit(c(data$lambdaGroup1, data$lambdaGroup2))
    scalingFactor <- 1
    if (length(scalingBaseValues1) > 0 && length(scalingBaseValues2) > 0) {
        scalingFactor <- max(scalingBaseValues1) / max(.getNextHigherValue(scalingBaseValues2))
    }
    # Long-format data frame: one category per curve.
    data2 <- data.frame(
        categories = c(
            rep("Treatm. piecew. exp.", nrow(data)),
            rep("Control piecew. exp.", nrow(data)),
            rep("Treatm. piecew. lambda", nrow(data)),
            rep("Control piecew. lambda", nrow(data))
        ),
        xValues = rep(data$time, 4),
        yValues = c(
            data$survival1, data$survival2,
            data$lambdaGroup1 * scalingFactor,
            data$lambdaGroup2 * scalingFactor
        )
    )
    if (is.na(legendPosition)) {
        if (type == 13) {
            legendPosition <- C_POSITION_LEFT_TOP
        } else {
            legendPosition <- C_POSITION_RIGHT_TOP
        }
    }
    if (is.na(palette) || palette == "Set1") {
        palette <- "Paired"
    }
    if (type == 13) {
        yAxisLabel1 <- "Cumulative Distribution Function"
    } else {
        yAxisLabel1 <- "Survival Function"
    }
    # If 'showSource' asks for it, return the reconstructed source command instead of plotting.
    srcCmd <- .showPlotSourceInformation(
        objectName = designPlanName,
        xParameterName = "time",
        yParameterNames = yParameterNames,
        showSource = showSource,
        xValues = timeValues
    )
    if (!is.null(srcCmd)) {
        if (.isSpecialPlotShowSourceArgument(showSource)) {
            return(invisible(srcCmd))
        }
        return(srcCmd)
    }
    if (is.null(plotSettings)) {
        plotSettings <- designPlan$.plotSettings
    }
    return(.plotDataFrame(data2,
        mainTitle = main, xlab = xlab, ylab = ylab, xAxisLabel = "Time",
        yAxisLabel1 = yAxisLabel1, yAxisLabel2 = "Lambda",
        plotPointsEnabled = FALSE, legendTitle = NA_character_,
        legendPosition = legendPosition, scalingFactor1 = 1,
        scalingFactor2 = scalingFactor, palette = palette,
        sided = designMaster$sided, plotSettings = plotSettings
    ))
}

# Warns when more than one 'alternative' value is present but only the first can
# be plotted; returns a title/value/subscript list for subtitle display, or NULL.
.warnInCaseOfUnusedValuesForPlottingMeans <- function(alternative) {
    if (length(alternative) > 1) {
        warning("Only the first 'alternative' (", round(alternative[1], 3),
            ") was used for plotting",
            call. = FALSE
        )
        return(list(title = "alternative", value = alternative[1], subscript = NA_character_))
    }
    return(NULL)
}

# Warns when more than one 'pi1' value is present but only the first can be
# plotted; returns a title/value/subscript list for subtitle display, or NULL.
.warnInCaseOfUnusedValuesForPlottingRates <- function(pi1) {
    if (length(pi1) > 1) {
        warning("Only the first 'pi1' (", round(pi1[1], 3),
            ") was used for plotting",
            call. = FALSE
        )
        return(list(title = "pi", value = pi1[1], subscript = "1"))
    }
    return(NULL)
}

# Warns when more than one 'hazardRatio' value is present but only the first can
# be plotted; returns a title/value/subscript list for subtitle display, or NULL.
.warnInCaseOfUnusedValuesForPlottingSurvival <- function(hazardRatio) {
    if (length(hazardRatio) > 1) {
        warning("Only the first 'hazardRatio' (", round(hazardRatio[1], 3),
            ") was used for plotting",
            call. = FALSE
        )
        return(list(title = "hazardRatio", value = hazardRatio[1], subscript = NA_character_))
    }
    return(NULL)
}

# Dispatches to the matching warn helper above based on the design plan class
# (means / rates / survival); only sample-size objects are checked. Returns the
# helper's list, or NULL when nothing applies.
.warnInCaseOfUnusedValuesForPlotting <- function(designPlan) {
    if (.isTrialDesignPlanMeans(designPlan) && designPlan$.isSampleSizeObject()) {
        return(.warnInCaseOfUnusedValuesForPlottingMeans(designPlan$alternative))
    }
    if (.isTrialDesignPlanRates(designPlan) && designPlan$.isSampleSizeObject()) {
        return(.warnInCaseOfUnusedValuesForPlottingRates(designPlan$pi1))
    }
    if (.isTrialDesignPlanSurvival(designPlan) && designPlan$.isSampleSizeObject()) {
        return(.warnInCaseOfUnusedValuesForPlottingSurvival(designPlan$hazardRatio))
    }
    return(NULL)
}

#'
#' @title
#' Trial Design Plan Plotting
#'
#' @param x The trial design plan, obtained from \cr
#' \code{\link[=getSampleSizeMeans]{getSampleSizeMeans()}}, \cr
#' \code{\link[=getSampleSizeRates]{getSampleSizeRates()}}, \cr
#' \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}}, \cr
#' \code{\link[=getPowerMeans]{getPowerMeans()}}, \cr
#' \code{\link[=getPowerRates]{getPowerRates()}} or \cr
#' \code{\link[=getPowerSurvival]{getPowerSurvival()}}.
#' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function).
#' @param main The main title.
#' @param xlab The x-axis label.
#' @param ylab The y-axis label.
#' @inheritParams param_palette
#' @inheritParams param_theta
#' @inheritParams param_plotPointsEnabled
#' @inheritParams param_showSource
#' @inheritParams param_plotSettings
#' @inheritParams param_legendPosition
#' @inheritParams param_grid
#' @param type The plot type (default = \code{1}). The following plot types are available:
#' \itemize{
#' \item \code{1}: creates a 'Boundaries' plot
#' \item \code{2}: creates a 'Boundaries Effect Scale' plot
#' \item \code{3}: creates a 'Boundaries p Values Scale' plot
#' \item \code{4}: creates an 'Error Spending' plot
#' \item \code{5}: creates a 'Sample Size' or 'Overall Power and Early Stopping' plot
#' \item \code{6}: creates a 'Number of Events' or 'Sample Size' plot
#' \item \code{7}: creates an 'Overall Power' plot
#' \item \code{8}: creates an 'Overall Early Stopping' plot
#' \item \code{9}: creates an 'Expected Number of Events' or 'Expected Sample Size' plot
#' \item \code{10}: creates a 'Study Duration' plot
#' \item \code{11}: creates an 'Expected Number of Subjects' plot
#' \item \code{12}: creates an 'Analysis Times' plot
#' \item \code{13}: creates a 'Cumulative Distribution Function' plot
#' \item \code{14}: creates a 'Survival Function' plot
#' \item \code{"all"}: creates all available plots and returns them as a grid plot or list
#' }
#' @inheritParams param_three_dots_plot
#'
#' @description
#' Plots a trial design plan.
#'
#' @details
#' Generic function to plot all kinds of trial design plans.
#'
#' @examples
#' \dontrun{
#' if (require(ggplot2)) plot(getSampleSizeMeans())
#' }
#'
#' @template return_object_ggplot
#'
#' @export
#'
plot.TrialDesignPlan <- function(x, y, ..., main = NA_character_,
        xlab = NA_character_, ylab = NA_character_,
        type = ifelse(x$.design$kMax == 1, 5L, 1L), palette = "Set1",
        theta = seq(-1, 1, 0.01), plotPointsEnabled = NA,
        legendPosition = NA_integer_, showSource = FALSE,
        grid = 1, plotSettings = NULL) {
    # Capture the caller's expression for 'x' so the generated source code
    # (showSource) can refer to the design plan by its original name.
    fCall <- match.call(expand.dots = FALSE)
    designPlanName <- deparse(fCall$x)
    .assertGgplotIsInstalled()
    .assertIsSingleInteger(grid, "grid", validateType = FALSE)
    # 'nMax' must not be passed via '...': it is always taken from the design plan.
    nMax <- list(...)[["nMax"]]
    if (!is.null(nMax)) {
        warning(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'nMax' (", nMax, ") will be ignored because it will be taken from design plan"
        )
    }
    # 'type' may be a single number, a vector, or "all"; resolve to numeric plot types.
    typeNumbers <- .getPlotTypeNumber(type, x)
    if (is.null(plotSettings)) {
        plotSettings <- .getGridPlotSettings(x, typeNumbers, grid)
    }
    p <- NULL
    plotList <- list()
    # Create one plot per requested type; collect them by caption when several were requested.
    for (typeNumber in typeNumbers) {
        p <- .plotTrialDesignPlan(
            designPlan = x, main = main, xlab = xlab, ylab = ylab, type = typeNumber,
            palette = palette, theta = theta, plotPointsEnabled = plotPointsEnabled,
            legendPosition = .getGridLegendPosition(legendPosition, typeNumbers, grid),
            showSource = showSource, designPlanName = designPlanName,
            plotSettings = plotSettings, ...
        )
        .printPlotShowSourceSeparator(showSource, typeNumber, typeNumbers)
        if (length(typeNumbers) > 1) {
            caption <- .getPlotCaption(x, typeNumber, stopIfNotFound = TRUE)
            plotList[[caption]] <- p
        }
    }
    # Single plot: return it directly (invisibly when showSource used a special mode).
    if (length(typeNumbers) == 1) {
        if (.isSpecialPlotShowSourceArgument(showSource)) {
            return(invisible(p))
        }
        return(p)
    }
    if (length(plotList) == 0) {
        message("No plots available for the specified design plan for ", x$.toString())
    }
    if (.isSpecialPlotShowSourceArgument(showSource)) {
        return(invisible(plotList))
    }
    # Multiple plots: arrange as a grid (or return the list, depending on 'grid').
    return(.createPlotResultObject(plotList, grid))
}
rpact/R/class_design_power_and_asn.R0000644000176200001440000003521214445307575017265 0ustar liggesusers## |
## |  *Power and average sample number result classes*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#'
#' @name PowerAndAverageSampleNumberResult
#'
#' @title
#' Power and Average Sample Number Result
#'
#' @description
#' Class for power and average sample number (ASN) results.
#' #' @template field_nMax #' @template field_theta #' @template field_averageSampleNumber #' @template field_calculatedPower #' @template field_overallEarlyStop #' @template field_earlyStop #' @template field_overallReject #' @template field_rejectPerStage #' @template field_overallFutility #' @template field_futilityPerStage #' #' @details #' This object cannot be created directly; use \code{\link[=getPowerAndAverageSampleNumber]{getPowerAndAverageSampleNumber()}} #' with suitable arguments to create it. #' #' @include class_core_parameter_set.R #' @include class_design.R #' #' @keywords internal #' #' @importFrom methods new #' PowerAndAverageSampleNumberResult <- setRefClass("PowerAndAverageSampleNumberResult", contains = "ParameterSet", fields = list( .design = "TrialDesign", nMax = "numeric", theta = "numeric", averageSampleNumber = "numeric", calculatedPower = "numeric", overallEarlyStop = "numeric", earlyStop = "matrix", overallReject = "numeric", rejectPerStage = "matrix", overallFutility = "numeric", futilityPerStage = "matrix" ), methods = list( initialize = function(design, theta = seq(-1, 1, 0.05), nMax = 100L, ...) { callSuper(.design = design, theta = theta, nMax = nMax, ...) 
theta <<- .assertIsValidThetaRange(thetaRange = theta, thetaAutoSeqEnabled = FALSE) .initPowerAndAverageSampleNumber() .parameterNames <<- .getParameterNames(design = design) .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS }, clone = function() { return(PowerAndAverageSampleNumberResult(design = .self$.design, theta = .self$theta, nMax = .self$nMax)) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { "Method for automatically printing a power and average sample size (ASN) result" .resetCat() if (showType == 2) { callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Power and average sample size (ASN):\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getGeneratedParameters(), "Output", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) if (.design$kMax > 1) { .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled ) if (.design$kMax > 1) { .cat(" [k]: values at stage k\n", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } } }, .toString = function(startWithUpperCase = FALSE) { s <- "power and average sample size (ASN)" return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) }, .initPowerAndAverageSampleNumber = function() { .assertIsTrialDesignInverseNormalOrGroupSequential(.design) 
.assertIsValidSidedParameter(.design$sided) if (nMax <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'nMax' must be an integer > 0") } .setParameterType("nMax", ifelse(nMax == C_NA_MAX_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) thetaIsDefault <- length(theta) == length(C_POWER_ASN_THETA_DEFAULT) && sum(theta == C_POWER_ASN_THETA_DEFAULT) == length(theta) .setParameterType("theta", ifelse(thetaIsDefault, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) kMax <- .design$kMax # initialization numberOfThetas <- length(theta) averageSampleNumber <<- rep(NA_real_, numberOfThetas) .setParameterType("averageSampleNumber", C_PARAM_GENERATED) calculatedPower <<- rep(NA_real_, numberOfThetas) .setParameterType("calculatedPower", C_PARAM_GENERATED) earlyStop <<- matrix(NA_real_, kMax, numberOfThetas) .setParameterType("earlyStop", C_PARAM_GENERATED) rejectPerStage <<- matrix(NA_real_, kMax, numberOfThetas) .setParameterType("rejectPerStage", C_PARAM_GENERATED) futilityPerStage <<- matrix(NA_real_, kMax - 1, numberOfThetas) .setParameterType("futilityPerStage", C_PARAM_GENERATED) rowNames <- paste("stage =", c(1:kMax)) rownames(earlyStop) <<- rowNames rownames(rejectPerStage) <<- rowNames if (kMax > 1) { rownames(futilityPerStage) <<- rowNames[1:(kMax - 1)] } for (i in 1:numberOfThetas) { result <- .getPowerAndAverageSampleNumber(theta = theta[i]) averageSampleNumber[i] <<- result$averageSampleNumber calculatedPower[i] <<- result$calculatedPower earlyStop[1:(kMax - 1), i] <<- result$earlyStop[1:(kMax - 1)] rejectPerStage[, i] <<- result$rejectPerStage[1:kMax] futilityPerStage[, i] <<- result$futilityPerStage[1:(kMax - 1)] } overallEarlyStop <<- .getOverallParameter(earlyStop) .setParameterType("overallEarlyStop", C_PARAM_GENERATED) overallReject <<- .getOverallParameter(rejectPerStage) .setParameterType("overallReject", C_PARAM_GENERATED) overallFutility <<- .getOverallParameter(futilityPerStage) .setParameterType("overallFutility", C_PARAM_GENERATED) }, 
.getPowerAndAverageSampleNumber = function(theta) { kMax <- .design$kMax futilityBounds <- .design$futilityBounds informationRates <- .design$informationRates criticalValues <- .design$criticalValues sided <- .design$sided delayedInformation <- .design$delayedInformation .earlyStop <- rep(NA_real_, kMax) .futilityPerStage <- rep(NA_real_, kMax) if (!any(is.na(delayedInformation))) { contRegionLower <- futilityBounds contRegionUpper <- criticalValues decisionCriticalValues <- .design$decisionCriticalValues probs <- .calculateDecisionProbabilities( sqrtShift = sqrt(nMax) * theta, informationRates, delayedInformation, contRegionUpper, contRegionLower, decisionCriticalValues ) .averageSampleNumber <- nMax - sum(probs$stoppingProbabilities * (informationRates[kMax] - delayedInformation - informationRates[1:(kMax - 1)]) * nMax) .calculatedPower <- probs$power[kMax] .rejectPerStage <- probs$rejectionProbabilities .earlyStop <- probs$stoppingProbabilities .futilityPerStage <- probs$futilityProbabilities } else { if (sided == 2) { if (.design$typeOfDesign == C_TYPE_OF_DESIGN_PT || !is.null(.design$typeBetaSpending) && .design$typeBetaSpending != "none") { futilityBounds[is.na(futilityBounds)] <- 0 decisionMatrix <- matrix(c( -criticalValues - theta * sqrt(nMax * informationRates), c(-futilityBounds - theta * sqrt(nMax * informationRates[1:(kMax - 1)]), 0), c(futilityBounds - theta * sqrt(nMax * informationRates[1:(kMax - 1)]), 0), criticalValues - theta * sqrt(nMax * informationRates) ), nrow = 4, byrow = TRUE) } else { decisionMatrix <- matrix(c( -criticalValues - theta * sqrt(nMax * informationRates), criticalValues - theta * sqrt(nMax * informationRates) ), nrow = 2, byrow = TRUE) } } else { shiftedFutilityBounds <- futilityBounds - theta * sqrt(nMax * informationRates[1:(kMax - 1)]) shiftedFutilityBounds[futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <- C_FUTILITY_BOUNDS_DEFAULT decisionMatrix <- matrix(c( shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, criticalValues - 
theta * sqrt(nMax * informationRates) ), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities(decisionMatrix, informationRates) if (nrow(probs) == 3) { .averageSampleNumber <- nMax - sum((probs[3, 1:(kMax - 1)] - probs[2, 1:(kMax - 1)] + probs[1, 1:(kMax - 1)]) * (informationRates[kMax] - informationRates[1:(kMax - 1)]) * nMax) } else { .averageSampleNumber <- nMax - sum((probs[5, 1:(kMax - 1)] - probs[4, 1:(kMax - 1)] + probs[3, 1:(kMax - 1)] - probs[2, 1:(kMax - 1)] + probs[1, 1:(kMax - 1)]) * (informationRates[kMax] - informationRates[1:(kMax - 1)]) * nMax) } if (sided == 2) { if (nrow(probs) == 3) { .calculatedPower <- sum(probs[3, 1:kMax] - probs[2, 1:kMax] + probs[1, 1:kMax]) .rejectPerStage <- probs[3, 1:kMax] - probs[2, 1:kMax] + probs[1, 1:kMax] } else { .calculatedPower <- sum(probs[5, 1:kMax] - probs[4, 1:kMax] + probs[1, 1:kMax]) .rejectPerStage <- probs[5, 1:kMax] - probs[4, 1:kMax] + probs[1, 1:kMax] if (kMax > 1) { .futilityPerStage <- probs[3, 1:kMax] - probs[2, 1:kMax] } } } else { .calculatedPower <- sum(probs[3, 1:kMax] - probs[2, 1:kMax]) .rejectPerStage <- probs[3, 1:kMax] - probs[2, 1:kMax] if (kMax > 1) { .futilityPerStage <- probs[1, 1:(kMax - 1)] .rejectPerStage <- .getNoEarlyEfficacyZeroCorrectedValues(.design, .rejectPerStage) } } if (kMax > 1) { if (nrow(probs) == 3) { .earlyStop <- probs[3, 1:(kMax - 1)] - probs[2, 1:(kMax - 1)] + probs[1, 1:(kMax - 1)] } else { .earlyStop <- probs[5, 1:(kMax - 1)] - probs[4, 1:(kMax - 1)] + probs[3, 1:(kMax - 1)] - probs[2, 1:(kMax - 1)] + probs[1, 1:(kMax - 1)] } } } return(list( averageSampleNumber = .averageSampleNumber, calculatedPower = .calculatedPower, earlyStop = .earlyStop, rejectPerStage = .rejectPerStage, futilityPerStage = .futilityPerStage )) }, .getOverallParameter = function(parameter) { if (is.null(parameter) || length(parameter) == 0) { return(rep(NA_real_, length(theta))) } overallParameter <- parameter overallParameter[is.na(overallParameter)] <- 0 overallParameter <- 
colSums(overallParameter) return(overallParameter) } ) ) #' #' @title #' Coerce Power And Average Sample Number Result to a Data Frame #' #' @description #' Returns the \code{\link{PowerAndAverageSampleNumberResult}} as data frame. #' #' @param x A \code{\link{PowerAndAverageSampleNumberResult}} object. #' @inheritParams param_niceColumnNamesEnabled #' @inheritParams param_includeAllParameters #' @inheritParams param_three_dots #' #' @details #' Coerces the \code{\link{PowerAndAverageSampleNumberResult}} object to a data frame. #' #' @template return_dataframe #' #' @examples #' data <- as.data.frame(getPowerAndAverageSampleNumber(getDesignGroupSequential())) #' head(data) #' dim(data) #' #' @export #' #' @keywords internal #' as.data.frame.PowerAndAverageSampleNumberResult <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) { parameterNames <- x$.getVisibleFieldNames() parameterNames <- parameterNames[parameterNames != "nMax"] dataFrame <- .getAsDataFrame( parameterSet = x, parameterNames = parameterNames, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters, tableColumnNames = .getTableColumnNames(design = x$.design) ) return(dataFrame) } rpact/R/f_simulation_multiarm.R0000644000176200001440000011607314430677616016340 0ustar liggesusers## | ## | *Simulation of multi-arm design with combination test and conditional error approach* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 
6991 $ ## | Last changed: $Date: 2023-05-16 14:50:11 +0200 (Di, 16 Mai 2023) $ ## | Last changed by: $Author: wassmer $ ## | #' @include f_core_utilities.R NULL .getIndicesOfClosedHypothesesSystemForSimulation <- function(gMax) { indices <- as.matrix(expand.grid(rep(list(1:0), gMax)))[1:(2^gMax - 1), ] if (gMax == 1) { indices <- as.matrix(indices) } return(indices) } .selectTreatmentArms <- function(stage, effectVector, typeOfSelection, epsilonValue, rValue, threshold, selectArmsFunction, survival = FALSE) { gMax <- length(effectVector) if (typeOfSelection != "userDefined") { if (typeOfSelection == "all") { selectedArms <- rep(TRUE, gMax) } else { selectedArms <- rep(FALSE, gMax) if (typeOfSelection == "best") { selectedArms[which.max(effectVector)] <- TRUE } else if (tolower(typeOfSelection) == "rbest") { selectedArms[order(effectVector, decreasing = TRUE)[1:rValue]] <- TRUE selectedArms[is.na(effectVector)] <- FALSE } else if (typeOfSelection == "epsilon") { selectedArms[max(effectVector, na.rm = TRUE) - effectVector <= epsilonValue] <- TRUE selectedArms[is.na(effectVector)] <- FALSE } } selectedArms[effectVector <= threshold] <- FALSE } else { functionArgumentNames <- .getFunctionArgumentNames(selectArmsFunction, ignoreThreeDots = TRUE) if (length(functionArgumentNames) == 1) { .assertIsValidFunction( fun = selectArmsFunction, funArgName = "selectArmsFunction", expectedArguments = c("effectVector"), validateThreeDots = FALSE ) selectedArms <- selectArmsFunction(effectVector) } else { .assertIsValidFunction( fun = selectArmsFunction, funArgName = "selectArmsFunction", expectedArguments = c("effectVector", "stage"), validateThreeDots = FALSE ) selectedArms <- selectArmsFunction(effectVector = effectVector, stage = stage) } msg <- paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'selectArmsFunction' returned an illegal or undefined result (", .arrayToString(selectedArms), "); " ) if (length(selectedArms) != gMax) { stop(msg, "the output must be a logical vector of 
length 'gMax' (", gMax, ")") } if (!is.logical(selectedArms)) { stop(msg, "the output must be a logical vector (is ", .getClassName(selectedArms), ")") } } if (!survival) { selectedArms <- c(selectedArms, TRUE) } return(selectedArms) } .performClosedCombinationTestForSimulationMultiArm <- function(..., stageResults, design, indices, intersectionTest, successCriterion) { if (.isTrialDesignGroupSequential(design) && (design$kMax > 1)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Group sequential design cannot be used for designs with treatment arm selection" ) } gMax <- nrow(stageResults$testStatistics) kMax <- design$kMax adjustedStageWisePValues <- matrix(NA_real_, nrow = 2^gMax - 1, ncol = kMax) overallAdjustedTestStatistics <- matrix(NA_real_, nrow = 2^gMax - 1, ncol = kMax) rejected <- matrix(FALSE, nrow = gMax, ncol = kMax) rejectedIntersections <- matrix(FALSE, nrow = nrow(indices), ncol = kMax) futility <- matrix(FALSE, nrow = gMax, ncol = kMax - 1) futilityIntersections <- matrix(FALSE, nrow = nrow(indices), ncol = kMax - 1) rejectedIntersectionsBefore <- matrix(FALSE, nrow = nrow(indices), ncol = 1) successStop <- rep(FALSE, kMax) futilityStop <- rep(FALSE, kMax - 1) if (.isTrialDesignFisher(design)) { weightsFisher <- .getWeightsFisher(design) } else { weightsInverseNormal <- .getWeightsInverseNormal(design) } if (gMax == 1) { intersectionTest <- "Bonferroni" } separatePValues <- stageResults$separatePValues if (intersectionTest == "Dunnett") { subjectsPerStage <- stageResults[[ifelse( !is.null(stageResults[["subjectsPerStage"]]), "subjectsPerStage", "eventsPerStage" )]] testStatistics <- stageResults$testStatistics } else { subjectsPerStage <- NULL testStatistics <- NULL } for (k in 1:kMax) { if (intersectionTest == "Dunnett") { allocationRatiosPerStage <- rep(stageResults$allocationRatioPlanned[k], gMax) allocationRatiosPerStage[is.na(subjectsPerStage[1:gMax, k])] <- NA_real_ } for (i in 1:(2^gMax - 1)) { if (!all(is.na(separatePValues[indices[i, ] == 1, 
k]))) { if (intersectionTest == "Dunnett") { allocationRatiosSelected <- as.numeric(na.omit(allocationRatiosPerStage[indices[i, ] == 1])) sigma <- sqrt(allocationRatiosSelected / (1 + allocationRatiosSelected)) %*% sqrt(t(allocationRatiosSelected / (1 + allocationRatiosSelected))) diag(sigma) <- 1 maxTestStatistic <- max(testStatistics[indices[i, ] == 1, k], na.rm = TRUE) adjustedStageWisePValues[i, k] <- 1 - .getMultivariateDistribution( type = "normal", upper = maxTestStatistic, sigma = sigma, df = NA_real_ ) } # Bonferroni adjusted p-values else if (intersectionTest == "Bonferroni") { adjustedStageWisePValues[i, k] <- min(c(sum(indices[ i, !is.na(separatePValues[, k]) ]) * min(separatePValues[indices[i, ] == 1, k], na.rm = TRUE), 1)) } # Simes adjusted p-values else if (intersectionTest == "Simes") { adjustedStageWisePValues[i, k] <- min(sum(indices[ i, !is.na(separatePValues[, k]) ]) / (1:sum(indices[i, !is.na(separatePValues[, k])])) * sort(separatePValues[indices[i, ] == 1, k])) } # Sidak adjusted p-values else if (intersectionTest == "Sidak") { adjustedStageWisePValues[i, k] <- 1 - (1 - min(separatePValues[indices[i, ] == 1, k], na.rm = TRUE))^ sum(indices[i, !is.na(separatePValues[, k])]) } # Hierarchically ordered hypotheses else if (intersectionTest == "Hierarchical") { separatePValues <- separatePValues separatePValues[is.na(separatePValues[, 1:kMax])] <- 1 adjustedStageWisePValues[i, k] <- separatePValues[min(which(indices[i, ] == 1)), k] } if (.isTrialDesignFisher(design)) { overallAdjustedTestStatistics[i, k] <- prod(adjustedStageWisePValues[i, 1:k]^weightsFisher[1:k]) } else { overallAdjustedTestStatistics[i, k] <- (weightsInverseNormal[1:k] %*% .getOneMinusQNorm(adjustedStageWisePValues[i, 1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) } } if (.isTrialDesignFisher(design)) { rejectedIntersections[i, k] <- (overallAdjustedTestStatistics[i, k] <= design$criticalValues[k]) if (k < kMax) { futilityIntersections[i, k] <- (adjustedStageWisePValues[i, k] 
>= design$alpha0Vec[k]) } } else if (.isTrialDesignInverseNormal(design)) { rejectedIntersections[i, k] <- (overallAdjustedTestStatistics[i, k] >= design$criticalValues[k]) if (k < kMax) { futilityIntersections[i, k] <- (overallAdjustedTestStatistics[i, k] <= design$futilityBounds[k]) } } rejectedIntersections[is.na(rejectedIntersections[, k]), k] <- FALSE if ((k == kMax) && !rejectedIntersections[1, k]) { break } } rejectedIntersections[, k] <- rejectedIntersections[, k] | rejectedIntersectionsBefore rejectedIntersectionsBefore <- matrix(rejectedIntersections[, k], ncol = 1) for (j in 1:gMax) { rejected[j, k] <- all(rejectedIntersections[indices[, j] == 1, k], na.rm = TRUE) if (k < kMax) { futility[j, k] <- any(futilityIntersections[indices[, j] == 1, k], na.rm = TRUE) } } if (successCriterion == "all") { successStop[k] <- all(rejected[stageResults$selectedArms[1:gMax, k], k]) } else { successStop[k] <- any(rejected[, k]) } if (k < kMax) { futilityStop[k] <- all(futility[stageResults$selectedArms[1:gMax, k], k]) if (all(stageResults$selectedArms[1:gMax, k + 1] == FALSE)) { futilityStop[k] <- TRUE } } } return(list( separatePValues = separatePValues, adjustedStageWisePValues = adjustedStageWisePValues, overallAdjustedTestStatistics = overallAdjustedTestStatistics, rejected = rejected, rejectedIntersections = rejectedIntersections, selectedArms = stageResults$selectedArms, successStop = successStop, futilityStop = futilityStop )) } .getCriticalValuesDunnettForSimulation <- function(alpha, indices, allocationRatioPlanned) { if (allocationRatioPlanned[1] != allocationRatioPlanned[2]) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "The conditional Dunnett test assumes equal allocation ratios over the stages" ) } gMax <- ncol(indices) frac <- rep(allocationRatioPlanned[1], gMax) / (1 + allocationRatioPlanned[1]) criticalValuesDunnett <- rep(NA_real_, 2^gMax - 1) for (i in 1:(2^gMax - 1)) { zeta <- sqrt(frac[indices[i, ] == 1]) sigma <- zeta %*% t(zeta) diag(sigma) <- 1 
criticalValuesDunnett[i] <- .getMultivariateDistribution( type = "quantile", upper = NA_real_, sigma = sigma, alpha = alpha ) } return(criticalValuesDunnett) } .performClosedConditionalDunnettTestForSimulation <- function(stageResults, design, indices, criticalValuesDunnett, successCriterion) { testStatistics <- stageResults$testStatistics separatePValues <- stageResults$separatePValues subjectsPerStage <- stageResults$subjectsPerStage overallTestStatistics <- stageResults$overallTestStatistics gMax <- nrow(testStatistics) informationAtInterim <- design$informationAtInterim secondStageConditioning <- design$secondStageConditioning kMax <- 2 frac <- rep(stageResults$allocationRatioPlanned[1], gMax) / (1 + stageResults$allocationRatioPlanned[1]) conditionalErrorRate <- matrix(NA_real_, nrow = 2^gMax - 1, ncol = 2) secondStagePValues <- matrix(NA_real_, nrow = 2^gMax - 1, ncol = 2) rejected <- matrix(FALSE, nrow = gMax, ncol = 2) rejectedIntersections <- matrix(FALSE, nrow = nrow(indices), ncol = kMax) futilityStop <- FALSE successStop <- rep(FALSE, kMax) signedTestStatistics <- testStatistics signedOverallTestStatistics <- overallTestStatistics signedOverallTestStatistics[, 2] <- sqrt(informationAtInterim) * testStatistics[, 1] + sqrt(1 - informationAtInterim) * testStatistics[, 2] if (all(stageResults$selectedArms[1:gMax, 2] == FALSE)) { futilityStop <- TRUE } for (i in 1:(2^gMax - 1)) { integrand <- function(x) { innerProduct <- 1 for (g in (1:gMax)) { if (indices[i, g] == 1) { innerProduct <- innerProduct * stats::pnorm(((criticalValuesDunnett[i] - sqrt(informationAtInterim) * signedTestStatistics[g, 1] + sqrt(1 - informationAtInterim) * sqrt(frac[g]) * x)) / sqrt((1 - informationAtInterim) * (1 - frac[g]))) } } return(innerProduct * dnorm(x)) } conditionalErrorRate[i, 1] <- 1 - integrate(integrand, lower = -Inf, upper = Inf)$value if (!all(is.na(separatePValues[indices[i, ] == 1, 2]))) { if (secondStageConditioning) { maxOverallTestStatistic <- max( 
signedOverallTestStatistics[indices[i, ] == 1, 2], na.rm = TRUE ) integrand <- function(x) { innerProduct <- 1 for (g in (1:gMax)) { if ((indices[i, g] == 1) && !is.na(overallTestStatistics[g, 2])) { innerProduct <- innerProduct * stats::pnorm(((maxOverallTestStatistic - sqrt(informationAtInterim) * signedTestStatistics[g, 1] + sqrt(1 - informationAtInterim) * sqrt(frac[g]) * x)) / sqrt((1 - informationAtInterim) * (1 - frac[g]))) } } return(innerProduct * dnorm(x)) } secondStagePValues[i, 2] <- 1 - integrate(integrand, lower = -Inf, upper = Inf)$value } else { maxTestStatistic <- max(signedTestStatistics[indices[i, ] == 1, 2], na.rm = TRUE) integrand <- function(x) { innerProduct <- 1 for (g in (1:gMax)) { if ((indices[i, g] == 1) && !is.na(separatePValues[g, 2])) { innerProduct <- innerProduct * stats::pnorm(((maxTestStatistic + sqrt(frac[g]) * x)) / sqrt(1 - frac[g])) } } return(innerProduct * dnorm(x)) } secondStagePValues[i, 2] <- 1 - integrate(integrand, lower = -Inf, upper = Inf)$value } } rejectedIntersections[i, 2] <- (secondStagePValues[i, 2] <= conditionalErrorRate[i, 1]) rejectedIntersections[is.na(rejectedIntersections[, 2]), 2] <- FALSE if (!rejectedIntersections[1, 2]) { break } } for (j in 1:gMax) { rejected[j, 2] <- all(rejectedIntersections[indices[, j] == 1, 2], na.rm = TRUE) } if (successCriterion == "all") { successStop[2] <- all(rejected[stageResults$selectedArms[1:gMax, 2], 2]) } else { successStop[2] <- any(rejected[, 2]) } return(list( separatePValues = separatePValues, conditionalErrorRate = conditionalErrorRate, secondStagePValues = secondStagePValues, rejected = rejected, rejectedIntersections = rejectedIntersections, selectedArms = stageResults$selectedArms, successStop = successStop, futilityStop = futilityStop )) } .createSimulationResultsMultiArmObject <- function(..., design, activeArms, effectMatrix, typeOfShape, muMaxVector = NA_real_, # means only piMaxVector = NA_real_, # rates only piControl = NA_real_, # rates only 
omegaMaxVector = NA_real_, # survival only gED50, slope, intersectionTest, stDev = NA_real_, # means only directionUpper = NA, # rates + survival only adaptations, typeOfSelection, effectMeasure, successCriterion, epsilonValue, rValue, threshold, plannedSubjects = NA_real_, # means + rates only plannedEvents = NA_real_, # survival only allocationRatioPlanned, minNumberOfSubjectsPerStage = NA_real_, # means + rates only maxNumberOfSubjectsPerStage = NA_real_, # means + rates only minNumberOfEventsPerStage = NA_real_, # survival only maxNumberOfEventsPerStage = NA_real_, # survival only conditionalPower, thetaH1 = NA_real_, # means + survival only stDevH1 = NA_real_, # means only piTreatmentsH1 = NA_real_, # rates only piControlH1 = NA_real_, # rates only maxNumberOfIterations, seed, calcSubjectsFunction = NULL, # means + rates only calcEventsFunction = NULL, # survival only selectArmsFunction, showStatistics, endpoint = c("means", "rates", "survival")) { endpoint <- match.arg(endpoint) .assertIsSinglePositiveInteger(activeArms, "activeArms", naAllowed = FALSE, validateType = FALSE) if (activeArms > 8) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'activeArms' (", activeArms, ") max not exceed 8") } .assertIsSingleNumber(threshold, "threshold", naAllowed = FALSE) .assertIsSingleNumber(gED50, "gED50", naAllowed = TRUE) .assertIsInOpenInterval(gED50, "gED50", 0, NULL, naAllowed = TRUE) .assertIsSingleNumber(slope, "slope", naAllowed = TRUE) .assertIsInOpenInterval(slope, "slope", 0, NULL, naAllowed = TRUE) .assertIsSinglePositiveInteger(rValue, "rValue", naAllowed = TRUE, validateType = FALSE) .assertIsNumericVector(allocationRatioPlanned, "allocationRatioPlanned", naAllowed = TRUE) .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM, naAllowed = TRUE) .assertIsSingleNumber(conditionalPower, "conditionalPower", naAllowed = TRUE) .assertIsInOpenInterval(conditionalPower, "conditionalPower", 0, 1, naAllowed = TRUE) 
.assertIsLogicalVector(adaptations, "adaptations", naAllowed = TRUE) if (endpoint %in% c("means", "rates")) { .assertIsNumericVector(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", naAllowed = TRUE) .assertIsNumericVector(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", naAllowed = TRUE) } else if (endpoint == "survival") { .assertIsNumericVector(minNumberOfEventsPerStage, "minNumberOfEventsPerStage", naAllowed = TRUE) .assertIsNumericVector(maxNumberOfEventsPerStage, "maxNumberOfEventsPerStage", naAllowed = TRUE) } .assertIsSinglePositiveInteger(maxNumberOfIterations, "maxNumberOfIterations", validateType = FALSE) .assertIsSingleLogical(showStatistics, "showStatistics", naAllowed = FALSE) .assertIsSingleNumber(seed, "seed", naAllowed = TRUE) if (endpoint %in% c("rates", "survival")) { .assertIsSingleLogical(directionUpper, "directionUpper") } if (endpoint %in% c("means", "survival")) { .assertIsSingleNumber(thetaH1, "thetaH1", naAllowed = TRUE) # means + survival only } if (endpoint == "means") { .assertIsValidStandardDeviation(stDev) # means only .assertIsSingleNumber(stDevH1, "stDevH1", naAllowed = TRUE) .assertIsInOpenInterval(stDevH1, "stDevH1", 0, NULL, naAllowed = TRUE) } successCriterion <- .assertIsValidSuccessCriterion(successCriterion) effectMeasure <- .assertIsValidEffectMeasure(effectMeasure) if (endpoint == "means") { simulationResults <- SimulationResultsMultiArmMeans(design, showStatistics = showStatistics) } else if (endpoint == "rates") { simulationResults <- SimulationResultsMultiArmRates(design, showStatistics = showStatistics) } else if (endpoint == "survival") { simulationResults <- SimulationResultsMultiArmSurvival(design, showStatistics = showStatistics) } gMax <- activeArms kMax <- design$kMax intersectionTest <- .getCorrectedIntersectionTestMultiArmIfNecessary( design, intersectionTest, userFunctionCallEnabled = TRUE ) .assertIsValidIntersectionTestMultiArm(design, intersectionTest) typeOfSelection <- 
.assertIsValidTypeOfSelection(typeOfSelection, rValue, epsilonValue, activeArms) if (length(typeOfSelection) == 1 && typeOfSelection == "userDefined" && !is.null(threshold) && length(threshold) == 1 && threshold != -Inf) { warning("'threshold' (", threshold, ") will be ignored because 'typeOfSelection' = \"userDefined\"", call. = FALSE) threshold <- -Inf } if (length(typeOfSelection) == 1 && typeOfSelection != "userDefined" && !is.null(selectArmsFunction)) { warning("'selectArmsFunction' will be ignored because 'typeOfSelection' is not \"userDefined\"", call. = FALSE) } else if (!is.null(selectArmsFunction) && is.function(selectArmsFunction)) { simulationResults$selectArmsFunction <- selectArmsFunction } typeOfShape <- .assertIsValidTypeOfShape(typeOfShape) if (endpoint %in% c("rates", "survival")) { .setValueAndParameterType(simulationResults, "directionUpper", directionUpper, TRUE) } if (endpoint == "means") { effectMatrix <- .assertIsValidEffectMatrixMeans( typeOfShape = typeOfShape, effectMatrix = effectMatrix, muMaxVector = muMaxVector, gED50 = gED50, gMax = gMax, slope = slope ) if (typeOfShape == "userDefined") { muMaxVector <- effectMatrix[, gMax] } else { .assertIsNumericVector(muMaxVector, "muMaxVector") } .setValueAndParameterType( simulationResults, "muMaxVector", muMaxVector, C_ALTERNATIVE_POWER_SIMULATION_DEFAULT ) if (typeOfShape == "userDefined") { simulationResults$.setParameterType("muMaxVector", C_PARAM_DERIVED) } } else if (endpoint == "rates") { .assertIsSingleNumber(piTreatmentsH1, "piTreatmentsH1", naAllowed = TRUE) .assertIsInOpenInterval(piTreatmentsH1, "piTreatmentsH1", 0, 1, naAllowed = TRUE) piTreatmentsH1 <- .ignoreParameterIfNotUsed( "piTreatmentsH1", piTreatmentsH1, kMax > 1, "design is fixed ('kMax' = 1)", "Assumed active rate(s)" ) .setValueAndParameterType(simulationResults, "piTreatmentsH1", piTreatmentsH1, NA_real_) .assertIsSingleNumber(piControl, "piControl", naAllowed = FALSE) # , noDefaultAvailable = TRUE) 
.assertIsInOpenInterval(piControl, "piControl", 0, 1, naAllowed = FALSE) .setValueAndParameterType(simulationResults, "piControl", piControl, 0.2) piControlH1 <- .ignoreParameterIfNotUsed( "piControlH1", piControlH1, kMax > 1, "design is fixed ('kMax' = 1)", "Assumed control rate" ) .assertIsSingleNumber(piControlH1, "piControlH1", naAllowed = TRUE) .assertIsInOpenInterval(piControlH1, "piControlH1", 0, 1, naAllowed = TRUE) .setValueAndParameterType(simulationResults, "piControlH1", piControlH1, NA_real_) effectMatrix <- .assertIsValidEffectMatrixRates( typeOfShape = typeOfShape, effectMatrix = effectMatrix, piMaxVector = piMaxVector, piControl = piControl, gED50 = gED50, gMax = gMax, slope = slope ) if (typeOfShape == "userDefined") { piMaxVector <- effectMatrix[, gMax] } .setValueAndParameterType(simulationResults, "piMaxVector", piMaxVector, C_PI_1_DEFAULT) if (typeOfShape == "userDefined") { simulationResults$.setParameterType("piMaxVector", C_PARAM_DERIVED) } } else if (endpoint == "survival") { effectMatrix <- .assertIsValidEffectMatrixSurvival(typeOfShape, effectMatrix, omegaMaxVector, gED50, gMax, slope) if (typeOfShape == "userDefined") { omegaMaxVector <- effectMatrix[, gMax] } .setValueAndParameterType(simulationResults, "omegaMaxVector", omegaMaxVector, C_RANGE_OF_HAZARD_RATIOS_DEFAULT) if (typeOfShape == "userDefined") { simulationResults$.setParameterType("omegaMaxVector", C_PARAM_DERIVED) } .assertIsIntegerVector(plannedEvents, "plannedEvents", validateType = FALSE) if (length(plannedEvents) != kMax) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'plannedEvents' (", .arrayToString(plannedEvents), ") must have length ", kMax ) } .assertIsInClosedInterval(plannedEvents, "plannedEvents", lower = 1, upper = NULL) .assertValuesAreStrictlyIncreasing(plannedEvents, "plannedEvents") .setValueAndParameterType(simulationResults, "plannedEvents", plannedEvents, NA_real_) } .assertIsValidThreshold(threshold, gMax) if (endpoint %in% c("means", "rates")) { 
.assertIsValidPlannedSubjects(plannedSubjects, kMax) # means + rates only } if (endpoint %in% c("means", "survival")) { thetaH1 <- .ignoreParameterIfNotUsed( "thetaH1", thetaH1, kMax > 1, "design is fixed ('kMax' = 1)", "Assumed effect" ) } if (endpoint == "means") { stDevH1 <- .ignoreParameterIfNotUsed( "stDevH1", stDevH1, kMax > 1, "design is fixed ('kMax' = 1)", "Assumed standard deviation" ) } conditionalPower <- .ignoreParameterIfNotUsed( "conditionalPower", conditionalPower, kMax > 1, "design is fixed ('kMax' = 1)" ) if (endpoint %in% c("means", "rates")) { # means + rates only minNumberOfSubjectsPerStage <- .ignoreParameterIfNotUsed( "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, kMax > 1, "design is fixed ('kMax' = 1)" ) minNumberOfSubjectsPerStage <- .assertIsValidNumberOfSubjectsPerStage(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, calcSubjectsFunction, kMax, endpoint = endpoint ) maxNumberOfSubjectsPerStage <- .ignoreParameterIfNotUsed( "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, kMax > 1, "design is fixed ('kMax' = 1)" ) maxNumberOfSubjectsPerStage <- .assertIsValidNumberOfSubjectsPerStage(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, calcSubjectsFunction, kMax, endpoint = endpoint ) if (kMax > 1) { if (!all(is.na(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage)) && any(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage < 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") must be not smaller than minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ")" ) } .setValueAndParameterType( simulationResults, "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_ ) .setValueAndParameterType( simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_ ) } } else if (endpoint 
== "survival") { minNumberOfEventsPerStage <- .ignoreParameterIfNotUsed( "minNumberOfEventsPerStage", minNumberOfEventsPerStage, kMax > 1, "design is fixed ('kMax' = 1)" ) minNumberOfEventsPerStage <- .assertIsValidNumberOfSubjectsPerStage(minNumberOfEventsPerStage, "minNumberOfEventsPerStage", plannedEvents, conditionalPower, calcEventsFunction, kMax, endpoint = endpoint ) maxNumberOfEventsPerStage <- .ignoreParameterIfNotUsed( "maxNumberOfEventsPerStage", maxNumberOfEventsPerStage, kMax > 1, "design is fixed ('kMax' = 1)" ) maxNumberOfEventsPerStage <- .assertIsValidNumberOfSubjectsPerStage(maxNumberOfEventsPerStage, "maxNumberOfEventsPerStage", plannedEvents, conditionalPower, calcEventsFunction, kMax, endpoint = endpoint ) if (kMax > 1) { if (!all(is.na(maxNumberOfEventsPerStage - minNumberOfEventsPerStage)) && any(maxNumberOfEventsPerStage - minNumberOfEventsPerStage < 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfEventsPerStage' (", .arrayToString(maxNumberOfEventsPerStage), ") must be not smaller than 'minNumberOfEventsPerStage' (", .arrayToString(minNumberOfEventsPerStage), ")" ) } .setValueAndParameterType( simulationResults, "minNumberOfEventsPerStage", minNumberOfEventsPerStage, NA_real_ ) .setValueAndParameterType( simulationResults, "maxNumberOfEventsPerStage", maxNumberOfEventsPerStage, NA_real_ ) } } if (kMax == 1 && !is.na(conditionalPower)) { warning("'conditionalPower' will be ignored for fixed sample design", call. = FALSE) } if (endpoint %in% c("means", "rates") && kMax == 1 && !is.null(calcSubjectsFunction)) { warning("'calcSubjectsFunction' will be ignored for fixed sample design", call. = FALSE) } if (endpoint == "survival" && kMax == 1 && !is.null(calcEventsFunction)) { warning("'calcEventsFunction' will be ignored for fixed sample design", call. 
= FALSE) } if (endpoint %in% c("means", "rates") && is.na(conditionalPower) && is.null(calcSubjectsFunction)) { if (length(minNumberOfSubjectsPerStage) != 1 || !is.na(minNumberOfSubjectsPerStage)) { warning("'minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ") will be ignored because ", "neither 'conditionalPower' nor 'calcSubjectsFunction' is defined", call. = FALSE ) simulationResults$minNumberOfSubjectsPerStage <- NA_real_ } if (length(maxNumberOfSubjectsPerStage) != 1 || !is.na(maxNumberOfSubjectsPerStage)) { warning("'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") will be ignored because ", "neither 'conditionalPower' nor 'calcSubjectsFunction' is defined", call. = FALSE ) simulationResults$maxNumberOfSubjectsPerStage <- NA_real_ } } if (endpoint == "survival" && is.na(conditionalPower) && is.null(calcEventsFunction)) { if (length(minNumberOfEventsPerStage) != 1 || !is.na(minNumberOfEventsPerStage)) { warning("'minNumberOfEventsPerStage' (", .arrayToString(minNumberOfEventsPerStage), ") ", "will be ignored because neither 'conditionalPower' nor 'calcEventsFunction' is defined", call. = FALSE ) simulationResults$minNumberOfEventsPerStage <- NA_real_ } if (length(maxNumberOfEventsPerStage) != 1 || !is.na(maxNumberOfEventsPerStage)) { warning("'maxNumberOfEventsPerStage' (", .arrayToString(maxNumberOfEventsPerStage), ") ", "will be ignored because neither 'conditionalPower' nor 'calcEventsFunction' is defined", call. 
= FALSE ) simulationResults$maxNumberOfEventsPerStage <- NA_real_ } } if (endpoint %in% c("means", "rates")) { simulationResults$.setParameterType( "calcSubjectsFunction", ifelse(kMax == 1, C_PARAM_NOT_APPLICABLE, ifelse(!is.null(calcSubjectsFunction) && kMax > 1, C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE) ) ) } else if (endpoint == "survival") { simulationResults$.setParameterType( "calcEventsFunction", ifelse(kMax == 1, C_PARAM_NOT_APPLICABLE, ifelse(!is.null(calcEventsFunction) && kMax > 1, C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE) ) ) } if (endpoint == "means") { if (is.null(calcSubjectsFunction)) { calcSubjectsFunction <- .getSimulationMeansMultiArmStageSubjects } else { .assertIsValidFunction( fun = calcSubjectsFunction, funArgName = "calcSubjectsFunction", expectedFunction = .getSimulationMeansMultiArmStageSubjects ) } simulationResults$calcSubjectsFunction <- calcSubjectsFunction } else if (endpoint == "rates") { if (is.null(calcSubjectsFunction)) { calcSubjectsFunction <- .getSimulationRatesMultiArmStageSubjects } else { .assertIsValidFunction( fun = calcSubjectsFunction, funArgName = "calcSubjectsFunction", expectedFunction = .getSimulationRatesMultiArmStageSubjects ) } simulationResults$calcSubjectsFunction <- calcSubjectsFunction } else if (endpoint == "survival") { if (is.null(calcEventsFunction)) { calcEventsFunction <- .getSimulationSurvivalMultiArmStageEvents } else { .assertIsValidFunction( fun = calcEventsFunction, funArgName = "calcEventsFunction", expectedFunction = .getSimulationSurvivalMultiArmStageEvents ) } simulationResults$calcEventsFunction <- calcEventsFunction } if (endpoint == "means") { .setValueAndParameterType(simulationResults, "stDev", stDev, C_STDEV_DEFAULT) } if (any(is.na(allocationRatioPlanned))) { allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT } if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, design$kMax) } else if (length(allocationRatioPlanned) != design$kMax) { 
stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'allocationRatioPlanned' (", .arrayToString(allocationRatioPlanned), ") ", "must have length 1 or ", design$kMax, " (kMax)" ) } if (length(unique(allocationRatioPlanned)) == 1) { .setValueAndParameterType( simulationResults, "allocationRatioPlanned", allocationRatioPlanned[1], defaultValue = 1 ) } else { .setValueAndParameterType( simulationResults, "allocationRatioPlanned", allocationRatioPlanned, defaultValue = rep(1, design$kMax) ) } .setValueAndParameterType(simulationResults, "effectMatrix", t(effectMatrix), NULL) if (endpoint %in% c("means", "rates")) { .setValueAndParameterType(simulationResults, "plannedSubjects", plannedSubjects, NA_real_) .setValueAndParameterType(simulationResults, "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE ) .setValueAndParameterType(simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE ) } else if (endpoint == "survival") { .setValueAndParameterType(simulationResults, "plannedEvents", plannedEvents, NA_real_) .setValueAndParameterType(simulationResults, "minNumberOfEventsPerStage", minNumberOfEventsPerStage, NA_real_, notApplicableIfNA = TRUE ) .setValueAndParameterType(simulationResults, "maxNumberOfEventsPerStage", maxNumberOfEventsPerStage, NA_real_, notApplicableIfNA = TRUE ) } .setValueAndParameterType(simulationResults, "conditionalPower", conditionalPower, NA_real_, notApplicableIfNA = TRUE ) if (endpoint %in% c("means", "survival")) { .setValueAndParameterType(simulationResults, "thetaH1", thetaH1, NA_real_, notApplicableIfNA = TRUE) } if (endpoint == "means") { .setValueAndParameterType(simulationResults, "stDevH1", stDevH1, NA_real_, notApplicableIfNA = TRUE) } .setValueAndParameterType( simulationResults, "maxNumberOfIterations", as.integer(maxNumberOfIterations), C_MAX_SIMULATION_ITERATIONS_DEFAULT ) simulationResults$.setParameterType("seed", ifelse(is.na(seed), 
C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) simulationResults$seed <- .setSeed(seed) if (is.null(adaptations) || all(is.na(adaptations))) { adaptations <- rep(TRUE, kMax - 1) } if (length(adaptations) != kMax - 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'adaptations' must have length ", (kMax - 1), " (kMax - 1)") } .setValueAndParameterType(simulationResults, "adaptations", adaptations, rep(TRUE, kMax - 1)) simulationResults$.setParameterType( "effectMatrix", ifelse(typeOfShape == "userDefined", C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE) ) .setValueAndParameterType(simulationResults, "activeArms", as.integer(activeArms), 3L) if (typeOfShape == "sigmoidEmax") { .setValueAndParameterType(simulationResults, "gED50", gED50, NA_real_) } .setValueAndParameterType(simulationResults, "slope", slope, 1) if (typeOfSelection != "userDefined") { .setValueAndParameterType(simulationResults, "threshold", threshold, -Inf) .setValueAndParameterType(simulationResults, "epsilonValue", epsilonValue, NA_real_) .setValueAndParameterType(simulationResults, "rValue", rValue, NA_real_) } .setValueAndParameterType(simulationResults, "intersectionTest", intersectionTest, C_INTERSECTION_TEST_MULTIARMED_DEFAULT) .setValueAndParameterType(simulationResults, "typeOfSelection", typeOfSelection, C_TYPE_OF_SELECTION_DEFAULT) .setValueAndParameterType(simulationResults, "typeOfShape", typeOfShape, C_TYPE_OF_SHAPE_DEFAULT) .setValueAndParameterType(simulationResults, "successCriterion", successCriterion, C_SUCCESS_CRITERION_DEFAULT) .setValueAndParameterType(simulationResults, "effectMeasure", effectMeasure, C_EFFECT_MEASURE_DEFAULT) return(simulationResults) } rpact/R/f_object_r_code.R0000644000176200001440000010510114446300510014770 0ustar liggesusers## | ## | *Object R Code* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General 
## | Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7132 $
## | Last changed: $Date: 2023-06-26 14:15:08 +0200 (Mon, 26 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_core_constants.R
#' @include f_logger.R
NULL

# Formats a single (atomic) argument value so that it can be pasted into
# generated R source code:
#   - character values are wrapped in escaped quotes; a genuine NA and the
#     literal string "NA" both end up as the quoted token "NA"
#   - NA values of integer/numeric/complex vectors become their typed NA
#     literal ("NA_integer_", "NA_real_", "NA_complex_")
#   - non-NA numeric values are formatted with 8 significant digits
# NOTE: the function name keeps the historical "Agrument" typo because other
# files of the package may call it by this name.
.getAgrumentSpecificFormattedValue <- function(value) {
    if (is.character(value)) {
        value <- paste0("\"", value, "\"")
        value[value == "\"NA\""] <- NA_character_
        value[is.na(value)] <- "\"NA\""
        return(value)
    } else if (is.integer(value)) {
        value[is.na(value)] <- "NA_integer_"
    } else if (is.numeric(value)) {
        value[!is.na(value)] <- format(value[!is.na(value)], digits = 8)
        value[is.na(value)] <- "NA_real_"
    } else if (is.complex(value)) {
        value[is.na(value)] <- "NA_complex_"
    }
    return(value)
}

# Converts an argument value 'x' (named 'name') into the R source code text
# that reproduces it, e.g. c("a", "b") -> "c(\"a\", \"b\")".
# Handles NULL, zero-length vectors, typed NA literals, (nested) lists,
# matrices (special-cased for effect matrices), and a few rpact-specific
# argument names (accrualTime leading zero, followUpTime rounding,
# maxNumberOfSubjects truncation, seq() reconstruction for numeric vectors).
.getArgumentValueRCode <- function(x, name) {
    if (is.null(x)) {
        return("NULL")
    }
    if (length(x) == 0) {
        if (is.list(x)) {
            return("list()")
        } else if (is.character(x)) {
            return("character(0)")
        } else if (is.integer(x)) {
            return("integer(0)")
        } else if (is.numeric(x)) {
            return("numeric(0)")
        } else if (is.complex(x)) {
            return("complex(0)")
        }
    }
    if (is.function(x) || isS4(x)) {
        # functions and S4 objects cannot be serialized here; emitted as NULL
        return("NULL")
    }
    if (length(x) == 1 && is.na(x)) {
        if (is.character(x)) {
            return("NA_character_")
        } else if (is.integer(x)) {
            return("NA_integer_")
        } else if (is.numeric(x)) {
            return("NA_real_")
        } else if (is.complex(x)) {
            return("NA_complex_")
        }
        return("NA")
    }
    if (is.list(x)) {
        # recurse over named list entries; empty 'piControls' entries of an
        # 'effectList' are deliberately dropped
        params <- c()
        for (paramName in names(x)) {
            paramValue <- x[[paramName]]
            if (name != "effectList" || paramName != "piControls" ||
                    (!is.null(paramValue) && length(paramValue) > 0)) {
                params <- c(params, paste0(paramName, " = ",
                    .getArgumentValueRCode(x = paramValue, name = paramName)))
            }
        }
        return(paste0("list(", paste0(params, collapse = ", "), ")"))
    }
    leadingZeroAdded <- FALSE
    expectedResult <- ""
    if (name == "accrualTime" && length(x) > 0 && !is.na(x[1]) && x[1] != 0) {
        # accrual time vectors always start at 0 in the generated command
        expectedResult <- "0"
        leadingZeroAdded <- TRUE
    } else if (name == "followUpTime" && length(x) == 1 && !is.na(x)) {
        x <- round(x, 3)
    } else if (name == "maxNumberOfSubjects" && length(x) == 1 && !is.na(x)) {
        x <- floor(x * 100) / 100
    } else if (is.numeric(x) && !is.matrix(x)) {
        # prefer a compact seq(...) expression when the vector permits it
        seqTest <- .reconstructSequenceCommand(x)
        if (!is.null(seqTest) && length(seqTest) == 1 && !is.na(seqTest) && grepl("^seq", seqTest)) {
            return(seqTest)
        }
    }
    if (is.matrix(x) && name == "effectMatrix") {
        x <- t(x)
    }
    # seq_along() instead of 1:length(x): avoids the bogus 1:0 iteration
    # for zero-length input (e.g. logical(0)), which previously produced an
    # inconsistent result
    for (i in seq_along(x)) {
        if (nchar(expectedResult) > 0) {
            expectedResult <- paste0(expectedResult, ", ")
        }
        expectedResult <- paste0(expectedResult, .getAgrumentSpecificFormattedValue(x[i]))
    }
    if (leadingZeroAdded || length(x) > 1) {
        expectedResult <- paste0("c(", expectedResult, ")")
    }
    if (is.matrix(x) && grepl("effectMatrix|effects|piTreatments|hazardRatios", name)) {
        expectedResult <- paste0("matrix(", expectedResult, ", ncol = ", ncol(x), ")")
    }
    return(expectedResult)
}

#' @rdname getObjectRCode
#' @export
rcmd <- function(obj, ...,
        leadingArguments = NULL,
        includeDefaultParameters = FALSE,
        stringWrapParagraphWidth = 90,
        prefix = "",
        postfix = "",
        stringWrapPrefix = "",
        newArgumentValues = list()) {
    # Short-hand wrapper for getObjectRCode().
    # NOTE(review): '...' and the newer getObjectRCode() arguments (tolerance,
    # pipeOperator, output, explicitPrint) are intentionally not forwarded
    # here; confirm against the package documentation before changing this.
    getObjectRCode(
        obj = obj,
        leadingArguments = leadingArguments,
        includeDefaultParameters = includeDefaultParameters,
        stringWrapParagraphWidth = stringWrapParagraphWidth,
        prefix = prefix,
        postfix = postfix,
        stringWrapPrefix = stringWrapPrefix,
        newArgumentValues = newArgumentValues
    )
}

#'
#' @title
#' Get Object R Code
#'
#' @description
#' Returns the R source command of a result object.
#'
#' @param obj The result object.
#' @param leadingArguments A character vector with arguments that shall be inserted at the beginning of the function command,
#' e.g., \code{design = x}.
Be careful with this option because the created R command may no longer be valid if used. #' @param includeDefaultParameters If \code{TRUE}, default parameters will be included in all \code{rpact} commands; #' default is \code{FALSE}. #' @param stringWrapParagraphWidth An integer value defining the number of characters after which a line break shall be inserted; #' set to \code{NULL} to insert no line breaks. #' @param prefix A character string that shall be added to the beginning of the R command. #' @param postfix A character string that shall be added to the end of the R command. #' @param stringWrapPrefix A prefix character string that shall be added to each new line, typically some spaces. #' @param newArgumentValues A named list with arguments that shall be renewed in the R command, e.g., #' \code{newArgumentValues = list(informationRates = c(0.5, 1))}. #' @param tolerance The tolerance for defining a value as default. #' @param pipeOperator The pipe operator to use in the R code, default is "none". #' @param output The output format, default is a character "vector". #' @param explicitPrint Show an explicit \code{print} command, default is \code{FALSE}. #' @inheritParams param_three_dots #' #' @details #' \code{\link[=getObjectRCode]{getObjectRCode()}} (short: \code{\link[=rcmd]{rcmd()}}) recreates #' the R commands that result in the specified object \code{obj}. #' \code{obj} must be an instance of class \code{ParameterSet}. #' #' @return A \code{\link[base]{character}} value or vector will be returned. 
#' @export
#'
getObjectRCode <- function(obj, ...,
        leadingArguments = NULL,
        includeDefaultParameters = FALSE,
        stringWrapParagraphWidth = 90,
        prefix = "",
        postfix = "",
        stringWrapPrefix = "",
        newArgumentValues = list(),
        tolerance = 1e-07,
        pipeOperator = c("auto", "none", "magrittr", "R"),
        output = c("vector", "cat", "test", "markdown", "internal"),
        explicitPrint = FALSE) {
    # fall-back function name taken from the expression the caller passed in
    functionName <- deparse(substitute(obj))
    functionName <- sub("\\(.*\\)$", "", functionName)
    output <- match.arg(output)
    .assertIsSingleLogical(includeDefaultParameters, "includeDefaultParameters")
    .assertIsSingleLogical(explicitPrint, "explicitPrint")
    if (!is.null(stringWrapParagraphWidth)) {
        .assertIsSingleInteger(stringWrapParagraphWidth, "stringWrapParagraphWidth", validateType = FALSE)
        .assertIsInClosedInterval(stringWrapParagraphWidth, "stringWrapParagraphWidth", lower = 10, upper = 50000)
    }
    .assertIsSingleCharacter(prefix, "prefix")
    .assertIsCharacter(postfix, "postfix")
    .assertIsSingleCharacter(stringWrapPrefix, "stringWrapPrefix")
    .assertIsSingleNumber(tolerance, "tolerance")
    .assertIsInClosedInterval(tolerance, "tolerance", lower = 1e-15, upper = 1e-03)
    if (output == "test") {
        stringWrapParagraphWidth <- NULL
    } else if (output %in% c("cat", "markdown")) {
        if (stringWrapPrefix == "") {
            # default continuation indent for wrapped lines
            stringWrapPrefix <- "    "
        }
    }
    pipeOperator <- match.arg(pipeOperator)
    if (pipeOperator == "auto") {
        # The native pipe |> exists since R 4.1.0. Comparing the full version
        # via getRversion() fixes the former major/minor comparison, which
        # misclassified versions like R 5.0 (minor 0 < 1).
        if (getRversion() >= "4.1.0") {
            pipeOperator <- "R"
        } else if (.isPackageInstalled("magrittr")) {
            pipeOperator <- "magrittr"
        } else {
            pipeOperator <- "none"
        }
    }
    pipeOperatorPostfix <- ""
    if (pipeOperator == "magrittr") {
        pipeOperatorPostfix <- " %>% "
    } else if (pipeOperator == "R") {
        pipeOperatorPostfix <- " |> "
    }

    # plain functions (e.g. user-defined calc functions) are rendered directly
    if (!is.null(obj) && is.function(obj)) {
        lines <- .getFunctionAsString(obj,
            stringWrapPrefix = stringWrapPrefix,
            stringWrapParagraphWidth = stringWrapParagraphWidth
        )
        if (length(lines) == 0) {
            return("")
        }
        lines[1] <- paste0(prefix, lines[1])
        if (any(postfix != "")) {
            if (grepl("(\\|>)|(%>%)", postfix[1])) {
                lines[length(lines)] <- paste0(lines[length(lines)], postfix[1])
                if (length(postfix) > 1) {
                    lines <- c(lines, paste0(postfix[2:length(postfix)], collapse = ""))
                }
            } else {
                lines <- c(lines, paste0(postfix, collapse = ""))
            }
        }
        return(lines)
    }

    .assertIsParameterSetClass(obj, "ParameterSet")
    if (!is.list(newArgumentValues)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'newArgumentValues' must be a named list ",
            "(is ", .getClassName(newArgumentValues), ")"
        )
    }

    # 'precondition' collects commands that must run before the main command
    # (design, dataset, user-defined functions, ...)
    precondition <- character(0)
    if (is.null(leadingArguments)) {
        leadingArguments <- character(0)
    }
    if (!inherits(obj, "ConditionalPowerResults") &&
            !is.null(obj[[".design"]]) && (is.null(leadingArguments) ||
                !any(grepl("design", leadingArguments)))) {
        preconditionDesign <- getObjectRCode(obj$.design,
            prefix = ifelse(pipeOperator == "none", "design <- ", ""),
            postfix = pipeOperatorPostfix,
            includeDefaultParameters = includeDefaultParameters,
            stringWrapParagraphWidth = stringWrapParagraphWidth,
            stringWrapPrefix = stringWrapPrefix,
            newArgumentValues = newArgumentValues,
            pipeOperator = pipeOperator,
            output = "internal"
        )
        # a trivial kMax = 1 default design does not need to be re-created
        if (!grepl("getDesign(GroupSequential|InverseNormal)\\(kMax = 1\\)",
                paste0(preconditionDesign, collapse = " "))) {
            precondition <- c(precondition, preconditionDesign)
            if (pipeOperator == "none") {
                leadingArguments <- c(leadingArguments, "design = design")
            }
        }
    }
    if (inherits(obj, "PerformanceScore")) {
        preconditionSimulationResults <- getObjectRCode(obj$.simulationResults,
            prefix = ifelse(pipeOperator == "none", "simulationResults <- ", ""),
            postfix = pipeOperatorPostfix,
            includeDefaultParameters = includeDefaultParameters,
            stringWrapParagraphWidth = stringWrapParagraphWidth,
            stringWrapPrefix = stringWrapPrefix,
            newArgumentValues = newArgumentValues,
            pipeOperator = pipeOperator,
            output = "internal"
        )
        precondition <- c(precondition, preconditionSimulationResults)
        if (pipeOperator == "none") {
            leadingArguments <- c(leadingArguments, "simulationResults = simulationResults")
        }
    }
    if (!is.null(obj[[".dataInput"]]) && (is.null(leadingArguments) ||
            !any(grepl("data", leadingArguments)))) {
        precondition <- c(precondition, getObjectRCode(obj$.dataInput,
            prefix = ifelse(pipeOperator == "none", "data <- ", ""),
            postfix = pipeOperatorPostfix,
            includeDefaultParameters = includeDefaultParameters,
            stringWrapParagraphWidth = stringWrapParagraphWidth,
            stringWrapPrefix = stringWrapPrefix,
            newArgumentValues = newArgumentValues,
            pipeOperator = pipeOperator,
            output = "internal"
        ))
        if (pipeOperator == "none") {
            leadingArguments <- c(leadingArguments, "dataInput = data")
        }
    }
    if (!is.null(obj[["calcSubjectsFunction"]]) &&
            (is.null(leadingArguments) || !any(grepl("calcSubjectsFunction", leadingArguments))) &&
            obj$.getParameterType("calcSubjectsFunction") == C_PARAM_USER_DEFINED) {
        precond <- getObjectRCode(obj$calcSubjectsFunction,
            prefix = "calcSubjectsFunction <- ",
            includeDefaultParameters = includeDefaultParameters,
            stringWrapParagraphWidth = stringWrapParagraphWidth,
            stringWrapPrefix = stringWrapPrefix,
            newArgumentValues = newArgumentValues,
            pipeOperator = pipeOperator,
            output = "internal"
        )
        # with a pipe, the function definition must precede the pipeline
        if (pipeOperator == "none") {
            precondition <- c(precondition, precond)
        } else {
            precondition <- c(precond, precondition)
        }
    }
    if (!is.null(obj[["calcEventsFunction"]]) &&
            (is.null(leadingArguments) || !any(grepl("calcEventsFunction", leadingArguments))) &&
            obj$.getParameterType("calcEventsFunction") == C_PARAM_USER_DEFINED) {
        precond <- getObjectRCode(obj$calcEventsFunction,
            prefix = "calcEventsFunction <- ",
            includeDefaultParameters = includeDefaultParameters,
            stringWrapParagraphWidth = stringWrapParagraphWidth,
            stringWrapPrefix = stringWrapPrefix,
            newArgumentValues = newArgumentValues,
            pipeOperator = pipeOperator,
            output = "internal"
        )
        if (pipeOperator == "none") {
            precondition <- c(precondition, precond)
        } else {
            precondition <- c(precond, precondition)
        }
    }
    if (!is.null(obj[["selectArmsFunction"]]) &&
            (is.null(leadingArguments) || !any(grepl("selectArmsFunction", leadingArguments))) &&
            !is.null(obj[["typeOfSelection"]]) && obj$typeOfSelection == "userDefined") {
        precond <- getObjectRCode(obj$selectArmsFunction,
            prefix = "selectArmsFunction <- ",
            includeDefaultParameters = includeDefaultParameters,
            stringWrapParagraphWidth = stringWrapParagraphWidth,
            stringWrapPrefix = stringWrapPrefix,
            newArgumentValues = newArgumentValues,
            pipeOperator = pipeOperator,
            output = "internal"
        )
        if (pipeOperator == "none") {
            precondition <- c(precondition, precond)
        } else {
            precondition <- c(precond, precondition)
        }
        leadingArguments <- c(leadingArguments, "selectArmsFunction = selectArmsFunction")
    }
    if (inherits(obj, "ConditionalPowerResults") &&
            !is.null(obj[[".stageResults"]]) && (is.null(leadingArguments) ||
                !any(grepl("stageResults", leadingArguments)))) {
        precond <- getObjectRCode(obj$.stageResults,
            prefix = ifelse(pipeOperator == "none", "stageResults <- ", ""),
            postfix = pipeOperatorPostfix,
            includeDefaultParameters = includeDefaultParameters,
            stringWrapParagraphWidth = stringWrapParagraphWidth,
            stringWrapPrefix = stringWrapPrefix,
            newArgumentValues = newArgumentValues,
            pipeOperator = pipeOperator,
            output = "internal"
        )
        if (pipeOperator == "none") {
            precondition <- c(precondition, precond)
        } else {
            precondition <- c(precond, precondition)
        }
        leadingArguments <- c(leadingArguments, "stageResults = stageResults")
    }
    if (grepl("SimulationResultsEnrichment(Means|Rates|Survival)", .getClassName(obj))) {
        precond <- paste0(
            "effectList <- ",
            .getArgumentValueRCode(obj$effectList, "effectList")
        )
        if (pipeOperator == "none") {
            precondition <- c(precondition, precond)
        } else {
            precondition <- c(precond, precondition)
        }
    }
    precondition <- unique(precondition)

    # map the object class to the rpact function that creates it
    if ("TrialDesignPlanMeans" == .getClassName(obj)) {
        if (obj$.isSampleSizeObject()) {
            functionName <- "getSampleSizeMeans"
        } else {
            functionName <- "getPowerMeans"
        }
    } else if ("TrialDesignPlanRates" == .getClassName(obj)) {
        if (obj$.isSampleSizeObject()) {
            functionName <- "getSampleSizeRates"
        } else {
            functionName <- "getPowerRates"
        }
    } else if ("TrialDesignPlanSurvival" == .getClassName(obj)) {
        if (obj$.isSampleSizeObject()) {
            functionName <- "getSampleSizeSurvival"
        } else {
            functionName <- "getPowerSurvival"
        }
    } else if (inherits(obj, "TrialDesign")) {
        functionName <- paste0("get", sub("^Trial", "", .getClassName(obj)))
    } else if (inherits(obj, "Dataset")) {
        functionName <- "getDataset"
    } else if (inherits(obj, "AnalysisResults")) {
        functionName <- "getAnalysisResults"
    } else if ("TrialDesignSet" == .getClassName(obj)) {
        functionName <- "getDesignSet"
    } else if ("TrialDesignCharacteristics" == .getClassName(obj)) {
        functionName <- "getDesignCharacteristics"
    } else if (inherits(obj, "SimulationResultsMeans")) {
        functionName <- "getSimulationMeans"
    } else if (inherits(obj, "SimulationResultsRates")) {
        functionName <- "getSimulationRates"
    } else if (inherits(obj, "SimulationResultsSurvival")) {
        functionName <- "getSimulationSurvival"
    } else if (inherits(obj, "SimulationResultsMultiArmMeans")) {
        functionName <- "getSimulationMultiArmMeans"
    } else if (inherits(obj, "SimulationResultsMultiArmRates")) {
        functionName <- "getSimulationMultiArmRates"
    } else if (inherits(obj, "SimulationResultsMultiArmSurvival")) {
        functionName <- "getSimulationMultiArmSurvival"
    } else if (inherits(obj, "SimulationResultsEnrichmentMeans")) {
        functionName <- "getSimulationEnrichmentMeans"
    } else if (inherits(obj, "SimulationResultsEnrichmentRates")) {
        functionName <- "getSimulationEnrichmentRates"
    } else if (inherits(obj, "SimulationResultsEnrichmentSurvival")) {
        functionName <- "getSimulationEnrichmentSurvival"
    } else if (inherits(obj, "PiecewiseSurvivalTime")) {
        functionName <- "getPiecewiseSurvivalTime"
    } else if (inherits(obj, "AccrualTime")) {
        functionName <- "getAccrualTime"
    } else if (inherits(obj, "StageResults")) {
        functionName <- "getStageResults"
    } else if (inherits(obj, "ConditionalPowerResults")) {
        functionName <- "getConditionalPower"
    } else if (inherits(obj, "PowerAndAverageSampleNumberResult")) {
        functionName <- "getPowerAndAverageSampleNumber"
    } else if (inherits(obj, "EventProbabilities")) {
        functionName <- "getEventProbabilities"
    } else if (inherits(obj, "NumberOfSubjects")) {
        functionName <- "getNumberOfSubjects"
    } else if (inherits(obj, "PerformanceScore")) {
        # fixed typo: was "gePerformanceScore"; the exported function is
        # getPerformanceScore (see NAMESPACE)
        functionName <- "getPerformanceScore"
    } else if (inherits(obj, "SummaryFactory") || "SummaryFactory" == .getClassName(obj)) {
        return(getObjectRCode(obj$object,
            prefix = ifelse(pipeOperator == "none", "summary(", ""),
            postfix = {
                if (pipeOperator == "none") ")" else c(pipeOperatorPostfix, "summary()")
            },
            includeDefaultParameters = includeDefaultParameters,
            stringWrapParagraphWidth = stringWrapParagraphWidth,
            stringWrapPrefix = stringWrapPrefix,
            newArgumentValues = newArgumentValues,
            pipeOperator = pipeOperator,
            output = output,
            explicitPrint = explicitPrint
        ))
    } else {
        stop("Runtime issue: function 'getObjectRCode' is not implemented for class ", .getClassName(obj))
    }

    # collect the argument names that shall appear in the generated command
    objNames <- names(obj)
    objNames <- objNames[objNames != "effectList"]
    if (inherits(obj, "ParameterSet")) {
        if (includeDefaultParameters) {
            objNames <- obj$.getInputParameters()
        } else {
            objNames <- obj$.getUserDefinedParameters()
        }
        objNames <- objNames[objNames != "stages"]
    }
    if (inherits(obj, "TrialDesign") && !inherits(obj, "TrialDesignConditionalDunnett") &&
            !("informationRates" %in% objNames) && !("kMax" %in% objNames) && obj$kMax != 3) {
        objNames <- c("kMax", objNames)
    }
    thetaH0 <- NA_real_
    if (inherits(obj, "SimulationResultsSurvival") &&
            obj$.getParameterType("thetaH1") == "g") {
        objNames <- c(objNames, "thetaH1")
        thetaH0 <- obj[["thetaH0"]]
    }
    if (inherits(obj, "SimulationResultsSurvival")) {
        objNames <- objNames[objNames != "allocationRatioPlanned"] # allocation1 and allocation2 are used instead
    }
    if (inherits(obj, "AnalysisResults") && grepl("Fisher", .getClassName(obj))) {
        if (!is.null(obj[["seed"]]) && length(obj$seed) == 1 && !is.na(obj$seed)) {
            if (!("iterations" %in% objNames)) {
                objNames <- c(objNames, "iterations")
            }
            if (!("seed" %in% objNames)) {
                objNames <- c(objNames, "seed")
            }
        } else if (!is.null(obj[[".conditionalPowerResults"]]) &&
                !is.null(obj$.conditionalPowerResults[["seed"]]) &&
                length(obj$.conditionalPowerResults$seed) == 1 &&
                !is.na(obj$.conditionalPowerResults$seed)) {
            if (!("iterations" %in% objNames)) {
                objNames <- c(
                    objNames,
                    ".conditionalPowerResults$iterations"
                )
            }
            if (!("seed" %in% objNames)) {
                objNames <- c(
                    objNames,
                    ".conditionalPowerResults$seed"
                )
            }
        }
    }
    if (!("accrualIntensity" %in% objNames) &&
            !is.null(obj[[".accrualTime"]]) &&
            !obj$.accrualTime$absoluteAccrualIntensityEnabled) {
        objNames <- c(objNames, "accrualIntensity")
    }
    newArgumentValueNames <- character(0)
    if (length(newArgumentValues) > 0) {
        newArgumentValueNames <- names(newArgumentValues)
        illegalArgumentValueNames <- newArgumentValueNames[which(!(newArgumentValueNames %in% names(obj)))]
        if (length(illegalArgumentValueNames) > 0) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", illegalArgumentValueNames,
                "' is not a valid ", functionName, "() argument"
            )
        }
        defaultParams <- newArgumentValueNames[!(newArgumentValueNames %in% objNames)]
        objNames <- c(objNames, defaultParams)
    }
    # drop informationRates when they equal the default for kMax (within tolerance)
    if (inherits(obj, "TrialDesign") && "informationRates" %in% objNames &&
            !("informationRates" %in% newArgumentValueNames)) {
        informationRates <- obj[["informationRates"]]
        if (!is.null(informationRates) && length(informationRates) > 0) {
            kMax <- obj[["kMax"]]
            if (isTRUE(all.equal(
                target = .getInformationRatesDefault(kMax),
                current = informationRates,
                tolerance = tolerance
            ))) {
                objNames <- objNames[objNames != "informationRates"]
                if (!("kMax" %in% objNames) && kMax != 3) {
                    objNames <- c("kMax", objNames)
                }
            }
        }
    }

    if (inherits(obj, "Dataset")) {
        lines <- .getDatasetArgumentsRCodeLines(obj, complete = FALSE, digits = NA_integer_)
        argumentsRCode <- paste0(lines, collapse = ", ")
    } else {
        argumentsRCode <- ""
        arguments <- c()
        if (length(objNames) > 0) {
            for (name in objNames) {
                if (grepl("^\\.conditionalPowerResults\\$", name)) {
                    name <- sub("^\\.conditionalPowerResults\\$", "", name)
                    value <- obj$.conditionalPowerResults[[name]]
                } else {
                    value <- obj[[name]]
                }
                if (name == "accrualTime" && inherits(obj, "AccrualTime") &&
                        !isTRUE(obj$endOfAccrualIsUserDefined) &&
                        isTRUE(length(obj$accrualIntensity) < length(value))) {
                    value <- value[1:(length(value) - 1)]
                }
                if (name == "accrualIntensityRelative") {
                    name <- "accrualIntensity"
                }
                if (name == "accrualIntensity" && !is.null(obj[[".accrualTime"]]) &&
                        !obj$.accrualTime$absoluteAccrualIntensityEnabled) {
                    value <- obj$.accrualTime$accrualIntensityRelative
                }
                originalValue <- value
                newValue <- newArgumentValues[[name]]
                if (!is.null(newValue)) {
                    originalValue <- newValue
                }
                value <- .getArgumentValueRCode(originalValue, name)
                if (name == "allocationRatioPlanned") {
                    optimumAllocationRatio <- obj[["optimumAllocationRatio"]]
                    if (!is.null(optimumAllocationRatio) && isTRUE(optimumAllocationRatio)) {
                        # 0 requests the optimum allocation ratio
                        value <- 0
                    } else if (inherits(obj, "ParameterSet")) {
                        if (obj$.getParameterType("allocationRatioPlanned") == "g") {
                            value <- 0
                        }
                    }
                } else if (name == "optimumAllocationRatio") {
                    name <- "allocationRatioPlanned"
                    value <- 0
                } else if (name == "maxNumberOfSubjects") {
                    value <- .getArgumentValueRCode(originalValue[1], name)
                } else if (name == "thetaH1" && length(thetaH0) == 1 &&
                        !is.na(thetaH0) && value != 1) {
                    value <- .getArgumentValueRCode(originalValue * thetaH0, name)
                } else if (name == "nPlanned") {
                    if (!all(is.na(originalValue))) {
                        value <- .getArgumentValueRCode(na.omit(originalValue), name)
                    }
                }
                if (name == "calcSubjectsFunction" &&
                        obj$.getParameterType("calcSubjectsFunction") == C_PARAM_USER_DEFINED &&
                        !is.null(obj[["calcSubjectsFunction"]])) {
                    value <- "calcSubjectsFunction"
                } else if (name == "calcEventsFunction" &&
                        obj$.getParameterType("calcEventsFunction") == C_PARAM_USER_DEFINED &&
                        !is.null(obj[["calcEventsFunction"]])) {
                    value <- "calcEventsFunction"
                }
                if ((name == "twoSidedPower" && isFALSE(originalValue)) ||
                        name == "accrualIntensityRelative") {
                    # do not add
                    # arguments <- c(arguments, paste0(name, "_DoNotAdd"))
                } else {
                    if (length(value) > 0 && nchar(as.character(value)) > 0) {
                        argument <- paste0(name, " = ", value)
                    } else {
                        argument <- name
                    }
                    if (!(argument %in% leadingArguments)) {
                        arguments <- c(arguments, argument)
                    }
                }
            }
        }
        if (inherits(obj, "TrialDesignPlanSurvival")) {
            if (!("accrualTime" %in% objNames) &&
                    obj$.getParameterType("accrualTime") == "g" &&
                    !all(is.na(obj$accrualTime))) {
                # case 2: follow-up time and absolute intensity given
                accrualType2 <- (length(obj$accrualIntensity) == 1 &&
                    obj$accrualIntensity >= 1 &&
                    obj$.getParameterType("accrualIntensity") == "u" &&
                    obj$.getParameterType("followUpTime") == "u" &&
                    obj$.getParameterType("maxNumberOfSubjects") == "g")
                if (!accrualType2) {
                    accrualTime <- .getArgumentValueRCode(obj$accrualTime, "accrualTime")
                    if (length(obj$accrualTime) > 1 &&
                            length(obj$accrualTime) == length(obj$accrualIntensity) &&
                            (obj$.getParameterType("maxNumberOfSubjects") == "u" ||
                                obj$.getParameterType("followUpTime") == "u")) {
                        accrualTime <- .getArgumentValueRCode(obj$accrualTime[1:(length(obj$accrualTime) - 1)], "accrualTime")
                    }
                    accrualTimeArg <- paste0("accrualTime = ", accrualTime)
                    # keep accrualTime directly before accrualIntensity
                    index <- which(grepl("^accrualIntensity", arguments))
                    if (length(index) == 1 && index > 1) {
                        arguments <- c(arguments[1:(index - 1)], accrualTimeArg, arguments[index:length(arguments)])
                    } else {
                        arguments <- c(arguments, accrualTimeArg)
                    }
                } else if (obj$.getParameterType("followUpTime") == "u") {
                    arguments <- c(arguments, "accrualTime = 0")
                }
            }
            accrualIntensityRelative <- obj$.accrualTime$accrualIntensityRelative
            if (!("accrualIntensity" %in% objNames) && !all(is.na(accrualIntensityRelative))) {
                arguments <- c(arguments, paste0(
                    "accrualIntensity = ",
                    .getArgumentValueRCode(accrualIntensityRelative, "accrualIntensity")
                ))
            }
            if (!("maxNumberOfSubjects" %in% objNames) &&
                    obj$.accrualTime$.getParameterType("maxNumberOfSubjects") == "u" &&
                    !(obj$.getParameterType("followUpTime") %in% c("u", "d"))) {
                arguments <- c(arguments, paste0(
                    "maxNumberOfSubjects = ",
                    .getArgumentValueRCode(obj$maxNumberOfSubjects[1], "maxNumberOfSubjects")
                ))
            }
        } else if (inherits(obj, "AnalysisResults")) {
            arguments <- c(arguments, paste0("stage = ", obj$.stageResults$stage))
        } else if (inherits(obj, "StageResults")) {
            arguments <- c(arguments, paste0("stage = ", obj$stage))
        }
        if (length(arguments) > 0) {
            argumentsRCode <- paste0(argumentsRCode, arguments, collapse = ", ")
        }
    }

    if (!is.null(leadingArguments) && length(leadingArguments) > 0) {
        leadingArguments <- unique(leadingArguments)
        leadingArguments <- paste0(leadingArguments, collapse = ", ")
        if (nchar(argumentsRCode) > 0) {
            argumentsRCode <- paste0(leadingArguments, ", ", argumentsRCode)
        } else {
            argumentsRCode <- leadingArguments
        }
    }
    rCode <- paste0(prefix, functionName, "(", argumentsRCode, ")")
    if (any(postfix != "")) {
        if (length(postfix) > 1 && grepl("(\\|>)|(%>%)", postfix[1])) {
            if (!grepl("(\\|>)|(%>%) *$", rCode[length(rCode)])) {
                rCode <- paste0(rCode, postfix[1])
            }
            if (length(postfix) > 1) {
                rCode <- c(rCode, paste0(postfix[2:length(postfix)], collapse = ""))
            }
        } else {
            rCode <- paste0(rCode, paste0(postfix, collapse = ""))
        }
    }
    if (output != "internal" && explicitPrint) {
        if (pipeOperator == "none") {
            rCode <- paste0("print(", rCode, ")")
        } else {
            rCode[length(rCode)] <- paste0(rCode[length(rCode)], pipeOperatorPostfix)
            rCode <- c(rCode, "print()")
        }
    }
    rCode <- c(precondition, rCode)

    # optional line wrapping; 4-space and 2-space indents are temporarily
    # encoded as "___" / "__" so that strwrap() does not swallow them
    if (!is.null(stringWrapParagraphWidth) && length(stringWrapParagraphWidth) == 1 &&
            !is.na(stringWrapParagraphWidth) && is.numeric(stringWrapParagraphWidth) &&
            stringWrapParagraphWidth >= 10 &&
            !is.null(stringWrapPrefix) && length(stringWrapPrefix) == 1 &&
            !is.na(stringWrapPrefix) && is.character(stringWrapPrefix)) {
        rCodeNew <- character(0)
        for (rCodeLine in rCode) {
            rCodeLine <- gsub("    ", "___", rCodeLine)
            rCodeLine <- gsub("  ", "__", rCodeLine)
            rCodeLines <- strwrap(rCodeLine, width = stringWrapParagraphWidth)
            if (length(rCodeLines) > 1) {
                for (i in 2:length(rCodeLines)) {
                    if (grepl("^ *(\\|>|%>%) *", rCodeLines[i])) {
                        # move a leading pipe operator to the end of the previous line
                        rCodeLines[i - 1] <- paste0(rCodeLines[i - 1], pipeOperatorPostfix)
                        rCodeLines[i] <- sub("^ *(\\|>|%>%) *", "", rCodeLines[i])
                    } else if (!grepl("^ *([a-zA-Z0-9]+ *<-)|(^ *get[a-zA-Z]+\\()|summary\\(", rCodeLines[i])) {
                        rCodeLines[i] <- paste0(stringWrapPrefix, rCodeLines[i])
                    }
                }
            }
            rCodeLines <- gsub("___", "    ", rCodeLines)
            rCodeLines <- gsub("__", "  ", rCodeLines)
            rCodeLines <- rCodeLines[nchar(trimws(rCodeLines)) > 0]
            rCodeNew <- c(rCodeNew, rCodeLines)
        }
        rCode <- rCodeNew
    }

    if (output %in% c("vector", "internal")) {
        return(rCode)
    }
    if (output == "cat") {
        collapse <- "\n"
        if (pipeOperator != "none") {
            collapse <- paste0("\n", stringWrapPrefix)
        }
        cat(paste0(rCode, collapse = collapse), "\n")
        return(invisible(rCode))
    }
    if (output == "markdown") {
        collapse <- "\n"
        if (pipeOperator != "none") {
            collapse <- paste0("\n", stringWrapPrefix)
            if (explicitPrint) {
                rCode <- gsub("print\\(\\)", "print(markdown = TRUE)", rCode)
            } else if (!any(grepl("kable\\(", rCode))) {
                rCode[length(rCode)] <- paste0(rCode[length(rCode)], pipeOperatorPostfix)
                rCode <- c(rCode, "kable()")
            }
        }
        return(paste0(rCode, collapse = collapse))
    }
    if (output == "test") {
        message("Evaluate and parse the following code:")
        cat(rCode, "\n")
        x <- eval(parse(text = rCode))
        return(invisible(x))
    }
    return(invisible(rCode))
}
rpact/R/f_simulation_calc_subjects_function.R0000644000176200001440000004510014435554744021210 0ustar liggesusers## |
## | *User defined calc subjects function for simulation*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here:
## | https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7019 $
## | Last changed: $Date: 2023-05-31 07:23:47 +0200 (Mi, 31 Mai 2023) $
## | Last changed by: $Author: pahlke $
## |

# Expected signature (argument name -> C++ type) of a user-defined
# calcSubjectsFunction for means simulations.
C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_MEANS_ARGUMENTS <- list(
    stage = "int",
    meanRatio = "bool",
    thetaH0 = "double",
    groups = "int",
    plannedSubjects = "NumericVector",
    allocationRatioPlanned = "NumericVector",
    minNumberOfSubjectsPerStage = "NumericVector",
    maxNumberOfSubjectsPerStage = "NumericVector",
    sampleSizesPerStage = "NumericVector",
    thetaH1 = "double",
    stDevH1 = "double",
    conditionalPower = "double",
    conditionalCriticalValue = "double"
)

# C++ template for compiling a user-defined means calcSubjectsFunction with
# Rcpp; "{USER_CODE}" is replaced by the user's function body. The
# Rcpp::XPtr<...> template parameters were restored here (they match the
# typedef declared above); this copy of the file had lost all "<...>" text.
C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_MEANS_CPP_CODE <- c(
    "#include <Rcpp.h>",
    # "#include ",
    "using namespace Rcpp;",
    "typedef double (*calcSubjectsFunctionMeansPtrTemp)(",
    "    int stage, ",
    "    bool meanRatio, ",
    "    double thetaH0, ",
    "    int groups, ",
    "    NumericVector plannedSubjects, ",
    "    NumericVector allocationRatioPlanned, ",
    "    NumericVector minNumberOfSubjectsPerStage, ",
    "    NumericVector maxNumberOfSubjectsPerStage, ",
    "    NumericVector sampleSizesPerStage, ",
    "    double thetaH1, ",
    "    double stDevH1, ",
    "    double conditionalPower, ",
    "    double conditionalCriticalValue); ",
    "double getSimulationMeansStageSubjectsTemp( ",
    "    int stage, ",
    "    bool meanRatio, ",
    "    double thetaH0, ",
    "    int groups, ",
    "    NumericVector plannedSubjects, ",
    "    NumericVector allocationRatioPlanned, ",
    "    NumericVector minNumberOfSubjectsPerStage, ",
    "    NumericVector maxNumberOfSubjectsPerStage, ",
    "    NumericVector sampleSizesPerStage, ",
    "    double thetaH1, ",
    "    double stDevH1, ",
    "    double conditionalPower, ",
    "    double conditionalCriticalValue) { ",
    "    {USER_CODE}",
    "} ",
    "// [[Rcpp::export]] ",
    "Rcpp::XPtr<calcSubjectsFunctionMeansPtrTemp> calcSubjectsFunctionCppTemp() { ",
    "    return Rcpp::XPtr<calcSubjectsFunctionMeansPtrTemp>(",
    "        new calcSubjectsFunctionMeansPtrTemp(&getSimulationMeansStageSubjectsTemp));",
    "}"
)

# Expected signature of a user-defined calcSubjectsFunction for rates simulations.
C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_RATES_ARGUMENTS <- list(
    stage = "int",
    riskRatio = "bool",
    thetaH0 = "double",
    groups = "int",
    plannedSubjects = "NumericVector",
    directionUpper = "bool",
    allocationRatioPlanned = "NumericVector",
    minNumberOfSubjectsPerStage = "NumericVector",
    maxNumberOfSubjectsPerStage = "NumericVector",
    sampleSizesPerStage = "NumericVector",
    conditionalPower = "NumericVector",
    overallRate = "NumericVector",
    conditionalCriticalValue = "double",
    farringtonManningValue1 = "double",
    farringtonManningValue2 = "double"
)

# C++ template for a user-defined rates calcSubjectsFunction (see the means
# template above for details; XPtr<...> parameters restored).
C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_RATES_CPP_CODE <- c(
    "#include <Rcpp.h>",
    # "#include ",
    "using namespace Rcpp;",
    "typedef double (*calcSubjectsFunctionRatesPtrTemp)(",
    "    int stage, ",
    "    bool riskRatio, ",
    "    double thetaH0, ",
    "    int groups, ",
    "    NumericVector plannedSubjects, ",
    "    bool directionUpper, ",
    "    NumericVector allocationRatioPlanned, ",
    "    NumericVector minNumberOfSubjectsPerStage, ",
    "    NumericVector maxNumberOfSubjectsPerStage, ",
    "    NumericVector sampleSizesPerStage, ",
    "    NumericVector conditionalPower, ",
    "    NumericVector overallRate, ",
    "    double conditionalCriticalValue, ",
    "    double farringtonManningValue1, ",
    "    double farringtonManningValue2); ",
    "double getSimulationRatesStageSubjectsTemp(",
    "    int stage, ",
    "    bool riskRatio, ",
    "    double thetaH0, ",
    "    int groups, ",
    "    NumericVector plannedSubjects, ",
    "    bool directionUpper, ",
    "    NumericVector allocationRatioPlanned, ",
    "    NumericVector minNumberOfSubjectsPerStage, ",
    "    NumericVector maxNumberOfSubjectsPerStage, ",
    "    NumericVector sampleSizesPerStage, ",
    "    NumericVector conditionalPower, ",
    "    NumericVector overallRate, ",
    "    double conditionalCriticalValue, ",
    "    double farringtonManningValue1, ",
    "    double farringtonManningValue2) { ",
    "    {USER_CODE}",
    "} ",
    "// [[Rcpp::export]] ",
    "Rcpp::XPtr<calcSubjectsFunctionRatesPtrTemp> calcSubjectsFunctionCppTemp() { ",
    "    return Rcpp::XPtr<calcSubjectsFunctionRatesPtrTemp>(",
    "        new calcSubjectsFunctionRatesPtrTemp(&getSimulationRatesStageSubjectsTemp));",
    "}"
)

# Expected signature of a user-defined calcEventsFunction for survival simulations.
C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_SURVIVAL_ARGUMENTS <- list(
    stage = "int",
    conditionalPower = "double",
    thetaH0 = "double",
    estimatedTheta = "double",
    plannedEvents = "NumericVector",
    eventsOverStages = "NumericVector",
    minNumberOfEventsPerStage = "NumericVector",
    maxNumberOfEventsPerStage = "NumericVector",
    allocationRatioPlanned = "double",
    conditionalCriticalValue = "double"
)

# C++ template for a user-defined survival calcEventsFunction (see the means
# template above for details; XPtr<...> parameters restored).
C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_SURVIVAL_CPP_CODE <- c(
    "#include <Rcpp.h>",
    # "#include ",
    "using namespace Rcpp;",
    "typedef double (*calcEventsFunctionSurvivalPtrTemp)(",
    "    int stage, ",
    "    double conditionalPower, ",
    "    double thetaH0, ",
    "    double estimatedTheta, ",
    "    NumericVector plannedEvents, ",
    "    NumericVector eventsOverStages, ",
    "    NumericVector minNumberOfEventsPerStage, ",
    "    NumericVector maxNumberOfEventsPerStage, ",
    "    double allocationRatioPlanned, ",
    "    double conditionalCriticalValue); ",
    "double getSimulationSurvivalStageEventsTemp(",
    "    int stage, ",
    "    double conditionalPower, ",
    "    double thetaH0, ",
    "    double estimatedTheta, ",
    "    NumericVector plannedEvents, ",
    "    NumericVector eventsOverStages, ",
    "    NumericVector minNumberOfEventsPerStage, ",
    "    NumericVector maxNumberOfEventsPerStage, ",
    "    double allocationRatioPlanned, ",
    "    double conditionalCriticalValue) { ",
    "    {USER_CODE}",
    "} ",
    "// [[Rcpp::export]] ",
    "Rcpp::XPtr<calcEventsFunctionSurvivalPtrTemp> calcEventsFunctionCppTemp() { ",
    "    return Rcpp::XPtr<calcEventsFunctionSurvivalPtrTemp>(",
    "        new calcEventsFunctionSurvivalPtrTemp(&getSimulationSurvivalStageEventsTemp));",
    "}"
)

# Keys identifying the three template variants.
C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_MEANS <- "base_means"
C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_RATES <- "base_rates"
C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_SURVIVAL <- "base_survival"

# Lookup table: template key -> C++ template source lines.
C_SIMULATION_CALC_SUBJECTS_FUNCTION_CPP_CODE <- list()
C_SIMULATION_CALC_SUBJECTS_FUNCTION_CPP_CODE[[C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_MEANS]] <- C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_MEANS_CPP_CODE
C_SIMULATION_CALC_SUBJECTS_FUNCTION_CPP_CODE[[C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_RATES]] <-
    C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_RATES_CPP_CODE
C_SIMULATION_CALC_SUBJECTS_FUNCTION_CPP_CODE[[C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_SURVIVAL]] <-
    C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_SURVIVAL_CPP_CODE

# Registry: endpoint family key -> expected argument signature.
C_SIMULATION_CALC_SUBJECTS_FUNCTION_ARGUMENTS <- list()
C_SIMULATION_CALC_SUBJECTS_FUNCTION_ARGUMENTS[[C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_MEANS]] <-
    C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_MEANS_ARGUMENTS
C_SIMULATION_CALC_SUBJECTS_FUNCTION_ARGUMENTS[[C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_RATES]] <-
    C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_RATES_ARGUMENTS
C_SIMULATION_CALC_SUBJECTS_FUNCTION_ARGUMENTS[[C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_SURVIVAL]] <-
    C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_SURVIVAL_ARGUMENTS

# Runs regexpr() and fails with an informative error when 'pattern' does not
# match 'cmd'; on success returns list(value = match position, len = match length).
# 'language' selects the wording of the error message ("cpp" or "R").
# FIX: the R variant of the message previously showed invalid R syntax
# ('myFunctionName <- (myArgs) { myCode }', missing the 'function' keyword).
.regexprCalcSubjectsFunction <- function(pattern, cmd, ..., language = c("cpp", "R")) {
    language <- match.arg(language)
    x1 <- regexpr(pattern, cmd)
    if (x1 == -1) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            ifelse(language == "cpp",
                "the function definition must match 'double myFunctionName(myArgs) { myCode; }'",
                "the function definition must match 'myFunctionName <- function(myArgs) { myCode }'"
            )
        )
    }
    len <- attr(x1, "match.length")
    return(list(value = x1, len = len))
}

# Heuristic: TRUE if 'code' looks like C++ rather than R, i.e. it is a
# character vector containing a C++ type keyword followed by a space.
.isCppCode <- function(code) {
    if (is.null(code) || length(code) == 0 || all(is.na(code)) || !is.character(code)) {
        return(FALSE)
    }
    return(any(grepl("(int|bool|double|NumericVector) +", code)))
}

# Parses a user-supplied R function given as character string, validates its
# argument names against the registry for 'cppCodeBodyType', and returns a
# function with the canonical signature 'function(..., <validArgs>)' wrapping
# the user's body. Raises an illegal-argument error for unknown arguments.
.getCalcSubjectsFunctionRCode <- function(cmd, cppCodeBodyType) {
    .assertIsCharacter(cmd, "cmd")
    .assertIsCharacter(cppCodeBodyType, "cppCodeBodyType")
    cmd <- paste0(cmd, collapse = "\n")
    cmd <- trimws(cmd)
    validArgsList <- C_SIMULATION_CALC_SUBJECTS_FUNCTION_ARGUMENTS[[cppCodeBodyType]]
    validArgs <- names(validArgsList)
    fun <- eval(parse(text = cmd))
    args <- methods::formalArgs(fun)
    args <- args[args != "..."]
    if (!all(args %in% validArgs)) {
        invalidArgs <- args[!(args %in% validArgs)]
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "the argument", ifelse(length(invalidArgs) == 1, "", "s"), " ",
            .arrayToString(invalidArgs, encapsulate = TRUE), " ",
            ifelse(length(invalidArgs) == 1, "is", "are"),
            " invalid (the ", length(validArgs),
            " valid arguments can be found in the reference manual)"
        )
    }
    # rebuild the function with the canonical argument list, keeping only the body
    bodyStartIndex <- .regexprCalcSubjectsFunction("\\{", cmd, language = "R")$value
    functionBody <- substring(cmd, bodyStartIndex, nchar(cmd))
    functionCmd <- paste0("function(..., ", paste0(validArgs, collapse = ", "), ") ", functionBody)
    return(eval(parse(text = functionCmd)))
}

# Extracts the body of a user-supplied C++ function (given as character
# string), validates its 'type name' argument pairs against the registry for
# 'cppCodeBodyType', and splices the body into the matching C++ template at
# '{USER_CODE}'. Returns the complete C++ source ready for Rcpp::sourceCpp().
.getCalcSubjectsFunctionCppCode <- function(cmd, cppCodeBodyType) {
    .assertIsCharacter(cmd, "cmd")
    .assertIsCharacter(cppCodeBodyType, "cppCodeBodyType")
    cppCodeBody <- ""
    if (length(cppCodeBodyType) == 1 &&
            cppCodeBodyType %in% names(C_SIMULATION_CALC_SUBJECTS_FUNCTION_CPP_CODE)) {
        cppCodeBody <- C_SIMULATION_CALC_SUBJECTS_FUNCTION_CPP_CODE[[cppCodeBodyType]]
    }
    cppCodeBody <- paste0(cppCodeBody, collapse = "\n")
    # sanity check: the template must contain an include directive
    # (the full directive is '#include <Rcpp.h>'; matching on the stable
    # prefix keeps this check independent of the template constant)
    if (!grepl("#include ", cppCodeBody)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'cppCodeBody' must contain '#include <Rcpp.h>'")
    }
    cmd <- paste0(cmd, collapse = "\n")
    cmd <- trimws(cmd)
    validArgsList <- C_SIMULATION_CALC_SUBJECTS_FUNCTION_ARGUMENTS[[cppCodeBodyType]]
    # valid entries are "type name" pairs, e.g. "int stage"
    validArgs <- paste(validArgsList, names(validArgsList))
    len <- .regexprCalcSubjectsFunction("double +[a-zA-Z_0-9]{1,100}\\(", cmd)$len
    args <- trimws(substring(cmd, len + 1, nchar(cmd)))
    pos <- .regexprCalcSubjectsFunction("\\) *\\{.*", args)$value
    args <- substring(args, 1, pos - 1)
    if (grepl(",", args)) {
        args <- strsplit(args, "[ \r\n\t]*,[ \r\n\t]*")[[1]]
    }
    if (!all(args %in% validArgs)) {
        invalidArgs <- args[!(args %in% validArgs)]
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "the argument", ifelse(length(invalidArgs) == 1, "", "s"), " ",
            .arrayToString(invalidArgs, encapsulate = TRUE), " ",
            ifelse(length(invalidArgs) == 1, "is", "are"),
            " invalid (the ", length(validArgs),
            " valid arguments can be found in the reference manual)"
        )
    }
    # strip the signature and the trailing '}' to isolate the user's body
    pattern <- paste0(
        "double +[a-zA-Z_0-9]{1,100}\\([ \r\n\t]*",
        "(", paste0(paste0("(", args, ")"), collapse = "|"), "|([ \r\n\t]*,[ \r\n\t]*))*",
        "\\)[ \r\n\t]*\\{"
    )
    len <- .regexprCalcSubjectsFunction(pattern, cmd)$len
    code <- trimws(substring(cmd, len + 1, nchar(cmd)))
    code <- trimws(sub("}[ \\r\\n]*$", "", code))
    calcSubjectsFunctionCode <- sub("\\{USER_CODE\\}", code, cppCodeBody)
    return(calcSubjectsFunctionCode)
}

# Central dispatcher resolving the user-supplied 'calcFunction' (NULL, an R
# function, an R source string, or C++ source) for a simulation run.
# Side effects: records the parameter type and stores the function on
# 'simulationResults'. Returns a list with slots calcSubjectsFunctionR,
# calcSubjectsFunctionCpp and calcSubjectsFunctionType
# (0 = default, 1 = user R function, 2 = user C++ function).
.getCalcSubjectsFunction <- function(..., design, simulationResults, calcFunction,
        expectedFunction, cppEnabled = TRUE) {
    .assertIsTrialDesign(design)
    .assertIsSimulationResults(simulationResults)
    .assertIsSingleLogical(cppEnabled, "cppEnabled")
    cppCodeBodyType <- NA_character_
    if (inherits(simulationResults, "SimulationResultsMeans")) {
        cppCodeBodyType <- C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_MEANS
    }
    if (inherits(simulationResults, "SimulationResultsRates")) {
        cppCodeBodyType <- C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_RATES
    }
    if (inherits(simulationResults, "SimulationResultsSurvival")) {
        cppCodeBodyType <- C_SIMULATION_CALC_SUBJECTS_FUNCTION_BASE_SURVIVAL
    }
    if (is.na(cppCodeBodyType)) {
        stop(
            C_EXCEPTION_TYPE_RUNTIME_ISSUE,
            ".getCalcSubjectsFunction() is not implemented for object ",
            class(simulationResults)[1]
        )
    }
    # survival designs adapt the number of events, not subjects
    functionFieldName <- ifelse(inherits(simulationResults, "SimulationResultsSurvival"),
        "calcEventsFunction", "calcSubjectsFunction"
    )
    if (design$kMax == 1) {
        # fixed sample design: no stage-wise recalculation possible
        if (!is.null(calcFunction)) {
            warning("'", functionFieldName, "' will be ignored for fixed sample design", call. = FALSE)
        }
        simulationResults$.setParameterType(functionFieldName, C_PARAM_NOT_APPLICABLE)
        return(list(
            calcSubjectsFunctionR = NULL,
            calcSubjectsFunctionCpp = NULL,
            calcSubjectsFunctionType = 0
        ))
    }
    if (is.null(calcFunction)) {
        # no user function: use the package default (R fallback only if C++ is disabled)
        simulationResults$.setParameterType(functionFieldName, C_PARAM_DEFAULT_VALUE)
        if (!cppEnabled) {
            calcFunction <- expectedFunction
            simulationResults[[functionFieldName]] <- calcFunction
        }
        return(list(
            calcSubjectsFunctionR = calcFunction,
            calcSubjectsFunctionCpp = NULL,
            calcSubjectsFunctionType = 0
        ))
    }
    simulationResults$.setParameterType(functionFieldName, C_PARAM_USER_DEFINED)
    calcSubjectsFunctionType <- 0
    calcSubjectsFunctionR <- NULL
    if (is.function(calcFunction)) {
        # user passed an R function object directly
        .assertIsValidFunction(
            fun = calcFunction,
            funArgName = functionFieldName,
            expectedFunction = expectedFunction
        )
        simulationResults[[functionFieldName]] <- calcFunction
        return(list(
            calcSubjectsFunctionR = calcFunction,
            calcSubjectsFunctionCpp = NULL,
            calcSubjectsFunctionType = 1
        ))
    }
    if (!is.character(calcFunction)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", functionFieldName, "' must be a function or a character ",
            "string specifying a function written in R/C++/Rcpp"
        )
    }
    if (.isCppCode(calcFunction)) {
        # compile the user's C++ body and fetch the XPtr via the exported factory
        tryCatch(
            {
                survivalEnabled <- inherits(simulationResults, "SimulationResultsSurvival")
                expectedFunctionName <- ifelse(survivalEnabled,
                    "calcEventsFunctionCppTemp", "calcSubjectsFunctionCppTemp"
                )
                # placeholders overwritten by Rcpp::sourceCpp() in this environment
                calcSubjectsFunctionCppTemp <- NULL
                calcEventsFunctionCppTemp <- NULL
                result <- Rcpp::sourceCpp(code = .getCalcSubjectsFunctionCppCode(
                    calcFunction, cppCodeBodyType
                ))
                functionName <- result$functions
                if (length(functionName) == 0) {
                    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "C++ compilation returned an unexpected value")
                }
                if (functionName != expectedFunctionName) {
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE, "C++ compilation returned an unexpected ",
                        "function name (", sQuote(functionName), " instead of ",
                        sQuote(expectedFunctionName), ")"
                    )
                }
                simulationResults[[functionFieldName]] <- calcFunction
                if (!exists(functionName)) {
                    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, sQuote(functionName), " is missing")
                }
                if (survivalEnabled) {
                    return(list(
                        calcSubjectsFunctionR = NULL,
                        calcSubjectsFunctionCpp = calcEventsFunctionCppTemp(),
                        calcSubjectsFunctionType = 2
                    ))
                } else {
                    return(list(
                        calcSubjectsFunctionR = NULL,
                        calcSubjectsFunctionCpp = calcSubjectsFunctionCppTemp(),
                        calcSubjectsFunctionType = 2
                    ))
                }
            },
            error = function(e) {
                # print the generated source and recompile verbosely to aid debugging
                cat(.getCalcSubjectsFunctionCppCode(
                    calcFunction, cppCodeBodyType
                ), "\n")
                Rcpp::sourceCpp(
                    code = .getCalcSubjectsFunctionCppCode(
                        calcFunction, cppCodeBodyType
                    ),
                    verbose = FALSE, showOutput = TRUE
                )
                stop("Failed to compile '", functionFieldName, "': ", e$message)
            }
        )
    }
    # otherwise: character string containing R source
    tryCatch(
        {
            calcSubjectsFunctionR <- .getCalcSubjectsFunctionRCode(calcFunction, cppCodeBodyType)
            simulationResults[[functionFieldName]] <- calcSubjectsFunctionR
            return(list(
                calcSubjectsFunctionR = calcSubjectsFunctionR,
                calcSubjectsFunctionCpp = NULL,
                calcSubjectsFunctionType = 1
            ))
        },
        error = function(e) {
            stop("Failed to evaluate and parse '", functionFieldName, "': ", e$message)
        }
    )
    # unreachable: every branch above returns or stops
    return(list(
        calcSubjectsFunctionR = NULL,
        calcSubjectsFunctionCpp = NULL,
        calcSubjectsFunctionType = 0
    ))
}
rpact/R/f_simulation_performance_score.R0000644000176200001440000001717414450463134020172 0ustar liggesusers## |
## | *Performance score functions*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7147 $
## | Last changed: $Date: 2023-07-03 08:10:31 +0200 (Mo, 03 Jul 2023) $
## | Last changed by:
$Author: pahlke $ ## |

#'
#' @title
#' Get Performance Score
#'
#' @description
#' Calculates the conditional performance score, its sub-scores and components according to
#' Herrmann et al. (2020) for a given simulation result from a two-stage design.
#' Larger (sub-)score and component values refer to a better performance.
#'
#' @param simulationResult A simulation result.
#'
#' @details
#' The conditional performance score consists of two sub-scores, one for the sample size
#' (subscoreSampleSize) and one for the conditional power (subscoreConditionalPower).
#' Each of those are composed of a location (locationSampleSize, locationConditionalPower)
#' and variation component (variationSampleSize, variationConditionalPower).
#' The term conditional refers to an evaluation perspective where the interim results
#' suggest a trial continuation with a second stage.
#' The score can take values between 0 and 1. More details on the performance score
#' can be found in Herrmann et al. (2020).
#'
#' @template examples_get_performance_score
#'
#' @author Stephen Schueuerhuis
#'
#' @export
#'
getPerformanceScore <- function(simulationResult) {
    .assertIsSimulationResults(simulationResult)
    design <- simulationResult$.design
    # currently restricted to continuous endpoints (SimulationResultsMeans);
    # NOTE(review): this guard makes the 'SimulationResultsRates' branches
    # further below unreachable — presumably kept for a planned extension
    if (!inherits(simulationResult, "SimulationResultsMeans")) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "performance score so far implemented only for single comparisons with continuous endpoints"
        )
    }
    # the score is defined for exactly two stages (interim + final)
    if (design$kMax != 2) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "performance score so far implemented only for two-stage designs"
        )
    }

    # initialize necessary sample size values; without conditional power the
    # maximum second-stage increment is the planned stage-2 increment
    plannedSubjects <- simulationResult$plannedSubjects
    maxAdditionalNumberOfSubjects <- ifelse(is.na(simulationResult$conditionalPower),
        plannedSubjects[2] - plannedSubjects[1],
        simulationResult$maxNumberOfSubjectsPerStage[2]
    )

    # number of iterations
    iterations <- simulationResult$maxNumberOfIterations

    # target CP: user-specified conditional power, or 1 - beta as default
    targetConditionalPower <- ifelse(is.na(simulationResult$conditionalPower),
        1 - design$beta,
        simulationResult$conditionalPower
    )

    # arguments for the fixed (single-stage) reference sample size calculation
    args <- list(
        design = getDesignGroupSequential(
            kMax = 1,
            alpha = design$alpha,
            beta = design$beta
        ),
        thetaH0 = 0,
        normalApproximation = TRUE,
        groups = simulationResult$groups
    )

    alternativeParamName <- NA_character_
    referenceValue <- NA_real_
    # simulated alternative values: effect parameter name and null reference
    # value depend on the endpoint type
    if (methods::is(simulationResult, "SimulationResultsMeans")) {
        alternativeParamName <- "alternative"
        referenceValue <- 0
    } else if (methods::is(simulationResult, "SimulationResultsRates")) {
        alternativeParamName <- "pi1"
        referenceValue <- simulationResult$pi2
        args$pi2 <- referenceValue
    } else {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "performance score is not available for class ", class(simulationResult)[1]
        )
    }
    alternativeValues <- simulationResult[[alternativeParamName]]
    simData <- simulationResult$.data

    # one column of score components per simulated alternative value
    resultMatrix <- sapply(alternativeValues, FUN = function(alternativeValue) {
        # reference (fixed design) sample size for this alternative; under the
        # null the first-stage sample size is used
        if (alternativeValue == referenceValue) {
            singleStageSampleSize <- plannedSubjects[1]
        } else if (methods::is(simulationResult, "SimulationResultsMeans")) {
            singleStageSampleSize <- do.call(getSampleSizeMeans, args)$numberOfSubjects
        } else if (methods::is(simulationResult, "SimulationResultsRates")) {
            singleStageSampleSize <- do.call(getSampleSizeRates, args)$numberOfSubjects
        }

        # iterations in which the trial has proceed to stage two
        secondStageIterations <- simData[
            simData$stageNumber == 2 & simData[[alternativeParamName]] == alternativeValue,
        ]

        # mean and variance estimates for sample size and conditional power
        meanSampleSize <- mean(secondStageIterations$numberOfCumulatedSubjects, na.rm = TRUE)
        varSampleSize <- stats::var(secondStageIterations$numberOfCumulatedSubjects, na.rm = TRUE)
        meanConditionalPower <- mean(secondStageIterations$conditionalPowerAchieved, na.rm = TRUE)
        varConditionalPower <- stats::var(secondStageIterations$conditionalPowerAchieved, na.rm = TRUE)

        # target sample size: single stage sample size if it doesn't exceed maximum admissible
        # sample size, otherwise only first stage sample size
        targetSampleSize <- ifelse(singleStageSampleSize <= (maxAdditionalNumberOfSubjects + plannedSubjects[1]),
            singleStageSampleSize, plannedSubjects[1]
        )

        # sample size components (location and variation, each scaled to [0, 1])
        locationSampleSize <- 1 - abs(meanSampleSize - targetSampleSize) / maxAdditionalNumberOfSubjects
        maxVariationSampleSize <- (maxAdditionalNumberOfSubjects / 2)^2 * iterations / (iterations - 1)
        variationSampleSize <- 1 - sqrt(varSampleSize / maxVariationSampleSize)
        subscoreSampleSize <- mean(c(locationSampleSize, variationSampleSize), na.rm = TRUE)

        # conditional power components
        locationConditionalPower <- 1 - abs(meanConditionalPower - targetConditionalPower) / (1 - design$alpha)
        maxVariationConditionalPower <- (1 / 2)^2 * iterations / (iterations - 1)
        variationConditionalPower <- 1 - sqrt(varConditionalPower / maxVariationConditionalPower)
        subscoreConditionalPower <- mean(c(locationConditionalPower, variationConditionalPower), na.rm = TRUE)

        # performance score calculation: average of the two sub-scores
        performanceScore <- mean(c(subscoreSampleSize, subscoreConditionalPower), na.rm = TRUE)

        return(c(
            alternative = alternativeValue,
            reference = referenceValue,
            locationSampleSize = locationSampleSize,
            variationSampleSize = variationSampleSize,
            subscoreSampleSize = subscoreSampleSize,
            locationConditionalPower = locationConditionalPower,
            variationConditionalPower = variationConditionalPower,
            subscoreConditionalPower = subscoreConditionalPower,
            performanceScore = performanceScore
        ))
    })

    # copy the result matrix rows into the PerformanceScore field set
    performanceScore <- PerformanceScore(simulationResult)
    performanceScore$.alternative <- alternativeValues
    paramNames <- rownames(resultMatrix)
    for (k in 1:nrow(resultMatrix)) {
        paramName <- paramNames[k]
        performanceScore[[paramName]] <- resultMatrix[k, ]
        performanceScore$.setParameterType(paramName, C_PARAM_GENERATED)
    }

    warning("The performance score function is experimental and hence not fully validated ",
        "(see www.rpact.com/experimental)",
        call. = FALSE
    )

    return(performanceScore)
}
rpact/R/f_design_group_sequential.R0000644000176200001440000027673514446750002017151 0ustar liggesusers## |
## | *Group sequential design*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7139 $
## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_core_constants.R
NULL

# Thin wrapper delegating the probability computation to the C++ backend.
.getGroupSequentialProbabilities <- function(decisionMatrix, informationRates) {
    return(getGroupSequentialProbabilitiesCpp(decisionMatrix, informationRates))
}

#' @title
#' Get Group Sequential Probabilities
#'
#' @description
#' Calculates probabilities in the group sequential setting.
#'
#' @param decisionMatrix A matrix with either 2 or 4 rows and kMax = length(informationRates) columns, see details.
#' @inheritParams param_informationRates #' #' @details #' Given a sequence of information rates (fixing the correlation structure), and #' decisionMatrix with either 2 or 4 rows and kMax = length(informationRates) columns, #' this function calculates a probability matrix containing, for two rows, the probabilities:\cr #' P(Z_1 <- l_1), P(l_1 <- Z_1 < u_1, Z_2 < l_1),..., P(l_kMax-1 <- Z_kMax-1 < u_kMax-1, Z_kMax < l_l_kMax)\cr #' P(Z_1 <- u_1), P(l_1 <- Z_1 < u_1, Z_2 < u_1),..., P(l_kMax-1 <- Z_kMax-1 < u_kMax-1, Z_kMax < u_l_kMax)\cr #' P(Z_1 <- Inf), P(l_1 <- Z_1 < u_1, Z_2 < Inf),..., P(l_kMax-1 <- Z_kMax-1 < u_kMax-1, Z_kMax < Inf)\cr #' with continuation matrix\cr #' l_1,...,l_kMax\cr #' u_1,...,u_kMax\cr #' For 4 rows, the continuation region contains of two regions and the probability matrix is #' obtained analogously (cf., Wassmer and Brannath, 2016). #' #' @family design functions #' #' @template examples_get_group_sequential_probabilities #' #' @return Returns a numeric matrix containing the probabilities described in the details section. #' #' @export #' getGroupSequentialProbabilities <- function(decisionMatrix, informationRates) { .assertAreValidInformationRates(informationRates) .assertIsValidDecisionMatrix(decisionMatrix, length(informationRates)) return(.getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = informationRates)) } .validateGroupSequentialProbabilityResultsMulti <- function(...) { args <- list(...) 
for (variableName in names(args)) { if (!.validateGroupSequentialProbabilityResults(results = args[[variableName]], variableName)) { return(invisible()) } } } .validateGroupSequentialProbabilityResults <- function(results, variableName) { numberOfNAs <- sum(is.na(results)) if (numberOfNAs == 0) { return(TRUE) } warning(sprintf( paste0( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "in .getGroupSequentialProbabilities(): ", "variable '%s' contains %s NA's (%.1f%%)" ), variableName, numberOfNAs, 100 * numberOfNAs / length(results) ), call. = FALSE) return(FALSE) } .validateTypeOfDesign <- function(design) { .assertDesignParameterExists(design, "typeOfDesign", C_DEFAULT_TYPE_OF_DESIGN) design$.setParameterType("userAlphaSpending", C_PARAM_NOT_APPLICABLE) design$.setParameterType("userBetaSpending", C_PARAM_NOT_APPLICABLE) design$.setParameterType("deltaWT", C_PARAM_NOT_APPLICABLE) design$.setParameterType("deltaPT1", C_PARAM_NOT_APPLICABLE) design$.setParameterType("deltaPT0", C_PARAM_NOT_APPLICABLE) design$.setParameterType("optimizationCriterion", C_PARAM_NOT_APPLICABLE) design$.setParameterType("gammaA", C_PARAM_NOT_APPLICABLE) design$.setParameterType("gammaB", C_PARAM_NOT_APPLICABLE) design$.setParameterType("typeBetaSpending", C_PARAM_NOT_APPLICABLE) design$.setParameterType("constantBoundsHP", C_PARAM_NOT_APPLICABLE) if (!(design$typeOfDesign %in% .getDesignTypes())) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "type of design (", design$typeOfDesign, ") must be one of the following: ", .printDesignTypes() ) } if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT) { .assertDesignParameterExists(design, "deltaWT", NA_real_) .assertIsSingleNumber(design$deltaWT, "deltaWT", naAllowed = FALSE) .assertIsInClosedInterval(design$deltaWT, "deltaWT", lower = -0.5, upper = 1) } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_PT) { .assertDesignParameterExists(design, "deltaPT1", NA_real_) .assertIsSingleNumber(design$deltaPT1, "deltaPT1", naAllowed = FALSE) 
.assertIsInClosedInterval(design$deltaPT1, "deltaPT1", lower = -0.5, upper = 1) .assertDesignParameterExists(design, "deltaPT0", NA_real_) .assertIsSingleNumber(design$deltaPT0, "deltaPT0", naAllowed = FALSE) .assertIsInClosedInterval(design$deltaPT0, "deltaPT0", lower = -0.5, upper = 1) } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT_OPTIMUM) { .assertDesignParameterExists(design, "optimizationCriterion", C_OPTIMIZATION_CRITERION_DEFAULT) if (!.isOptimizationCriterion(design$optimizationCriterion)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "optimization criterion must be one of the following: ", .printOptimizationCriterion() ) } } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP) { .assertDesignParameterExists(design, "constantBoundsHP", C_CONST_BOUND_HP_DEFAULT) .assertIsSingleNumber(design$constantBoundsHP, "constantBoundsHP") .assertIsInClosedInterval(design$constantBoundsHP, "constantBoundsHP", lower = 2, upper = NULL) } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_KD) { .assertDesignParameterExists(design, "gammaA", NA_real_) .assertIsSingleNumber(design$gammaA, "gammaA", naAllowed = FALSE) if (design$gammaA < 0.4 || design$gammaA > 8) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "parameter 'gammaA' (", design$gammaA, ") for Kim & DeMets alpha ", "spending function is out of bounds [0.4; 8]" ) } } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_HSD) { .assertDesignParameterExists(design, "gammaA", NA_real_) .assertIsSingleNumber(design$gammaA, "gammaA", naAllowed = FALSE) if (design$gammaA < -10 || design$gammaA > 5) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "Parameter 'gammaA' (", design$gammaA, ") for Hwang, Shih & DeCani ", "alpha spending function is out of bounds [-10; 5]" ) } } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_USER) { .validateUserAlphaSpending(design) design$.setParameterType("userAlphaSpending", C_PARAM_USER_DEFINED) } if (.isUndefinedArgument(design$alpha)) { design$alpha <- C_ALPHA_DEFAULT 
design$.setParameterType("alpha", C_PARAM_DEFAULT_VALUE) } if (design$typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY) { .assertIsValidAlpha(design$alpha) design$.setParameterType("userAlphaSpending", C_PARAM_DEFAULT_VALUE) } if ((.isBetaSpendingDesignType(design$typeBetaSpending) || !.isAlphaSpendingDesignType(design$typeOfDesign)) && (design$informationRates[length(design$informationRates)] != 1)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "For specified design, last information rate should be equal 1" ) } if (.isAlphaSpendingDesignType(design$typeOfDesign)) { .assertDesignParameterExists(design, "typeBetaSpending", C_TYPE_OF_DESIGN_BS_NONE) if (!.isBetaSpendingDesignType(design$typeBetaSpending, noneIncluded = TRUE)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "type of beta spending must be one of the following: ", .printBetaSpendingDesignTypes() ) } if (design$typeBetaSpending == C_TYPE_OF_DESIGN_BS_KD) { .assertDesignParameterExists(design, "gammaB", NA_real_) .assertIsSingleNumber(design$gammaB, "gammaB", naAllowed = FALSE) if (design$gammaB < 0.4 || design$gammaB > 8) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "parameter 'gammaB' (", design$gammaB, ") for Kim & DeMets beta ", "spending function out of bounds [0.4; 8]" ) } } if (design$typeBetaSpending == C_TYPE_OF_DESIGN_BS_HSD) { .assertDesignParameterExists(design, "gammaB", NA_real_) .assertIsSingleNumber(design$gammaB, "gammaB", naAllowed = FALSE) if (design$gammaB < -10 || design$gammaB > 5) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "parameter 'gammaB' (", design$gammaB, ") for Hwang, Shih & DeCani ", "beta spending out of bounds [-10; 5]" ) } } if (design$typeBetaSpending == C_TYPE_OF_DESIGN_BS_USER) { .validateUserBetaSpending(design) design$.setParameterType("userBetaSpending", C_PARAM_USER_DEFINED) } } else { if (.designParameterExists(design, "typeBetaSpending") && design$typeBetaSpending != C_TYPE_OF_DESIGN_BS_NONE) { warning("'typeBetaSpending' (", design$typeBetaSpending, ") will 
be ignored ", "because 'typeOfDesign' (", design$typeOfDesign, ") is not an alpha spending design", call. = FALSE ) design$typeBetaSpending <- C_TYPE_OF_DESIGN_BS_NONE design$.setParameterType("typeBetaSpending", C_PARAM_DEFAULT_VALUE) } if (.designParameterExists(design, "userBetaSpending")) { userBetaSpending <- NA_real_ warning("'userBetaSpending' (", .arrayToString(design$userBetaSpending), ") will be ignored ", "because 'typeOfDesign' (", design$typeOfDesign, ") is not an alpha spending design", call. = FALSE ) } } if (.isUndefinedArgument(design$beta)) { design$beta <- C_BETA_DEFAULT design$.setParameterType("beta", C_PARAM_DEFAULT_VALUE) } return(invisible(design)) } .validateBaseParameters <- function(design, twoSidedWarningForDefaultValues = TRUE) { if (.isDefinedArgument(design$kMax)) { .assertDesignParameterExists(design, "kMax", C_KMAX_DEFAULT) .assertIsValidKMax(design$kMax) if (.isDefinedArgument(design$informationRates)) { .assertAreValidInformationRates(design$informationRates, design$kMax) } if (.isDefinedArgument(design$futilityBounds)) { .assertAreValidFutilityBounds(design$futilityBounds, design$kMax) } } .assertDesignParameterExists(design, "sided", C_SIDED_DEFAULT) .assertIsValidSidedParameter(design$sided) .setKmaxBasedOnAlphaSpendingDefintion(design) design$informationRates <- .getValidatedInformationRates(design) design$futilityBounds <- .getValidatedFutilityBounds(design, twoSidedWarningForDefaultValues = twoSidedWarningForDefaultValues ) .assertDesignParameterExists(design, "tolerance", C_DESIGN_TOLERANCE_DEFAULT) if (design$tolerance < 1e-10 || design$tolerance > 1e-03) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'tolerance' (", design$tolerance, ") out of bounds [1e-10; 1e-03]" ) } return(invisible(design)) } .createDesign <- function(..., designClass, kMax = NA_integer_, alpha = NA_real_, beta = NA_real_, sided = C_SIDED_DEFAULT, informationRates = NA_real_, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, 
deltaWT = NA_real_, deltaPT1 = NA_real_, deltaPT0 = NA_real_, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = NA_real_, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = NA_real_, bindingFutility = C_BINDING_FUTILITY_DEFAULT, constantBoundsHP = C_CONST_BOUND_HP_DEFAULT, twoSidedPower = NA, betaAdjustment = NA, delayedInformation = NA_real_, tolerance = C_DESIGN_TOLERANCE_DEFAULT) { .assertIsSingleInteger(kMax, "kMax", naAllowed = TRUE, validateType = FALSE) .assertIsSingleCharacter(typeOfDesign, "typeOfDesign") if (typeOfDesign == C_TYPE_OF_DESIGN_AS_USER && !any(is.na(userAlphaSpending))) { if (!is.na(kMax) && kMax != length(userAlphaSpending)) { stop(sprintf( paste0( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'userAlphaSpending' (%s) must be equal to 'kMax' (%s)" ), length(userAlphaSpending), kMax )) } kMax <- length(userAlphaSpending) if (kMax > 1 && all(userAlphaSpending[1:(kMax - 1)] == 0)) { message("Changed type of design to ", sQuote(C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY)) typeOfDesign <- C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY } } .assertIsSingleLogical(bindingFutility, "bindingFutility") .assertIsNumericVector(delayedInformation, "delayedInformation", naAllowed = TRUE) .assertIsInClosedInterval(delayedInformation, "delayedInformation", lower = 0, upper = NULL, naAllowed = TRUE) if (designClass == C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL) { design <- TrialDesignInverseNormal( kMax = kMax, bindingFutility = bindingFutility, delayedInformation = delayedInformation ) } else if (designClass == C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL) { design <- TrialDesignGroupSequential( kMax = kMax, bindingFutility = bindingFutility, delayedInformation = delayedInformation ) } else { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designClass' ('", designClass, "') must be '", C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, "' or ", "'", C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL, "'" ) } 
.assertIsSingleInteger(sided, "sided", naAllowed = FALSE, validateType = FALSE) if (!is.integer(sided) && sided %in% c(1, 2)) { sided <- as.integer(sided) } .assertIsSingleCharacter(optimizationCriterion, "optimizationCriterion") .assertIsSingleCharacter(typeBetaSpending, "typeBetaSpending") .assertIsSingleLogical(twoSidedPower, "twoSidedPower", naAllowed = TRUE) .assertIsSingleLogical(betaAdjustment, "betaAdjustment", naAllowed = TRUE) .assertIsSingleNumber(alpha, "alpha", naAllowed = TRUE) .assertIsSingleNumber(beta, "beta", naAllowed = TRUE) .assertIsSingleNumber(deltaWT, "deltaWT", naAllowed = TRUE) .assertIsSingleNumber(deltaPT1, "deltaPT1", naAllowed = TRUE) .assertIsSingleNumber(deltaPT0, "deltaPT0", naAllowed = TRUE) .assertIsSingleNumber(gammaA, "gammaA", naAllowed = TRUE) .assertIsSingleNumber(gammaB, "gammaB", naAllowed = TRUE) .assertIsNumericVector(futilityBounds, "futilityBounds", naAllowed = TRUE) .assertIsNumericVector(informationRates, "informationRates", naAllowed = TRUE) .assertIsNumericVector(userAlphaSpending, "userAlphaSpending", naAllowed = TRUE) .assertIsNumericVector(userBetaSpending, "userBetaSpending", naAllowed = TRUE) design$alpha <- alpha design$beta <- beta design$sided <- sided design$typeOfDesign <- typeOfDesign design$deltaWT <- deltaWT design$deltaPT1 <- deltaPT1 design$deltaPT0 <- deltaPT0 design$gammaA <- gammaA design$gammaB <- gammaB design$optimizationCriterion <- optimizationCriterion design$typeBetaSpending <- typeBetaSpending design$futilityBounds <- futilityBounds design$informationRates <- informationRates design$userAlphaSpending <- userAlphaSpending design$userBetaSpending <- userBetaSpending design$bindingFutility <- bindingFutility design$delayedInformation <- delayedInformation if (!all(is.na(delayedInformation)) && any(delayedInformation > 0)) { design$.setParameterType("delayedInformation", C_PARAM_USER_DEFINED) } if (design$typeOfDesign != C_TYPE_OF_DESIGN_WT_OPTIMUM && optimizationCriterion != 
C_OPTIMIZATION_CRITERION_DEFAULT) { warning( "'optimizationCriterion' (", optimizationCriterion, ") will be ignored because it is only applicable for 'typeOfDesign' = \"", C_TYPE_OF_DESIGN_WT_OPTIMUM, "\"", call. = FALSE ) } if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP) { .assertIsSingleNumber(constantBoundsHP, "constantBoundsHP") .assertIsInClosedInterval(constantBoundsHP, "constantBoundsHP", lower = 2, upper = NULL) design$constantBoundsHP <- constantBoundsHP } else if (constantBoundsHP != C_CONST_BOUND_HP_DEFAULT) { warning( "'constantBoundsHP' (", constantBoundsHP, ") will be ignored because it is only applicable for 'typeOfDesign' = \"", C_TYPE_OF_DESIGN_HP, "\"", call. = FALSE ) } if (is.na(twoSidedPower)) { design$twoSidedPower <- C_TWO_SIDED_POWER_DEFAULT design$.setParameterType("twoSidedPower", C_PARAM_DEFAULT_VALUE) } else { design$twoSidedPower <- twoSidedPower design$.setParameterType("twoSidedPower", ifelse( twoSidedPower == C_TWO_SIDED_POWER_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) } if (design$sided == 2 && grepl("^bs", design$typeBetaSpending)) { if (is.na(betaAdjustment)) { design$betaAdjustment <- TRUE design$.setParameterType("betaAdjustment", C_PARAM_DEFAULT_VALUE) } else { design$betaAdjustment <- betaAdjustment design$.setParameterType("betaAdjustment", ifelse(betaAdjustment, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) } } else if (!is.na(betaAdjustment)) { warning( "'betaAdjustment' (", betaAdjustment, ") will be ignored because it is only applicable for two-sided beta-spending designs", call. 
            = FALSE
        )
    }
    design$tolerance <- tolerance
    return(design)
}

#'
#' Fixed sample design (kMax = 1): single critical value at the
#' (1 - alpha / sided) normal quantile; the full alpha is spent at the only look.
#'
#' @noRd
#'
.getDesignGroupSequentialKMax1 <- function(design) {
    design$criticalValues <- .getOneMinusQNorm(design$alpha / design$sided)
    design$alphaSpent[1] <- design$alpha
    return(invisible(design))
}

#
# Wang and Tsiatis design
#
# Dispatches on 'typeOfDesign': Pocock ("P") and O'Brien & Fleming ("OF") use their
# dedicated compiled routines; any other type falls through to the general
# Wang & Tsiatis boundary computation parameterized by 'deltaWT'.
# The critical values are computed natively (C++ routines registered via useDynLib);
# afterwards the cumulative alpha spent is derived from them.
#
.getDesignGroupSequentialWangAndTsiatis <- function(design) {
    if (design$typeOfDesign == C_TYPE_OF_DESIGN_P) {
        design$criticalValues <- getDesignGroupSequentialPocockCpp(
            design$kMax, design$alpha, design$sided,
            design$informationRates, design$bindingFutility,
            design$futilityBounds, design$tolerance
        )
    } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_OF) {
        design$criticalValues <- getDesignGroupSequentialOBrienAndFlemingCpp(
            design$kMax, design$alpha, design$sided,
            design$informationRates, design$bindingFutility,
            design$futilityBounds, design$tolerance
        )
    } else {
        design$criticalValues <- getDesignGroupSequentialDeltaWTCpp(
            design$kMax, design$alpha, design$sided,
            design$informationRates, design$bindingFutility,
            design$futilityBounds, design$tolerance, design$deltaWT
        )
    }
    .calculateAlphaSpent(design)
    return(invisible(design))
}

#'
#' Pampallona & Tsiatis design
#'
#' Obtains futility bounds, critical values and stagewise probabilities from the
#' compiled routine, then derives 'betaSpent' and 'power' from the probability
#' matrix rows (row layout is defined by the C++ routine; one-sided and two-sided
#' cases use different rows).
#'
#' @noRd
#'
.getDesignGroupSequentialPampallonaTsiatis <- function(design) {
    cppResult <- getDesignGroupSequentialPampallonaTsiatisCpp(
        design$tolerance, design$beta, design$alpha, design$kMax,
        design$deltaPT0, design$deltaPT1, design$informationRates,
        design$sided, design$bindingFutility
    )
    futilityBounds <- cppResult$futilityBounds
    criticalValues <- cppResult$criticalValues
    probs <- cppResult$probs
    if (design$sided == 1) {
        design$betaSpent <- cumsum(probs[1, ])
        design$power <- cumsum(probs[3, ] - probs[2, ])
    } else {
        design$betaSpent <- cumsum(probs[3, ] - probs[2, ])
        # twoSidedPower: additionally count rejections of the "wrong" side (row 1)
        if (design$twoSidedPower) {
            design$power <- cumsum(probs[5, ] - probs[4, ] + probs[1, ])
        } else {
            design$power <- cumsum(probs[5, ] - probs[4, ])
        }
    }
    design$.setParameterType("betaSpent", C_PARAM_GENERATED)
    design$.setParameterType("power", C_PARAM_GENERATED)
    # only kMax - 1 futility bounds are meaningful (no futility stop at the final stage)
    design$futilityBounds <- futilityBounds[1:(design$kMax - 1)]
    design$criticalValues <- criticalValues
    design$.setParameterType("futilityBounds", C_PARAM_GENERATED)
    design$.setParameterType("criticalValues", C_PARAM_GENERATED)
    .calculateAlphaSpent(design)
    # a futility bound of exactly 0 marks "no bound" here and is normalized to NA
    design$futilityBounds[design$futilityBounds == 0] <- NA_real_
    .assertIsValidBetaSpent(design)
    return(invisible(design))
}

#'
#' Computes the cumulative alpha spent per stage from the design's critical values
#' (and, if futility is binding, its futility bounds) via the group sequential
#' probability recursion. The decision matrix layout depends on sidedness:
#' two-sided with binding futility uses a 4-row matrix (NA futility bounds are
#' replaced by 0), all other cases a 2-row matrix. Failures are downgraded to a
#' warning so that design construction can proceed with 'alphaSpent' unset.
#'
#' @noRd
#'
.calculateAlphaSpent <- function(design) {
    if (design$sided == 2) {
        if (design$bindingFutility) {
            futilityBounds <- design$futilityBounds
            futilityBounds[is.na(futilityBounds)] <- 0
            decisionMatrix <- matrix(c(
                -design$criticalValues, -futilityBounds,
                0, futilityBounds,
                0, design$criticalValues
            ), nrow = 4, byrow = TRUE)
        } else {
            decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE)
        }
    } else {
        if (design$bindingFutility) {
            decisionMatrix <- matrix(c(
                design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                design$criticalValues
            ), nrow = 2, byrow = TRUE)
        } else {
            decisionMatrix <- matrix(c(
                rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax),
                design$criticalValues
            ), nrow = 2, byrow = TRUE)
        }
    }
    tryCatch(
        {
            probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates)
            if (design$sided == 1) {
                design$alphaSpent <- cumsum(probs[3, ] - probs[2, ])
            } else if (nrow(decisionMatrix) == 2) {
                design$alphaSpent <- cumsum(probs[3, ] - probs[2, ] + probs[1, ])
            } else {
                design$alphaSpent <- cumsum(probs[5, ] - probs[4, ] + probs[1, ])
            }
            if (!is.na(design$alphaSpent[design$kMax])) {
                # truncate (round down) at the final stage to 8 decimals so numerical
                # noise cannot push the reported total above alpha
                design$alphaSpent[design$kMax] <- floor(design$alphaSpent[design$kMax] * 1e8) / 1e8
            }
            design$.setParameterType("alphaSpent", C_PARAM_GENERATED)
        },
        error = function(e) {
            warning("Failed to calculate 'alphaSpent': ", e, call. = FALSE)
        }
    )
}

#'
#' Haybittle & Peto design
#'
#' Early-stage critical values are fixed at 'constantBoundsHP'; only the final
#' critical value is solved for (root search on [0, 8]) such that the overall
#' rejection probability under H0 equals alpha.
#'
#' @noRd
#'
.getDesignGroupSequentialHaybittleAndPeto <- function(design) {
    scale <- .getOneDimensionalRoot(
        function(scale) {
            design$criticalValues <- c(rep(design$constantBoundsHP, design$kMax - 1), scale)
            if (design$sided == 2) {
                decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE)
                probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates)
                return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - design$alpha)
            } else {
                if (design$bindingFutility) {
                    decisionMatrix <- matrix(c(
                        design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                        design$criticalValues
                    ), nrow = 2, byrow = TRUE)
                } else {
                    decisionMatrix <- matrix(c(
                        rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax),
                        design$criticalValues
                    ), nrow = 2, byrow = TRUE)
                }
                probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates)
                return(sum(probs[3, ] - probs[2, ]) - design$alpha)
            }
        },
        lower = 0, upper = 8, tolerance = design$tolerance,
        callingFunctionInformation = ".getDesignGroupSequentialHaybittleAndPeto"
    )
    design$criticalValues <- c(rep(design$constantBoundsHP, design$kMax - 1), scale)
    .calculateAlphaSpent(design)
    return(invisible(design))
}

#'
#' Objective function for the optimum Wang & Tsiatis design: for a given 'deltaWT'
#' it (1) solves for the boundary scale so the design exhausts alpha, (2) installs
#' the resulting critical values (shape: informationRates^(deltaWT - 0.5)), and
#' (3) returns the value of the selected optimization criterion from the design
#' characteristics. Returns NA if 'optimizationCriterion' matches none of the
#' known criteria. Minimized by stats::optimize() in
#' .getDesignGroupSequentialWangAndTsiatisOptimum().
#'
#' @noRd
#'
.getOptimumDesign <- function(deltaWT, design) {
    scale <- .getOneDimensionalRoot(
        function(scale) {
            criticalValues <- scale * design$informationRates^(deltaWT - 0.5)
            if (design$sided == 2) {
                decisionMatrix <- (matrix(c(-criticalValues, criticalValues), nrow = 2, byrow = TRUE))
                probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates)
                return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - design$alpha)
            } else {
                if (design$bindingFutility) {
                    decisionMatrix <- matrix(c(
                        design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                        criticalValues
                    ), nrow = 2, byrow = TRUE)
                } else {
                    decisionMatrix <- matrix(c(
                        rep(C_FUTILITY_BOUNDS_DEFAULT, design$kMax),
                        criticalValues
                    ), nrow = 2, byrow = TRUE)
                }
                probs <- .getGroupSequentialProbabilities(decisionMatrix, design$informationRates)
                return(sum(probs[3, ] - probs[2, ]) - design$alpha)
            }
        },
        lower = 0, upper = 5, tolerance = design$tolerance,
        callingFunctionInformation = ".getOptimumDesign"
    )
    design$criticalValues <- scale * design$informationRates^(deltaWT - 0.5)
    designCharacteristics <- .getDesignCharacteristics(design = design)
    y <- NA_integer_
    if (design$optimizationCriterion == C_OPTIMIZATION_CRITERION_ASNH1) {
        y <- designCharacteristics$averageSampleNumber1
    }
    if (design$optimizationCriterion == C_OPTIMIZATION_CRITERION_ASNIFH1) {
        y <- designCharacteristics$inflationFactor + designCharacteristics$averageSampleNumber1
    }
    if (design$optimizationCriterion == C_OPTIMIZATION_CRITERION_ASN_SUM) {
        y <- designCharacteristics$averageSampleNumber0 +
            designCharacteristics$averageSampleNumber01 +
            designCharacteristics$averageSampleNumber1
    }
    return(y)
}

#'
#' Optimum design within Wang and Tsiatis class
#'
#' Minimizes .getOptimumDesign() over deltaWT in (0, 1), rounds the optimum to
#' three decimals, and recomputes the boundaries and characteristics with the
#' rounded value so the reported design matches the reported deltaWT exactly.
#'
#' @noRd
#'
.getDesignGroupSequentialWangAndTsiatisOptimum <- function(design) {
    .assertDesignParameterExists(design, "optimizationCriterion", C_OPTIMIZATION_CRITERION_DEFAULT)
    .assertIsOptimizationCriterion(design$optimizationCriterion)
    optimumDesign <- stats::optimize(.getOptimumDesign,
        design = design,
        interval = c(0, 1), tol = 0.0001
    )
    design$deltaWT <- round(optimumDesign$minimum, 3)
    design$.setParameterType("deltaWT", C_PARAM_GENERATED)

    # Recalculation of design characteristics with rounded design$deltaWT
    design$criticalValues <- getDesignGroupSequentialDeltaWTCpp(
        design$kMax, design$alpha, design$sided,
        design$informationRates, design$bindingFutility,
        design$futilityBounds, design$tolerance, design$deltaWT
    )
    designCharacteristics <- .getDesignCharacteristics(design = design)
    design$power <- designCharacteristics$power
    design$.setParameterType("power", C_PARAM_GENERATED)
    .calculateAlphaSpent(design)
    return(invisible(design))
}

#'
#' Alpha spending approaches
#'
#' Computes the critical values for the parametric alpha spending families via the
#' compiled routine, then chains into the (optional) beta spending computation.
#'
#' @noRd
#'
.getDesignGroupSequentialAlphaSpending <- function(design, userFunctionCallEnabled) {
    design$criticalValues <- getDesignGroupSequentialAlphaSpendingCpp(
        design$kMax, design$alpha, design$gammaA, design$typeOfDesign,
        design$sided, design$informationRates, design$bindingFutility,
        design$futilityBounds, design$tolerance
    )
    .calculateAlphaSpent(design)
    return(.getDesignGroupSequentialBetaSpendingApproaches(design, userFunctionCallEnabled))
}

#'
#' User defined alpha spending approach
#'
#' Also handles the "noEarlyEfficacy" design type, which is rewritten as a user
#' defined spending function that spends the entire alpha at the final stage;
#' its interim efficacy bounds are then fixed at C_QNORM_THRESHOLD (i.e. no
#' early efficacy stop is possible). With non-binding futility the final bound
#' reduces to the plain (1 - alpha / sided) normal quantile; otherwise the
#' compiled spending routine is used. Chains into the beta spending computation.
#'
#' @noRd
#'
.getDesignGroupSequentialUserDefinedAlphaSpending <- function(design, userFunctionCallEnabled) {
    design$criticalValues <- rep(NA_real_, design$kMax)
    if (design$typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY) {
        design$userAlphaSpending <- rep(0, design$kMax)
        design$userAlphaSpending[design$kMax] <- design$alpha
    }
    if (design$typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY && !design$bindingFutility) {
        design$criticalValues[1:(design$kMax - 1)] <- C_QNORM_THRESHOLD
        design$criticalValues[design$kMax] <- .getOneMinusQNorm(design$alpha / design$sided)
    } else {
        design$criticalValues <- getDesignGroupSequentialUserDefinedAlphaSpendingCpp(
            design$kMax, design$userAlphaSpending, design$sided,
            design$informationRates, design$bindingFutility,
            design$futilityBounds, design$tolerance
        )
        if (design$typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY) {
            design$criticalValues[1:(design$kMax - 1)] <- C_QNORM_THRESHOLD
        }
    }
    .calculateAlphaSpent(design)
    return(invisible(.getDesignGroupSequentialBetaSpendingApproaches(design, userFunctionCallEnabled)))
}

#'
#' Only for alpha spending approaches
#'
#' Dispatches to the parametric or the user defined beta spending computation,
#' depending on 'typeBetaSpending'; a no-op for designs without beta spending.
#'
#' @noRd
#'
.getDesignGroupSequentialBetaSpendingApproaches <- function(design, userFunctionCallEnabled) {
    # beta spending approaches (additional to alpha spending)!
    if (.isBetaSpendingDesignType(design$typeBetaSpending,
        userDefinedBetaSpendingIncluded = FALSE, noneIncluded = FALSE
    )) {
        .getDesignGroupSequentialBetaSpending(design, userFunctionCallEnabled)
    }

    # User defined beta spending
    if (design$typeBetaSpending == C_TYPE_OF_DESIGN_BS_USER) {
        .getDesignGroupSequentialUserDefinedBetaSpending(design)
    }
    return(invisible(design))
}

#'
#' Pads 'vec' with trailing NA_real_ values up to length 'n'.
#' (Note: if length(vec) > n the result is longer than n, not truncated.)
#'
#' @noRd
#'
.fillVec <- function(vec, n) {
    return(c(vec, rep(NA_real_, n - length(vec))))
}

#'
#' Beta spending approaches (additional to alpha spending)
#' Find shift with beta spending such that last critical values coincide
#'
#' @noRd
#'
.getDesignGroupSequentialBetaSpending <- function(design, userFunctionCallEnabled) {
    cppResult <- getDesignGroupSequentialBetaSpendingCpp(
        design$criticalValues, design$kMax,
        design$userAlphaSpending, design$userBetaSpending,
        design$informationRates, design$bindingFutility, design$tolerance,
        design$typeOfDesign, design$typeBetaSpending, design$gammaA,
        design$gammaB, design$alpha, design$beta, design$sided,
        design$betaAdjustment, design$twoSidedPower
    )
    design$futilityBounds <- cppResult$futilityBounds
    design$criticalValues <- cppResult$criticalValues
    design$betaSpent <- cppResult$betaSpent
    design$power <- cppResult$power
    # two-sided: critical values may have changed, so alpha spent must be recomputed
    if (design$sided == 2) {
        .calculateAlphaSpent(design)
    }
    design$.setParameterType("betaSpent", C_PARAM_GENERATED)
    design$.setParameterType("power", C_PARAM_GENERATED)
    design$.setParameterType("futilityBounds", C_PARAM_GENERATED)
    return(invisible(design))
}

#'
#' User defined beta spending
#'
#' Find shift with beta spending such that last critical values coincide
#'
#' Guards against being called for any other 'typeBetaSpending' and otherwise
#' mirrors .getDesignGroupSequentialBetaSpending() with the user defined
#' spending values.
#'
#' @noRd
#'
.getDesignGroupSequentialUserDefinedBetaSpending <- function(design) {
    if (design$typeBetaSpending != C_TYPE_OF_DESIGN_BS_USER) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'typeBetaSpending' ('", design$typeBetaSpending,
            "') must be '", C_TYPE_OF_DESIGN_BS_USER, "'"
        )
    }

    cppResult <- getDesignGroupSequentialUserDefinedBetaSpendingCpp(
        design$criticalValues, design$kMax,
        design$userAlphaSpending, design$userBetaSpending,
        design$sided, design$informationRates, design$bindingFutility,
        design$tolerance, design$typeOfDesign, design$gammaA,
        design$alpha, design$betaAdjustment, design$twoSidedPower
    )
    design$futilityBounds <- cppResult$futilityBounds
    design$criticalValues <- cppResult$criticalValues
    design$betaSpent <- cppResult$betaSpent
    design$power <- cppResult$power
    # two-sided: critical values may have changed, so alpha spent must be recomputed
    if (design$sided == 2) {
        .calculateAlphaSpent(design)
    }
    design$.setParameterType("betaSpent", C_PARAM_GENERATED)
    design$.setParameterType("power", C_PARAM_GENERATED)
    design$.setParameterType("futilityBounds", C_PARAM_GENERATED)
    return(invisible(design))
}

#'
#' Calculate stopping, rejection and futility probabilities for delayed response design
#'
#' For each stage, builds the decision matrix (boundaries shifted by
#' sqrtShift * sqrt(information)) and evaluates the group sequential probability
#' recursion. Stages with positive delayed information use an extended
#' information-rate vector (interim + pipeline data); stages without delay use
#' the plain boundaries. With sqrtShift = 0 the cumulative 'power' equals the
#' alpha spent under H0 (used this way by the caller).
#' Returns a list with the last computed 'probs' matrix plus per-stage
#' power, rejection, futility and stopping probabilities.
#'
#' @noRd
#'
.calculateDecisionProbabilities <- function(sqrtShift, informationRates, delayedInformation,
        contRegionUpper, contRegionLower, decisionCriticalValues) {
    kMax <- length(informationRates)
    power <- numeric(kMax)
    futilityProbabilities <- numeric(kMax - 1)
    stoppingProbabilities <- numeric(kMax - 1)
    rejectionProbabilities <- numeric(kMax)
    # final-stage "lower bound" is the final decision critical value
    contRegionLower <- c(contRegionLower, decisionCriticalValues[kMax])
    for (stage in 1:(kMax)) {
        if (!is.na(delayedInformation[stage]) && delayedInformation[stage] > 0) {
            # information rate vector in case of recruitment stop at 'stage'
            informationRatesUponDelay <- c(
                informationRates[1:stage],
                informationRates[stage] + delayedInformation[stage]
            )
            if (stage == 1) {
                # probs1: cross upper bound at stage 1, then the delayed decision analysis
                probs1 <- .getGroupSequentialProbabilities(
                    matrix(
                        c(
                            contRegionUpper[stage] - sqrtShift * sqrt(informationRatesUponDelay[1]),
                            decisionCriticalValues[stage] - sqrtShift * sqrt(informationRatesUponDelay[2]),
                            C_UPPER_BOUNDS_DEFAULT, C_UPPER_BOUNDS_DEFAULT
                        ),
                        nrow = 2, byrow = TRUE
                    ),
                    informationRatesUponDelay
                )
                # probs2: cross lower (futility) bound at stage 1, then the decision analysis
                probs2 <- .getGroupSequentialProbabilities(
                    matrix(
                        c(
                            -C_UPPER_BOUNDS_DEFAULT,
                            decisionCriticalValues[stage] - sqrtShift * sqrt(informationRatesUponDelay[2]),
                            contRegionLower[stage] - sqrtShift * sqrt(informationRatesUponDelay[1]),
                            C_UPPER_BOUNDS_DEFAULT
                        ),
                        nrow = 2, byrow = TRUE
                    ),
                    informationRatesUponDelay
                )
                rejectionProbabilities[stage] <- probs1[2, stage + 1] - probs1[1, stage + 1] +
                    probs2[2, stage + 1] - probs2[1, stage + 1]
                power[stage] <- rejectionProbabilities[stage]
                futilityProbabilities[stage] <- probs2[2, stage]
                stoppingProbabilities[stage] <- probs2[2, stage] + 1 - probs1[1, stage]
            } else if (stage < kMax) {
                probs1 <- .getGroupSequentialProbabilities(
                    matrix(
                        c(
                            contRegionLower[1:(stage - 1)] - sqrtShift * sqrt(informationRatesUponDelay[1:(stage - 1)]),
                            contRegionUpper[stage] - sqrtShift * sqrt(informationRatesUponDelay[stage]),
                            decisionCriticalValues[stage] - sqrtShift * sqrt(informationRatesUponDelay[stage + 1]),
                            contRegionUpper[1:(stage - 1)] - sqrtShift * sqrt(informationRatesUponDelay[1:(stage - 1)]),
                            C_UPPER_BOUNDS_DEFAULT, C_UPPER_BOUNDS_DEFAULT
                        ),
                        nrow = 2, byrow = TRUE
                    ),
                    informationRatesUponDelay
                )
                probs2 <- .getGroupSequentialProbabilities(
                    matrix(
                        c(
                            contRegionLower[1:(stage - 1)] - sqrtShift * sqrt(informationRatesUponDelay[1:(stage - 1)]),
                            -C_UPPER_BOUNDS_DEFAULT,
                            decisionCriticalValues[stage] - sqrtShift * sqrt(informationRatesUponDelay[stage + 1]),
                            contRegionUpper[1:(stage - 1)] - sqrtShift * sqrt(informationRatesUponDelay[1:(stage - 1)]),
                            contRegionLower[stage] - sqrtShift * sqrt(informationRatesUponDelay[stage]),
                            C_UPPER_BOUNDS_DEFAULT
                        ),
                        nrow = 2, byrow = TRUE
                    ),
                    informationRatesUponDelay
                )
                rejectionProbabilities[stage] <- probs1[2, stage + 1] - probs1[1, stage + 1] +
                    probs2[2, stage + 1] - probs2[1, stage + 1]
                power[stage] <- sum(rejectionProbabilities[1:stage])
                futilityProbabilities[stage] <- probs2[2, stage]
                stoppingProbabilities[stage] <- probs2[2, stage] + probs1[2, stage] - probs1[1, stage]
            } else {
                # final stage: no further delayed analysis, plain information rates
                probs <- .getGroupSequentialProbabilities(
                    matrix(
                        c(
                            contRegionLower[1:(stage - 1)] - sqrtShift * sqrt(informationRates[1:(stage - 1)]),
                            decisionCriticalValues[stage] - sqrtShift * sqrt(informationRates[stage]),
                            contRegionUpper[1:(stage - 1)] - sqrtShift * sqrt(informationRates[1:(stage - 1)]),
                            C_UPPER_BOUNDS_DEFAULT
                        ),
                        nrow = 2, byrow = TRUE
                    ),
                    informationRates
                )
                rejectionProbabilities[stage] <- probs[2, stage] - probs[1, stage]
                power[stage] <- sum(rejectionProbabilities[1:stage])
            }
        } else {
            # no delayed information at this stage: standard group sequential case
            if (stage == 1) {
                probs <- .getGroupSequentialProbabilities(
                    matrix(
                        c(
                            contRegionLower[stage] - sqrtShift * sqrt(informationRates[stage]),
                            contRegionUpper[stage] - sqrtShift * sqrt(informationRates[stage])
                        ),
                        nrow = 2, byrow = TRUE
                    ),
                    informationRates[1]
                )
            } else {
                probs <- .getGroupSequentialProbabilities(
                    matrix(
                        c(
                            contRegionLower[1:(stage - 1)] - sqrtShift * sqrt(informationRates[1:(stage - 1)]),
                            contRegionLower[stage] - sqrtShift * sqrt(informationRates[stage]),
                            contRegionUpper[1:(stage - 1)] - sqrtShift * sqrt(informationRates[1:(stage - 1)]),
                            contRegionUpper[stage] - sqrtShift * sqrt(informationRates[stage])
                        ),
                        nrow = 2, byrow = TRUE
                    ),
                    informationRates[1:stage]
                )
            }
            rejectionProbabilities[stage] <- probs[3, stage] - probs[2, stage]
            if (stage < kMax) {
                futilityProbabilities[stage] <- probs[1, stage]
                stoppingProbabilities[stage] <- futilityProbabilities[stage] + rejectionProbabilities[stage]
            }
            power[stage] <- sum(rejectionProbabilities[1:stage])
        }
    }
    return(list(
        probs = probs,
        power = power,
        rejectionProbabilities = rejectionProbabilities,
        futilityProbabilities = futilityProbabilities,
        stoppingProbabilities = stoppingProbabilities
    ))
}

#'
#' @title
#' Get Design Inverse Normal
#'
#' @description
#' Provides adjusted boundaries and defines a group sequential design for its use in
#' the inverse normal combination test.
#'
#' @inheritParams getDesignGroupSequential
#'
#' @template details_group_sequential_design
#'
#' @template return_object_trial_design
#' @template how_to_get_help_for_generics
#'
#' @family design functions
#'
#' @template examples_get_design_inverse_normal
#'
#' @export
#'
getDesignInverseNormal <- function(...,
        kMax = NA_integer_,
        alpha = NA_real_,
        beta = NA_real_,
        sided = 1L, # C_SIDED_DEFAULT
        informationRates = NA_real_,
        futilityBounds = NA_real_,
        typeOfDesign = c(
            "OF", "P", "WT", "PT", "HP", "WToptimum",
            "asP", "asOF", "asKD", "asHSD", "asUser", "noEarlyEfficacy"
        ), # C_DEFAULT_TYPE_OF_DESIGN
        deltaWT = NA_real_,
        deltaPT1 = NA_real_,
        deltaPT0 = NA_real_,
        optimizationCriterion = c("ASNH1", "ASNIFH1", "ASNsum"), # C_OPTIMIZATION_CRITERION_DEFAULT
        gammaA = NA_real_,
        typeBetaSpending = c("none", "bsP", "bsOF", "bsKD", "bsHSD", "bsUser"), # C_TYPE_OF_DESIGN_BS_NONE
        userAlphaSpending = NA_real_,
        userBetaSpending = NA_real_,
        gammaB = NA_real_,
        bindingFutility = NA,
        betaAdjustment = NA,
        constantBoundsHP = 3, # C_CONST_BOUND_HP_DEFAULT
        twoSidedPower = NA,
        tolerance = 1e-08 # C_DESIGN_TOLERANCE_DEFAULT
        ) {
    .warnInCaseOfUnknownArguments(functionName = "getDesignInverseNormal", ...)
    # delegate to the common worker with full user-level validation enabled
    return(.getDesignGroupSequential(
        designClass = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL,
        kMax = kMax,
        alpha = alpha,
        beta = beta,
        sided = sided,
        informationRates = informationRates,
        futilityBounds = futilityBounds,
        typeOfDesign = typeOfDesign,
        deltaWT = deltaWT,
        deltaPT1 = deltaPT1,
        deltaPT0 = deltaPT0,
        optimizationCriterion = optimizationCriterion,
        gammaA = gammaA,
        typeBetaSpending = typeBetaSpending,
        userAlphaSpending = userAlphaSpending,
        userBetaSpending = userBetaSpending,
        gammaB = gammaB,
        bindingFutility = bindingFutility,
        betaAdjustment = betaAdjustment,
        constantBoundsHP = constantBoundsHP,
        twoSidedPower = twoSidedPower,
        tolerance = tolerance,
        userFunctionCallEnabled = TRUE
    ))
}

#'
#' Internal counterpart of getDesignInverseNormal(): same delegation but with
#' user-level validation disabled (userFunctionCallEnabled = FALSE). The warning
#' helper intentionally reports the public function name.
#'
#' @noRd
#'
.getDesignInverseNormal <- function(...,
        kMax = NA_integer_,
        alpha = NA_real_,
        beta = NA_real_,
        sided = C_SIDED_DEFAULT,
        informationRates = NA_real_,
        futilityBounds = NA_real_,
        typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN,
        deltaWT = NA_real_,
        deltaPT1 = NA_real_,
        deltaPT0 = NA_real_,
        optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT,
        gammaA = NA_real_,
        typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE,
        userAlphaSpending = NA_real_,
        userBetaSpending = NA_real_,
        gammaB = NA_real_,
        bindingFutility = NA,
        betaAdjustment = NA,
        constantBoundsHP = C_CONST_BOUND_HP_DEFAULT,
        twoSidedPower = NA,
        tolerance = C_DESIGN_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(functionName = "getDesignInverseNormal", ...)
    return(.getDesignGroupSequential(
        designClass = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL,
        kMax = kMax,
        alpha = alpha,
        beta = beta,
        sided = sided,
        informationRates = informationRates,
        futilityBounds = futilityBounds,
        typeOfDesign = typeOfDesign,
        deltaWT = deltaWT,
        deltaPT1 = deltaPT1,
        deltaPT0 = deltaPT0,
        optimizationCriterion = optimizationCriterion,
        gammaA = gammaA,
        typeBetaSpending = typeBetaSpending,
        userAlphaSpending = userAlphaSpending,
        userBetaSpending = userBetaSpending,
        gammaB = gammaB,
        bindingFutility = bindingFutility,
        betaAdjustment = betaAdjustment,
        constantBoundsHP = constantBoundsHP,
        twoSidedPower = twoSidedPower,
        tolerance = tolerance,
        userFunctionCallEnabled = FALSE
    ))
}

#'
#' Default argument values for group sequential designs (NA where the value is
#' derived later, package constants otherwise).
#'
#' @noRd
#'
.getDesignGroupSequentialDefaultValues <- function() {
    return(list(
        kMax = NA_integer_,
        alpha = NA_real_,
        beta = NA_real_,
        sided = C_SIDED_DEFAULT,
        informationRates = NA_real_,
        futilityBounds = NA_real_,
        typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN,
        deltaWT = NA_real_,
        deltaPT1 = NA_real_,
        deltaPT0 = NA_real_,
        optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT,
        gammaA = NA_real_,
        typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE,
        userAlphaSpending = NA_real_,
        userBetaSpending = NA_real_,
        gammaB = NA_real_,
        twoSidedPower = C_TWO_SIDED_POWER_DEFAULT,
        tolerance = C_DESIGN_TOLERANCE_DEFAULT
    ))
}

#'
#' Inverse normal designs share the group sequential defaults.
#'
#' @noRd
#'
.getDesignInverseNormalDefaultValues <- function() {
    return(.getDesignGroupSequentialDefaultValues())
}

#
# Param: userFunctionCallEnabled if \code{TRUE}, additional parameter validation methods will be called.
# .getDesignGroupSequential <- function(..., designClass = C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL, kMax = NA_integer_, alpha = NA_real_, beta = NA_real_, sided = C_SIDED_DEFAULT, informationRates = NA_real_, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = NA_real_, deltaPT1 = NA_real_, deltaPT0 = NA_real_, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = NA_real_, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userAlphaSpending = NA_real_, userBetaSpending = NA_real_, gammaB = NA_real_, bindingFutility = C_BINDING_FUTILITY_DEFAULT, constantBoundsHP = C_CONST_BOUND_HP_DEFAULT, twoSidedPower = NA, betaAdjustment = NA, delayedInformation = NA_real_, tolerance = C_DESIGN_TOLERANCE_DEFAULT, userFunctionCallEnabled = FALSE) { typeOfDesign <- .matchArgument(typeOfDesign, C_DEFAULT_TYPE_OF_DESIGN) optimizationCriterion <- .matchArgument(optimizationCriterion, C_OPTIMIZATION_CRITERION_DEFAULT) typeBetaSpending <- .matchArgument(typeBetaSpending, C_TYPE_OF_DESIGN_BS_NONE) if (.isDefinedArgument(kMax, argumentExistsValidationEnabled = userFunctionCallEnabled)) { .assertIsValidKMax(kMax, showWarnings = TRUE) if (!is.integer(kMax)) { kMax <- as.integer(kMax) } } if (is.na(bindingFutility)) { bindingFutility <- C_BINDING_FUTILITY_DEFAULT } else if (userFunctionCallEnabled && typeOfDesign != C_TYPE_OF_DESIGN_PT && !(typeBetaSpending == "bsP" || typeBetaSpending == "bsOF" || typeBetaSpending == "bsKD" || typeBetaSpending == "bsHSD" || typeBetaSpending == "bsUser") && ((!is.na(kMax) && kMax == 1) || any(is.na(futilityBounds)) || (!any(is.na(futilityBounds)) && all(futilityBounds == C_FUTILITY_BOUNDS_DEFAULT)))) { warning("'bindingFutility' (", bindingFutility, ") will be ignored", call. 
= FALSE) } design <- .createDesign( designClass = designClass, kMax = kMax, alpha = alpha, beta = beta, sided = sided, informationRates = informationRates, futilityBounds = futilityBounds, typeOfDesign = typeOfDesign, deltaWT = deltaWT, deltaPT1 = deltaPT1, deltaPT0 = deltaPT0, optimizationCriterion = optimizationCriterion, gammaA = gammaA, typeBetaSpending = typeBetaSpending, userAlphaSpending = userAlphaSpending, userBetaSpending = userBetaSpending, gammaB = gammaB, bindingFutility = bindingFutility, constantBoundsHP = constantBoundsHP, twoSidedPower = twoSidedPower, betaAdjustment = betaAdjustment, delayedInformation = delayedInformation, tolerance = tolerance ) if (userFunctionCallEnabled) { .validateBaseParameters(design, twoSidedWarningForDefaultValues = FALSE) .validateTypeOfDesign(design) .assertIsValidTolerance(tolerance) .assertDesignParameterExists(design, "alpha", C_ALPHA_DEFAULT) .assertDesignParameterExists(design, "beta", C_BETA_DEFAULT) .assertDesignParameterExists(design, "sided", C_SIDED_DEFAULT) .assertDesignParameterExists(design, "typeOfDesign", C_DEFAULT_TYPE_OF_DESIGN) .assertDesignParameterExists(design, "bindingFutility", C_BINDING_FUTILITY_DEFAULT) .assertDesignParameterExists(design, "tolerance", C_DESIGN_TOLERANCE_DEFAULT) if (typeOfDesign != C_TYPE_OF_DESIGN_PT) { if (!is.na(deltaPT1)) { warning("'deltaPT1' (", deltaPT1, ") will be ignored", call. = FALSE) } if (!is.na(deltaPT0)) { warning("'deltaPT0' (", deltaPT0, ") will be ignored", call. = FALSE) } } if (typeOfDesign != C_TYPE_OF_DESIGN_WT && !is.na(deltaWT)) { warning("'deltaWT' (", deltaWT, ") will be ignored", call. = FALSE) } if (typeOfDesign != C_TYPE_OF_DESIGN_AS_KD && typeOfDesign != C_TYPE_OF_DESIGN_AS_HSD && !is.na(gammaA)) { warning("'gammaA' (", gammaA, ") will be ignored", call. = FALSE) } if (typeBetaSpending != C_TYPE_OF_DESIGN_BS_KD && typeBetaSpending != C_TYPE_OF_DESIGN_BS_HSD && !is.na(gammaB)) { warning("'gammaB' (", gammaB, ") will be ignored", call. 
= FALSE) } if (typeBetaSpending != C_TYPE_OF_DESIGN_BS_USER && !all(is.na(userBetaSpending))) { warning("'userBetaSpending' (", .arrayToString(userBetaSpending), ") will be ignored", call. = FALSE) } if (!(typeOfDesign %in% c(C_TYPE_OF_DESIGN_AS_USER)) && !all(is.na(userAlphaSpending))) { warning("'userAlphaSpending' (", .arrayToString(userAlphaSpending), ") will be ignored", call. = FALSE) } } if (design$sided == 2 && design$bindingFutility && design$typeOfDesign != C_TYPE_OF_DESIGN_PT && !.isBetaSpendingDesignType(design$typeBetaSpending)) { warning("'bindingFutility' will be ignored because the test is defined as two-sided", call. = FALSE) design$bindingFutility <- FALSE } if (design$sided == 1 && design$twoSidedPower) { warning("'twoSidedPower' will be ignored because the test is defined as one-sided", call. = FALSE) design$twoSidedPower <- FALSE } if (userFunctionCallEnabled) { .validateAlphaAndBeta(design) } design$alphaSpent <- rep(NA_real_, design$kMax) design$betaSpent <- rep(NA_real_, design$kMax) design$power <- rep(NA_real_, design$kMax) if (userFunctionCallEnabled) { design$.setParameterType("betaSpent", C_PARAM_NOT_APPLICABLE) design$.setParameterType("power", C_PARAM_NOT_APPLICABLE) design$.setParameterType("alphaSpent", C_PARAM_NOT_APPLICABLE) design$.setParameterType("criticalValues", C_PARAM_GENERATED) } if (design$kMax == 1) { .getDesignGroupSequentialKMax1(design) } else { # Wang and Tsiatis design if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT || design$typeOfDesign == C_TYPE_OF_DESIGN_P || design$typeOfDesign == C_TYPE_OF_DESIGN_OF) { .getDesignGroupSequentialWangAndTsiatis(design) } # Pampallona & Tsiatis design else if (design$typeOfDesign == C_TYPE_OF_DESIGN_PT) { .getDesignGroupSequentialPampallonaTsiatis(design) } # Haybittle & Peto design else if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP) { .getDesignGroupSequentialHaybittleAndPeto(design) } # Optimum design within Wang and Tsiatis class else if (design$typeOfDesign == 
C_TYPE_OF_DESIGN_WT_OPTIMUM) { .getDesignGroupSequentialWangAndTsiatisOptimum(design) } # alpha spending approaches else if (.isAlphaSpendingDesignType(design$typeOfDesign, userDefinedAlphaSpendingIncluded = FALSE)) { .getDesignGroupSequentialAlphaSpending(design, userFunctionCallEnabled) } # user defined alpha spending approach else if (design$typeOfDesign %in% c(C_TYPE_OF_DESIGN_AS_USER, C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY)) { .getDesignGroupSequentialUserDefinedAlphaSpending(design, userFunctionCallEnabled) } else { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "no calculation routine defined for ", design$typeOfDesign) } } design$stageLevels <- 1 - stats::pnorm(design$criticalValues) design$.setParameterType("stageLevels", C_PARAM_GENERATED) if (design$kMax == 1) { design$.setParameterType("futilityBounds", C_PARAM_NOT_APPLICABLE) } if (!all(is.na(design$futilityBounds))) { if (length(design$futilityBounds) == 0 || all(design$futilityBounds == C_FUTILITY_BOUNDS_DEFAULT)) { design$.setParameterType("bindingFutility", C_PARAM_NOT_APPLICABLE) design$.setParameterType("futilityBounds", C_PARAM_NOT_APPLICABLE) } else if (userFunctionCallEnabled && any(design$futilityBounds > design$criticalValues[1:(design$kMax - 1)] - 0.01, na.rm = TRUE)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'futilityBounds' (", .arrayToString(design$futilityBounds), ") too extreme for this situation" ) } } .assertIsValidAlphaSpent(design, userFunctionCallEnabled) design$.initStages() # we use 7.5 instead of C_QNORM_THRESHOLD as threshold design$criticalValues[!is.na(design$criticalValues) & design$criticalValues <= -7.5] <- -Inf design$criticalValues[!is.na(design$criticalValues) & design$criticalValues >= 7.5] <- Inf design$futilityBounds[!is.na(design$futilityBounds) & design$futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <- C_FUTILITY_BOUNDS_DEFAULT if (design$kMax == 1) { if (!identical(design$informationRates, 1)) { if (!is.na(design$informationRates)) { warning("Information rate", 
ifelse(length(design$informationRates) != 1, "s", ""), " ", .arrayToString(design$informationRates, vectorLookAndFeelEnabled = TRUE), " will be ignored", call. = FALSE ) } design$informationRates <- 1 } design$.setParameterType("informationRates", C_PARAM_NOT_APPLICABLE) design$.setParameterType("stages", C_PARAM_NOT_APPLICABLE) } .assertIsNumericVector(delayedInformation, "delayedInformation", naAllowed = TRUE) if (all(is.na(delayedInformation))) { # delayed response design is disabled return(design) } if (all(!is.na(delayedInformation)) && all(delayedInformation < 1e-03)) { warning("At least one delayed information value must be >= 1e-03 to enable delayed response.", " 'delayedInformation' (", .arrayToString(delayedInformation), ") will be ignored", call. = FALSE ) return(design) } # proceed with delayed response design .assertIsInClosedInterval(delayedInformation, "delayedInformation", lower = 0, upper = NULL) kMax <- design$kMax contRegionUpper <- design$criticalValues contRegionLower <- design$futilityBounds informationRates <- design$informationRates decisionCriticalValues <- numeric(kMax) reversalProbabilities <- numeric(kMax - 1) if (!all(is.na(delayedInformation)) && (design$sided != 1 || all(design$futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT + 1e-06))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "decision critical values for delayed response design are only available for ", "one-sided designs with valid futility bounds" ) } if (length(delayedInformation) == 1) { delayedInformation <- rep(delayedInformation, kMax - 1) } if (length(delayedInformation) != kMax - 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'delayedInformation' (", .arrayToString(delayedInformation), ") must have length ", (kMax - 1), " (kMax - 1)" ) } indices <- which(delayedInformation > 0 & delayedInformation < 1e-03) n <- length(indices) if (n > 0) { warning("The", ifelse(n == 1, "", paste0(" ", n)), " delayed information value", ifelse(n == 1, "", "s"), " ", 
.arrayToString(delayedInformation[indices], mode = "and"), " will be replaced by 1e-03 to achieve reasonable results", call. = FALSE ) delayedInformation[indices] <- 1e-03 } # sensible interim choices are restricted by amount of delayed information eps <- design$informationRates[1:(design$kMax - 1)] + delayedInformation if (!any(is.na(eps)) && any(eps >= 1)) { stages <- which(eps >= 1) stagesInfo <- ifelse(length(stages) == 1, paste0(" ", stages), paste0("s ", .arrayToString(stages, mode = "and"))) stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'delayedInformation[stage] + informationRates[stage]' for ", "stage", stagesInfo, " too large (>= 1). Recruitment stop analysis information + pipeline data ", "information cannot exceed overall trial information. Instead, the ", "recruitment stop analysis would be skipped, directly proceeding to the ", "final analysis" ) } # loop iterating through the stages calculation the decision critical values for (stage in 1:(kMax - 1)) { if (!is.na(delayedInformation[stage]) && delayedInformation[stage] >= 1e-03 - 1e-06) { # information rate vector in case of recruitment stop at 'stage' informationRatesUponDelay <- c( informationRates[1:stage], informationRates[stage] + delayedInformation[stage] ) if (stage == 1) { decisionCriticalValues[stage] <- .getOneDimensionalRoot(function(secondCriticalValue) { probs1 <- .getGroupSequentialProbabilities( matrix(c(contRegionUpper[stage], secondCriticalValue, C_UPPER_BOUNDS_DEFAULT, C_UPPER_BOUNDS_DEFAULT), nrow = 2, byrow = TRUE ), informationRatesUponDelay ) probs2 <- .getGroupSequentialProbabilities( matrix( c( -C_UPPER_BOUNDS_DEFAULT, secondCriticalValue, contRegionLower[stage], C_UPPER_BOUNDS_DEFAULT ), nrow = 2, byrow = TRUE ), informationRatesUponDelay ) return(probs1[1, stage + 1] - probs2[2, stage + 1] + probs2[1, stage + 1]) }, lower = -C_UPPER_BOUNDS_DEFAULT, upper = C_UPPER_BOUNDS_DEFAULT, tolerance = design$tolerance) probs <- .getGroupSequentialProbabilities( matrix( c( 
contRegionUpper[stage], decisionCriticalValues[stage], C_UPPER_BOUNDS_DEFAULT, C_UPPER_BOUNDS_DEFAULT ), nrow = 2, byrow = TRUE ), informationRatesUponDelay ) } else { decisionCriticalValues[stage] <- .getOneDimensionalRoot(function(secondCriticalValue) { probs1 <- .getGroupSequentialProbabilities( matrix( c( contRegionLower[1:(stage - 1)], contRegionUpper[stage], secondCriticalValue, contRegionUpper[1:(stage - 1)], C_UPPER_BOUNDS_DEFAULT, C_UPPER_BOUNDS_DEFAULT ), nrow = 2, byrow = TRUE ), informationRatesUponDelay ) probs2 <- .getGroupSequentialProbabilities( matrix( c( contRegionLower[1:(stage - 1)], -C_UPPER_BOUNDS_DEFAULT, secondCriticalValue, contRegionUpper[1:(stage - 1)], contRegionLower[stage], C_UPPER_BOUNDS_DEFAULT ), nrow = 2, byrow = TRUE ), informationRatesUponDelay ) return(probs1[1, stage + 1] - probs2[2, stage + 1] + probs2[1, stage + 1]) }, lower = -C_UPPER_BOUNDS_DEFAULT, upper = C_UPPER_BOUNDS_DEFAULT, tolerance = design$tolerance) probs <- .getGroupSequentialProbabilities( matrix( c( contRegionLower[1:(stage - 1)], contRegionUpper[stage], decisionCriticalValues[stage], contRegionUpper[1:(stage - 1)], C_UPPER_BOUNDS_DEFAULT, C_UPPER_BOUNDS_DEFAULT ), nrow = 2, byrow = TRUE ), informationRatesUponDelay ) } if (stage < kMax) { reversalProbabilities[stage] <- probs[1, stage + 1] } } else { decisionCriticalValues[stage] <- NA_real_ reversalProbabilities[stage] <- NA_real_ } decisionCriticalValues[kMax] <- contRegionUpper[kMax] alphaSpent <- .calculateDecisionProbabilities( sqrtShift = 0, informationRates, delayedInformation, contRegionUpper, contRegionLower, decisionCriticalValues )$power } decisionCriticalValues[decisionCriticalValues <= -C_UPPER_BOUNDS_DEFAULT + 1e-06] <- NA_real_ decisionCriticalValues[decisionCriticalValues >= C_UPPER_BOUNDS_DEFAULT - 1e-06] <- NA_real_ design$decisionCriticalValues <- decisionCriticalValues design$reversalProbabilities <- reversalProbabilities design$delayedInformation <- delayedInformation 
design$delayedInformation[design$delayedInformation < 1e-03] <- 0 design$.setParameterType("decisionCriticalValues", C_PARAM_GENERATED) design$.setParameterType("reversalProbabilities", C_PARAM_GENERATED) warning("The delayed information design feature is experimental and ", "hence not fully validated (see www.rpact.com/experimental)", call. = FALSE ) return(design) } # to avoid error messages in case of repeated p-values computation .assertIsValidAlphaSpent <- function(design, userFunctionCallEnabled = TRUE) { if (!userFunctionCallEnabled) { return(invisible()) } if (design$informationRates[design$kMax] != 1) { return(invisible()) } if (is.na(design$alphaSpent[design$kMax]) || abs(design$alphaSpent[design$kMax] - design$alpha) > 1e-05) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "critical values cannot be calculated ", "(alpha: ", design$alpha, "; alpha spent at maximum stage: ", design$alphaSpent[design$kMax], ")" ) } } .assertIsValidBetaSpent <- function(design, ..., userFunctionCallEnabled = TRUE, iteration = 1) { if (!userFunctionCallEnabled) { return(invisible()) } if (design$informationRates[design$kMax] != 1) { return(invisible()) } if (iteration < 0) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "critical values cannot be calculated") } if (is.na(design$betaSpent[design$kMax]) || abs(design$betaSpent[design$kMax] - design$beta) > 1e-05) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "critical values cannot be calculated ", "(beta spent at maximum stage: ", design$betaSpent[design$kMax], ")" ) } } #' #' @title #' Get Design Group Sequential #' #' @description #' Provides adjusted boundaries and defines a group sequential design. #' #' @inheritParams param_kMax #' @inheritParams param_alpha #' @inheritParams param_beta #' @inheritParams param_sided #' @inheritParams param_typeOfDesign #' @inheritParams param_informationRates #' @param futilityBounds The futility bounds, defined on the test statistic z scale #' (numeric vector of length \code{kMax - 1}). 
#' @inheritParams param_bindingFutility
#' @param deltaWT Delta for Wang & Tsiatis Delta class.
#' @param deltaPT1 Delta1 for Pampallona & Tsiatis class rejecting H0 boundaries.
#' @param deltaPT0 Delta0 for Pampallona & Tsiatis class rejecting H1 boundaries.
#' @param constantBoundsHP The constant bounds up to stage \code{kMax - 1} for the
#' Haybittle & Peto design (default is \code{3}).
#' @param optimizationCriterion Optimization criterion for optimum design within
#' Wang & Tsiatis class (\code{"ASNH1"}, \code{"ASNIFH1"},
#' \code{"ASNsum"}), default is \code{"ASNH1"}, see details.
#' @param typeBetaSpending Type of beta spending. Type of beta spending is one of the following:
#' O'Brien & Fleming type beta spending, Pocock type beta spending,
#' Kim & DeMets beta spending, Hwang, Shi & DeCani beta spending, user defined
#' beta spending (\code{"bsOF"}, \code{"bsP"}, \code{"bsKD"},
#' \code{"bsHSD"}, \code{"bsUser"}, default is \code{"none"}).
#' @param gammaA Parameter for alpha spending function.
#' @param gammaB Parameter for beta spending function.
#' @inheritParams param_userAlphaSpending
#' @param delayedInformation Delay of information for delayed response designs. Can be a numeric value or a
#' numeric vector of length \code{kMax - 1}.
#' @param userBetaSpending The user defined beta spending. Vector of length \code{kMax} containing the cumulative
#' beta-spending up to each interim stage.
#' @param twoSidedPower For two-sided testing, if \code{twoSidedPower = TRUE} is specified
#' the sample size calculation is performed by considering both tails of the distribution.
#' Default is \code{FALSE}, i.e., it is assumed that one tail probability is equal to 0 or the power
#' should be directed to one part.
#' @param betaAdjustment For two-sided beta spending designs, if \code{betaAdjustment = TRUE} a linear
#' adjustment of the beta spending values is performed if an overlapping of decision regions for futility
#' stopping at earlier stages occurs, otherwise no adjustment is performed (default is \code{TRUE}).
#' @param tolerance The numerical tolerance, default is \code{1e-08}.
#' @inheritParams param_three_dots
#'
#' @template details_group_sequential_design
#'
#' @template return_object_trial_design
#' @template how_to_get_help_for_generics
#'
#' @family design functions
#'
#' @template examples_get_design_group_sequential
#'
#' @export
#'
getDesignGroupSequential <- function(...,
        kMax = NA_integer_,
        alpha = NA_real_,
        beta = NA_real_,
        sided = 1L, # C_SIDED_DEFAULT
        informationRates = NA_real_,
        futilityBounds = NA_real_,
        typeOfDesign = c("OF", "P", "WT", "PT", "HP", "WToptimum", "asP", "asOF", "asKD", "asHSD", "asUser", "noEarlyEfficacy"), # C_DEFAULT_TYPE_OF_DESIGN
        deltaWT = NA_real_,
        deltaPT1 = NA_real_,
        deltaPT0 = NA_real_,
        optimizationCriterion = c("ASNH1", "ASNIFH1", "ASNsum"), # C_OPTIMIZATION_CRITERION_DEFAULT
        gammaA = NA_real_,
        typeBetaSpending = c("none", "bsP", "bsOF", "bsKD", "bsHSD", "bsUser"), # C_TYPE_OF_DESIGN_BS_NONE
        userAlphaSpending = NA_real_,
        userBetaSpending = NA_real_,
        gammaB = NA_real_,
        bindingFutility = NA,
        betaAdjustment = NA,
        constantBoundsHP = 3, # C_CONST_BOUND_HP_DEFAULT
        twoSidedPower = NA,
        delayedInformation = NA_real_,
        tolerance = 1e-08 # C_DESIGN_TOLERANCE_DEFAULT
        ) {
    .warnInCaseOfUnknownArguments(functionName = "getDesignGroupSequential", ...)
    # Thin public wrapper: all validation and calculation is delegated to the
    # internal .getDesignGroupSequential() with userFunctionCallEnabled = TRUE.
    return(.getDesignGroupSequential(
        designClass = C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL,
        kMax = kMax,
        alpha = alpha,
        beta = beta,
        sided = sided,
        informationRates = informationRates,
        futilityBounds = futilityBounds,
        typeOfDesign = typeOfDesign,
        deltaWT = deltaWT,
        deltaPT1 = deltaPT1,
        deltaPT0 = deltaPT0,
        optimizationCriterion = optimizationCriterion,
        gammaA = gammaA,
        typeBetaSpending = typeBetaSpending,
        userAlphaSpending = userAlphaSpending,
        userBetaSpending = userBetaSpending,
        gammaB = gammaB,
        bindingFutility = bindingFutility,
        constantBoundsHP = constantBoundsHP,
        twoSidedPower = twoSidedPower,
        betaAdjustment = betaAdjustment,
        delayedInformation = delayedInformation,
        tolerance = tolerance,
        userFunctionCallEnabled = TRUE
    ))
}

# Fixed (single-stage) sample size for the prototype case (standardized effect 1)
# at level 'alpha' and type II error rate 'beta'.
# - sided == 1: closed form (z_{1-alpha} + z_{1-beta})^2.
# - sided == 2 with twoSidedPower = TRUE: found numerically via root search so
#   that BOTH tails of the normal distribution contribute to the power.
# - sided == 2 otherwise: one-tail approximation with alpha / 2.
.getFixedSampleSize <- function(alpha, beta, sided, twoSidedPower = C_TWO_SIDED_POWER_DEFAULT) {
    .assertIsValidAlphaAndBeta(alpha = alpha, beta = beta)
    .assertIsValidSidedParameter(sided)
    if (sided == 1) {
        return((.getOneMinusQNorm(alpha) + .getOneMinusQNorm(beta))^2)
    }
    if (twoSidedPower) {
        n <- .getOneDimensionalRoot(
            function(n) {
                stats::pnorm(-.getOneMinusQNorm(alpha / 2) - sqrt(n)) -
                    stats::pnorm(.getOneMinusQNorm(alpha / 2) - sqrt(n)) + beta
            },
            lower = 0,
            upper = 2 * (.getOneMinusQNorm(alpha / 2) + .getOneMinusQNorm(beta))^2,
            tolerance = 1e-08,
            callingFunctionInformation = ".getFixedSampleSize"
        )
    } else {
        n <- (.getOneMinusQNorm(alpha / 2) + .getOneMinusQNorm(beta))^2
    }
    return(n)
}

#' @title
#' Get Design Characteristics
#'
#' @description
#' Calculates the characteristics of a design and returns it.
#'
#' @inheritParams param_design
#' @inheritParams param_three_dots
#'
#' @details
#' Calculates the inflation factor (IF),
#' the expected reduction in sample size under H1, under H0, and under a value in between H0 and H1.
#' Furthermore, absolute information values are calculated
#' under the prototype case testing H0: mu = 0 against H1: mu = 1.
#'
#' @return Returns a \code{\link{TrialDesignCharacteristics}} object.
#' The following generics (R generic functions) are available for this result object:
#' \itemize{
#' \item \code{\link[=names.FieldSet]{names()}} to obtain the field names,
#' \item \code{\link[=print.FieldSet]{print()}} to print the object,
#' \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object,
#' \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object,
#' \item \code{\link[=as.data.frame.TrialDesignCharacteristics]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}},
#' \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}.
#' }
#' @template how_to_get_help_for_generics
#'
#' @family design functions
#'
#' @template examples_get_design_characteristics
#'
#' @export
#'
getDesignCharacteristics <- function(design = NULL, ...) {
    if (is.null(design)) {
        # no design given: build the default design from the ... arguments
        design <- .getDefaultDesign(..., type = "characteristics")
        .warnInCaseOfUnknownArguments(
            functionName = "getDesignCharacteristics",
            ignore = .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = FALSE), ...
        )
    } else {
        .assertIsTrialDesign(design)
        .warnInCaseOfUnknownArguments(functionName = "getDesignCharacteristics", ...)
        .warnInCaseOfTwoSidedPowerArgument(...)
    }
    return(.getDesignCharacteristics(design = design, userFunctionCallEnabled = TRUE))
}

# Internal workhorse for getDesignCharacteristics().
# Determines the drift parameter 'shift' (absolute information needed to attain
# power 1 - beta) by one-dimensional root search and derives from it the
# inflation factor, stagewise rejection/futility probabilities, information
# values, and average sample numbers under H1, H0, and the midpoint between
# them ("H01"), each relative to the fixed sample size nFixed.
.getDesignCharacteristics <- function(..., design, userFunctionCallEnabled = FALSE) {
    .assertIsTrialDesignInverseNormalOrGroupSequential(design)
    .assertDesignParameterExists(design, "sided", C_SIDED_DEFAULT)
    .assertIsValidSidedParameter(design$sided)
    if (userFunctionCallEnabled) {
        .validateAlphaAndBeta(design = design)
    }
    design$informationRates <- .getValidatedInformationRates(design, writeToDesign = FALSE)
    # Pampallona & Tsiatis or two-sided beta spending with kMax == 2:
    # undefined futility bounds are treated as 0 for the calculations below
    if ((design$typeOfDesign == C_TYPE_OF_DESIGN_PT ||
            .isBetaSpendingDesignType(design$typeBetaSpending)) &&
            design$sided == 2 && design$kMax == 2) {
        design$futilityBounds[is.na(design$futilityBounds)] <- 0
    }
    design$futilityBounds <- .getValidatedFutilityBounds(design,
        writeToDesign = FALSE,
        twoSidedWarningForDefaultValues = FALSE
    )
    designCharacteristics <- TrialDesignCharacteristics(design = design)
    designCharacteristics$rejectionProbabilities <- rep(NA_real_, design$kMax)
    designCharacteristics$.setParameterType("rejectionProbabilities", C_PARAM_NOT_APPLICABLE)
    designCharacteristics$futilityProbabilities <- rep(NA_real_, design$kMax - 1)
    designCharacteristics$.setParameterType("futilityProbabilities", C_PARAM_NOT_APPLICABLE)
    nFixed <- .getFixedSampleSize(
        alpha = design$alpha, beta = design$beta,
        sided = design$sided, twoSidedPower = design$twoSidedPower
    )
    designCharacteristics$nFixed <- nFixed
    designCharacteristics$.setParameterType("nFixed", C_PARAM_GENERATED)
    # cap infinite/extreme bounds at 7.5 so the numerical integration stays stable
    design$criticalValues[design$criticalValues > 7.5] <- 7.5
    if (length(design$decisionCriticalValues) > 0) {
        design$decisionCriticalValues[!is.na(design$decisionCriticalValues) &
            design$decisionCriticalValues > 7.5] <- 7.5
    }
    informationRates <- design$informationRates
    if (design$kMax == 1) {
        # single-stage design: characteristics reduce to the fixed sample size
        designCharacteristics$shift <- nFixed
        designCharacteristics$.setParameterType("shift", C_PARAM_GENERATED)
        designCharacteristics$inflationFactor <- designCharacteristics$shift / nFixed
        designCharacteristics$.setParameterType("inflationFactor", C_PARAM_GENERATED)
        designCharacteristics$power <- 1 - design$beta
        designCharacteristics$.setParameterType("power", design$.getParameterType("power"))
        designCharacteristics$.setParameterType("information", C_PARAM_NOT_APPLICABLE)
        designCharacteristics$.setParameterType("averageSampleNumber1", C_PARAM_NOT_APPLICABLE)
        designCharacteristics$.setParameterType("averageSampleNumber01", C_PARAM_NOT_APPLICABLE)
        designCharacteristics$.setParameterType("averageSampleNumber0", C_PARAM_NOT_APPLICABLE)
        designCharacteristics$.setParameterType(".probs", C_PARAM_NOT_APPLICABLE)
        return(designCharacteristics)
    }
    if (!any(is.na(design$delayedInformation)) && length(design$decisionCriticalValues) > 0) {
        # --- delayed response design ---
        kMax <- design$kMax
        contRegionUpper <- design$criticalValues
        contRegionLower <- design$futilityBounds
        informationRates <- design$informationRates
        decisionCriticalValues <- design$decisionCriticalValues
        informationRates <- design$informationRates
        delayedInformation <- design$delayedInformation
        kMax <- length(informationRates)
        # root search on the drift such that the overall power equals 1 - beta
        shift <- .getOneDimensionalRoot(
            function(shift) {
                resultsH1 <- .calculateDecisionProbabilities(sqrt(shift), informationRates, delayedInformation, contRegionUpper, contRegionLower, decisionCriticalValues)
                return(resultsH1$power[kMax] - 1 + design$beta)
            },
            lower = 0, upper = 4 * nFixed, tolerance = design$tolerance,
            callingFunctionInformation = ".getDesignCharacteristics"
        )
        stopping <- numeric(kMax)
        futility <- numeric(kMax)
        rejectionProbabilities <- numeric(kMax)
        resultsH1 <- .calculateDecisionProbabilities(
            sqrtShift = sqrt(shift), informationRates, delayedInformation,
            contRegionUpper, contRegionLower, decisionCriticalValues
        )
        resultsH01 <- .calculateDecisionProbabilities(
            sqrtShift = sqrt(shift) / 2, informationRates, delayedInformation,
            contRegionUpper, contRegionLower, decisionCriticalValues
        )
        resultsH0 <- .calculateDecisionProbabilities(
            sqrtShift = 0, informationRates, delayedInformation,
            contRegionUpper, contRegionLower, decisionCriticalValues
        )
        designCharacteristics$shift <- shift
        designCharacteristics$.probs <- resultsH1$probs
        designCharacteristics$power <- resultsH1$power
        designCharacteristics$information <- informationRates * shift
        # ASN: total information minus the information saved by early stopping,
        # accounting for the pipeline (delayed) information, relative to nFixed
        designCharacteristics$averageSampleNumber1 <- (shift - sum(resultsH1$stoppingProbabilities *
            (informationRates[kMax] - delayedInformation - informationRates[1:(kMax - 1)]) * shift)) / nFixed
        designCharacteristics$averageSampleNumber01 <- (shift - sum(resultsH01$stoppingProbabilities *
            (informationRates[kMax] - delayedInformation - informationRates[1:(kMax - 1)]) * shift)) / nFixed
        designCharacteristics$averageSampleNumber0 <- (shift - sum(resultsH0$stoppingProbabilities *
            (informationRates[kMax] - delayedInformation - informationRates[1:(kMax - 1)]) * shift)) / nFixed
        futilityProbabilities <- resultsH1$futilityProbabilities
        rejectionProbabilities <- resultsH1$power
        stoppingProbabilities <- resultsH1$stoppingProbabilities
        # convert cumulative power into stagewise rejection probabilities
        rejectionProbabilities[2:kMax] <- resultsH1$power[2:kMax] - rejectionProbabilities[1:(kMax - 1)]
        designCharacteristics$rejectionProbabilities <- rejectionProbabilities
        designCharacteristics$futilityProbabilities <- futilityProbabilities
    } else if ((design$typeOfDesign == C_TYPE_OF_DESIGN_PT ||
            .isBetaSpendingDesignType(design$typeBetaSpending)) && design$sided == 2) {
        # --- two-sided Pampallona & Tsiatis / beta spending design:
        # 4-row decision matrix (two continuation and two futility boundaries) ---
        design$futilityBounds[is.na(design$futilityBounds)] <- 0
        shift <- .getOneDimensionalRoot(
            function(shift) {
                decisionMatrix <- matrix(c(
                    -design$criticalValues - sqrt(shift * informationRates),
                    c(-design$futilityBounds - sqrt(shift * informationRates[1:(design$kMax - 1)]), 0),
                    c(design$futilityBounds - sqrt(shift * informationRates[1:(design$kMax - 1)]), 0),
                    design$criticalValues - sqrt(shift * informationRates)
                ), nrow = 4, byrow = TRUE)
                probs <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
                if (design$twoSidedPower) {
                    return(sum(probs[5, ] - probs[4, ] + probs[1, ]) - 1 + design$beta)
                } else {
                    return(sum(probs[5, ] - probs[4, ]) - 1 + design$beta)
                }
            },
            lower = 0,
            upper = 4 * (.getOneMinusQNorm(design$alpha / design$sided) + .getOneMinusQNorm(design$beta))^2,
            tolerance = design$tolerance,
            callingFunctionInformation = ".getDesignCharacteristics"
        )
        decisionMatrix <- matrix(c(
            -design$criticalValues - sqrt(shift * informationRates),
            c(-design$futilityBounds - sqrt(shift * informationRates[1:(design$kMax - 1)]), 0),
            c(design$futilityBounds - sqrt(shift * informationRates[1:(design$kMax - 1)]), 0),
            design$criticalValues - sqrt(shift * informationRates)
        ), nrow = 4, byrow = TRUE)
        probs <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
        designCharacteristics$shift <- shift
        designCharacteristics$.probs <- probs
        if (design$twoSidedPower) {
            designCharacteristics$power <- cumsum(probs[5, ] - probs[4, ] + probs[1, ])
            designCharacteristics$rejectionProbabilities <- probs[5, ] - probs[4, ] + probs[1, ]
        } else {
            designCharacteristics$power <- cumsum(probs[5, ] - probs[4, ])
            designCharacteristics$rejectionProbabilities <- probs[5, ] - probs[4, ]
        }
        if (design$kMax > 1) {
            designCharacteristics$futilityProbabilities <-
                probs[3, 1:(design$kMax - 1)] - probs[2, 1:(design$kMax - 1)]
        }
        designCharacteristics$information <- informationRates * shift
        # ASN under H1 (shifted by the full drift)
        designCharacteristics$averageSampleNumber1 <- .getAverageSampleNumber(
            design$kMax, design$informationRates, probs, shift, nFixed
        )
        # ASN under H0 (no shift)
        decisionMatrix <- matrix(c(
            -design$criticalValues,
            c(-design$futilityBounds, 0),
            c(design$futilityBounds, 0),
            design$criticalValues
        ), nrow = 4, byrow = TRUE)
        probs0 <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
        designCharacteristics$averageSampleNumber0 <- .getAverageSampleNumber(
            design$kMax, design$informationRates, probs0, shift, nFixed
        )
        # ASN under the midpoint between H0 and H1 (half the drift)
        decisionMatrix <- matrix(c(
            -design$criticalValues - sqrt(shift * informationRates) / 2,
            c(-design$futilityBounds - sqrt(shift * informationRates[1:(design$kMax - 1)]) / 2, 0),
            c(design$futilityBounds - sqrt(shift * informationRates[1:(design$kMax - 1)]) / 2, 0),
            design$criticalValues - sqrt(shift * informationRates) / 2
        ), nrow = 4, byrow = TRUE)
        probs01 <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
        designCharacteristics$averageSampleNumber01 <- .getAverageSampleNumber(
            design$kMax, design$informationRates, probs01, shift, nFixed
        )
        # restore the NA representation of the artificial 0 futility bounds
        design$futilityBounds[design$futilityBounds == 0] <- NA_real_
    } else {
        # --- standard one-sided or two-sided design: 2-row decision matrix ---
        shift <- .getOneDimensionalRoot(
            function(shift) {
                if (design$sided == 2) {
                    decisionMatrix <- matrix(c(
                        -design$criticalValues - sqrt(shift * informationRates),
                        design$criticalValues - sqrt(shift * informationRates)
                    ), nrow = 2, byrow = TRUE)
                    probs <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
                    if (design$twoSidedPower) {
                        return(sum(probs[3, ] - probs[2, ] + probs[1, ]) - 1 + design$beta)
                    } else {
                        return(sum(probs[3, ] - probs[2, ]) - 1 + design$beta)
                    }
                } else {
                    shiftedFutilityBounds <- design$futilityBounds -
                        sqrt(shift * informationRates[1:(design$kMax - 1)])
                    shiftedFutilityBounds[design$futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <-
                        C_FUTILITY_BOUNDS_DEFAULT
                    decisionMatrix <- matrix(c(
                        shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                        design$criticalValues - sqrt(shift * informationRates)
                    ), nrow = 2, byrow = TRUE)
                    probs <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
                    return(sum(probs[3, ] - probs[2, ]) - 1 + design$beta)
                }
            },
            lower = 0,
            upper = 4 * (.getOneMinusQNorm(design$alpha / design$sided) + .getOneMinusQNorm(design$beta))^2,
            tolerance = design$tolerance,
            callingFunctionInformation = ".getDesignCharacteristics"
        )
        if (design$sided == 2) {
            decisionMatrix <- matrix(c(
                -design$criticalValues - sqrt(shift * informationRates),
                design$criticalValues - sqrt(shift * informationRates)
            ), nrow = 2, byrow = TRUE)
        } else {
            shiftedFutilityBounds <- design$futilityBounds -
                sqrt(shift * informationRates[1:(design$kMax - 1)])
            shiftedFutilityBounds[design$futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <-
                C_FUTILITY_BOUNDS_DEFAULT
            decisionMatrix <- matrix(c(
                shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                design$criticalValues - sqrt(shift * informationRates)
            ), nrow = 2, byrow = TRUE)
        }
        probs <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
        designCharacteristics$shift <- shift
        designCharacteristics$.probs <- probs
        if (design$twoSidedPower) {
            designCharacteristics$power <- cumsum(probs[3, ] - probs[2, ] + probs[1, ])
            designCharacteristics$rejectionProbabilities <- probs[3, ] - probs[2, ] + probs[1, ]
        } else {
            designCharacteristics$power <- cumsum(probs[3, ] - probs[2, ])
            designCharacteristics$rejectionProbabilities <- probs[3, ] - probs[2, ]
        }
        if (design$kMax > 1) {
            if (design$sided == 2) {
                designCharacteristics$futilityProbabilities <- rep(0, design$kMax - 1)
            } else {
                designCharacteristics$futilityProbabilities <- probs[1, 1:(design$kMax - 1)]
                designCharacteristics$futilityProbabilities[design$futilityBounds == C_FUTILITY_BOUNDS_DEFAULT] <- 0
            }
            designCharacteristics$power <- .getNoEarlyEfficacyZeroCorrectedValues(
                design, designCharacteristics$power
            )
            designCharacteristics$rejectionProbabilities <- .getNoEarlyEfficacyZeroCorrectedValues(
                design, designCharacteristics$rejectionProbabilities
            )
        }
        designCharacteristics$information <- informationRates * shift
        # ASN under H1
        designCharacteristics$averageSampleNumber1 <- .getAverageSampleNumber(
            design$kMax, design$informationRates, probs, shift, nFixed
        )
        # ASN under H0
        if (design$sided == 2) {
            decisionMatrix <- matrix(c(-design$criticalValues, design$criticalValues), nrow = 2, byrow = TRUE)
        } else {
            decisionMatrix <- matrix(c(
                design$futilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                design$criticalValues
            ), nrow = 2, byrow = TRUE)
        }
        probs0 <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
        designCharacteristics$averageSampleNumber0 <- .getAverageSampleNumber(
            design$kMax, design$informationRates, probs0, shift, nFixed
        )
        # ASN under the midpoint between H0 and H1 (half the drift)
        if (design$sided == 2) {
            decisionMatrix <- matrix(c(
                -design$criticalValues - sqrt(shift * informationRates) / 2,
                design$criticalValues - sqrt(shift * informationRates) / 2
            ), nrow = 2, byrow = TRUE)
        } else {
            shiftedFutilityBounds <- design$futilityBounds -
                sqrt(shift * informationRates[1:(design$kMax - 1)]) / 2
            shiftedFutilityBounds[design$futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <-
                C_FUTILITY_BOUNDS_DEFAULT
            decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                design$criticalValues - sqrt(shift * informationRates) / 2), nrow = 2, byrow = TRUE)
        }
        probs01 <- .getGroupSequentialProbabilities(decisionMatrix, informationRates)
        designCharacteristics$averageSampleNumber01 <- .getAverageSampleNumber(
            design$kMax, design$informationRates, probs01, shift, nFixed
        )
    }
    # undo the 7.5 capping applied above
    design$criticalValues[design$criticalValues >= 7.5 - 1e-8] <- Inf
    designCharacteristics$.setParameterType("shift", C_PARAM_GENERATED)
    designCharacteristics$.setParameterType("power", C_PARAM_GENERATED)
    designCharacteristics$.setParameterType(".probs", C_PARAM_GENERATED)
    designCharacteristics$.setParameterType("rejectionProbabilities", C_PARAM_GENERATED)
    designCharacteristics$.setParameterType("information", C_PARAM_GENERATED)
    designCharacteristics$.setParameterType("futilityProbabilities", C_PARAM_GENERATED)
    designCharacteristics$.setParameterType("averageSampleNumber0", C_PARAM_GENERATED)
    designCharacteristics$.setParameterType("averageSampleNumber01", C_PARAM_GENERATED)
    designCharacteristics$.setParameterType("averageSampleNumber1", C_PARAM_GENERATED)
    designCharacteristics$inflationFactor <- shift / nFixed
    designCharacteristics$.setParameterType("inflationFactor", C_PARAM_GENERATED)
    # sanity check: the inflation factor must lie in [1 - 1e-05, 4]
    if (is.na(designCharacteristics$inflationFactor) ||
            designCharacteristics$inflationFactor > 4 ||
            designCharacteristics$inflationFactor < 1 - 1e-05) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "inflation factor cannot be calculated")
    }
    return(designCharacteristics)
}

# Average sample number relative to 'nFixed': total information 'shift' reduced
# by the information saved through early stopping at stages 1..kMax-1.
# 'probs' has 3 rows for 2-row decision matrices (one-sided / plain two-sided)
# and 5 rows for 4-row matrices (two-sided designs with futility boundaries).
.getAverageSampleNumber <- function(kMax, informationRates, probs, shift, nFixed) {
    if (nrow(probs) == 3) {
        return((shift -
            sum((probs[3, 1:(kMax - 1)] - probs[2, 1:(kMax - 1)] +
                probs[1, 1:(kMax - 1)]) *
                (informationRates[kMax] - informationRates[1:(kMax - 1)]) * shift)) / nFixed)
    } else {
        return((shift -
            sum((probs[5, 1:(kMax - 1)] - probs[4, 1:(kMax - 1)] +
                probs[3, 1:(kMax - 1)] - probs[2, 1:(kMax - 1)] +
                probs[1, 1:(kMax - 1)]) *
                (informationRates[kMax] - informationRates[1:(kMax - 1)]) * shift)) / nFixed)
    }
}

#'
#' @title
#' Get Power And Average Sample Number
#'
#' @description
#' Returns the power and average sample number of the specified design.
#'
#' @inheritParams param_design
#' @inheritParams param_theta
#' @inheritParams param_nMax
#'
#' @details
#' This function returns the power and average sample number (ASN) of the specified
#' design for the prototype case which is testing H0: mu = mu0 in a one-sample design.
#' \code{theta} represents the standardized effect \code{(mu - mu0) / sigma} and power and ASN
#' is calculated for maximum sample size \code{nMax}.
#' For other designs than the one-sample test of a mean the standardized effect needs to be adjusted accordingly.
#'
#' @return Returns a \code{\link{PowerAndAverageSampleNumberResult}} object.
#' The following generics (R generic functions) are available for this result object:
#' \itemize{
#' \item \code{\link[=names.FieldSet]{names()}} to obtain the field names,
#' \item \code{\link[=print.FieldSet]{print()}} to print the object,
#' \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object,
#' \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object,
#' \item \code{\link[=as.data.frame.PowerAndAverageSampleNumberResult]{as.data.frame()}}
#' to coerce the object to a \code{\link[base]{data.frame}},
#' \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}.
#' }
#' @template how_to_get_help_for_generics
#'
#' @family design functions
#'
#' @template examples_get_power_and_average_sample_number
#'
#' @export
#'
getPowerAndAverageSampleNumber <- function(design, theta = seq(-1, 1, 0.02), nMax = 100) {
    .assertIsTrialDesign(design)
    .assertIsSingleNumber(nMax, "nMax")
    .assertIsInClosedInterval(nMax, "nMax", lower = 1, upper = NULL)
    # the result object performs the actual power/ASN calculation on construction
    return(PowerAndAverageSampleNumberResult(design = design, theta = theta, nMax = nMax))
}

# Monte Carlo estimate of the rejection probability of a delayed response group
# sequential design: per iteration, correlated stagewise z-statistics are drawn
# with drift 'delta'; on crossing a continuation bound before kMax, the decision
# is re-evaluated after adding the delayed (pipeline) information and comparing
# against the corresponding decision critical value.
# Returns a list with simulatedAlpha (= rejections / iterations), delta,
# iterations, and the seed actually used.
.getSimulatedRejectionsDelayedResponse <- function(delta, informationRates, delayedInformation,
        contRegionUpper, contRegionLower, decisionCriticalValues, iterations, seed = NA_real_) {
    seed <- .setSeed(seed)
    kMax <- length(informationRates)
    zVector <- numeric(kMax)
    reject <- 0L
    for (i in 1:iterations) {
        for (stage in 1:kMax) {
            if (stage == 1) {
                zVector[stage] <- stats::rnorm(1, delta * sqrt(informationRates[1]), 1)
            } else {
                # combine the previous z-value with the independent increment of
                # the current stage (standard group sequential recursion)
                zVector[stage] <- (sqrt(informationRates[stage - 1]) * zVector[stage - 1] +
                    sqrt(informationRates[stage] - informationRates[stage - 1]) *
                        stats::rnorm(1, delta * sqrt(informationRates[stage] - informationRates[stage - 1]), 1)) /
                    sqrt(informationRates[stage])
            }
            if (!is.na(decisionCriticalValues[stage]) && stage < kMax &&
                    (zVector[stage] > contRegionUpper[stage] || zVector[stage] < contRegionLower[stage])) {
                # recruitment stop: add the delayed information before deciding
                if ((sqrt(informationRates[stage]) * zVector[stage] +
                        sqrt(delayedInformation[stage]) *
                            stats::rnorm(1, delta * sqrt(delayedInformation[stage]), 1)) /
                        sqrt(informationRates[stage] + delayedInformation[stage]) >
                        decisionCriticalValues[stage]) {
                    reject <- reject + 1L
                }
                break
            }
            if (stage == kMax && zVector[stage] > decisionCriticalValues[stage]) {
                reject <- reject + 1L
            }
        }
    }
    simulatedAlpha <- reject / iterations
    return(list(
        simulatedAlpha = simulatedAlpha,
        delta = delta,
        iterations = iterations,
        seed = seed
    ))
}

#'
#' @title
#' Simulated Rejections Delayed Response
#'
#' @description
#' Simulates the rejection probability of a delayed response group sequential design with specified parameters.
#'
#' @inheritParams param_design
#' @param delta The standardized effect (drift) under which the rejection probability
#' is simulated; \code{delta = 0} corresponds to simulating the Type I error rate.
#' @param iterations The number of simulation iterations.
#' @inheritParams param_seed
#'
#' @details
#' By default, delta = 0, i.e., the Type I error rate is simulated.
#'
#' @return Returns a list summarizing the rejection probabilities.
#'
#' @noRd
#'
getSimulatedRejectionsDelayedResponse <- function(design, ..., delta = 0, iterations = 10000, seed = NA_real_) {
    .assertIsTrialDesignInverseNormalOrGroupSequential(design)
    .assertIsSingleNumber(delta, "delta")
    .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE)
    if (!design$.isDelayedResponseDesign()) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be a delayed response design with specified 'delayedInformation'")
    }
    startTime <- Sys.time()
    result <- .getSimulatedRejectionsDelayedResponse(
        delta = delta,
        informationRates = design$informationRates,
        delayedInformation = design$delayedInformation,
        contRegionUpper = design$criticalValues,
        contRegionLower = design$futilityBounds,
        decisionCriticalValues = design$decisionCriticalValues,
        iterations = iterations, seed = seed
    )
    simulatedAlpha <- result$simulatedAlpha
    # 95% normal-approximation confidence interval for the simulated rate
    stdError <- sqrt(simulatedAlpha * (1 - simulatedAlpha) / iterations)
    ciLower <- simulatedAlpha - 1.96 * stdError
    ciUpper <- simulatedAlpha + 1.96 * stdError
    # NOTE(review): "Intervall" is a misspelling but part of the returned list
    # interface consumed by callers; do not rename here
    result$confidenceIntervall <- c(ciLower, ciUpper)
    # simulated Type I error rate is within the 95% error bounds
    result$alphaWithin95ConfidenceIntervall <- design$alpha > ciLower && design$alpha < ciUpper
    result$time <- Sys.time() - startTime
    return(result)
}
rpact/R/f_core_plot.R0000644000176200001440000017337314445307575014226 0ustar liggesusers## |
## | *Plot functions*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here:
https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_utilities.R NULL .addNumberToPlotCaption <- function(caption, type, numberInCaptionEnabled = FALSE) { if (!numberInCaptionEnabled) { return(caption) } return(paste0(caption, " [", type, "]")) } .getPlotCaption <- function(obj, type, numberInCaptionEnabled = FALSE, ..., stopIfNotFound = FALSE) { if (is.null(obj) || length(type) == 0) { return(NA_character_) } .assertIsSingleInteger(type, "type", validateType = FALSE) if (inherits(obj, "TrialDesignPlan")) { if (type == 1) { if (.isTrialDesignPlanSurvival(obj)) { return(.addNumberToPlotCaption("Boundaries Z Scale", type, numberInCaptionEnabled)) } else { return(.addNumberToPlotCaption("Boundaries", type, numberInCaptionEnabled)) } } else if (type == 2) { return(.addNumberToPlotCaption("Boundaries Effect Scale", type, numberInCaptionEnabled)) } else if (type == 3) { return(.addNumberToPlotCaption("Boundaries p Values Scale", type, numberInCaptionEnabled)) } else if (type == 4) { return(.addNumberToPlotCaption("Error Spending", type, numberInCaptionEnabled)) } } if (.isMultiArmSimulationResults(obj)) { if (type == 1) { # Multi-arm, Overall Success return(.addNumberToPlotCaption("Overall Success", type, numberInCaptionEnabled)) } else if (type == 2) { # Multi-arm, Success per Stage return(.addNumberToPlotCaption("Success per Stage", type, numberInCaptionEnabled)) } else if (type == 3) { # Multi-arm, Selected Arms per Stage return(.addNumberToPlotCaption("Selected Arms per Stage", type, numberInCaptionEnabled)) } else if (type == 4) { # Multi-arm, Rejected Arms per Stage return(.addNumberToPlotCaption(ifelse(obj$.design$kMax > 1, 
"Rejected Arms per Stage", "Rejected Arms" ), type, numberInCaptionEnabled)) } } else if (.isEnrichmentSimulationResults(obj)) { if (type == 1) { # Enrichment, Overall Success return(.addNumberToPlotCaption("Overall Success", type, numberInCaptionEnabled)) } else if (type == 2) { # Enrichment, Success per Stage return(.addNumberToPlotCaption("Success per Stage", type, numberInCaptionEnabled)) } else if (type == 3) { # Enrichment, Selected Populations per Stage return(.addNumberToPlotCaption("Selected Populations per Stage", type, numberInCaptionEnabled)) } else if (type == 4) { # Enrichment, Rejected Populations per Stage return(.addNumberToPlotCaption(ifelse(obj$.design$kMax > 1, "Rejected Populations per Stage", "Rejected Populations" ), type, numberInCaptionEnabled)) } } else if (inherits(obj, "SimulationResults") && type == 4) { return(.addNumberToPlotCaption("Reject per Stage", type, numberInCaptionEnabled)) } if (inherits(obj, "TrialDesignPlan") || inherits(obj, "SimulationResults")) { if (type == 5) { if (obj$.isSampleSizeObject()) { return(.addNumberToPlotCaption("Sample Size", type, numberInCaptionEnabled)) } else { return(.addNumberToPlotCaption( "Overall Power and Early Stopping", type, numberInCaptionEnabled )) } } else if (type == 6) { return(.addNumberToPlotCaption(ifelse(.isTrialDesignPlanSurvival(obj) || inherits(obj, "SimulationResultsSurvival"), "Number of Events", "Sample Size" ), type, numberInCaptionEnabled)) } else if (type == 7) { return(.addNumberToPlotCaption("Overall Power", type, numberInCaptionEnabled)) } else if (type == 8) { return(.addNumberToPlotCaption("Overall Early Stopping", type, numberInCaptionEnabled)) } else if (type == 9) { if (.isTrialDesignPlanSurvival(obj) || inherits(obj, "SimulationResultsSurvival")) { return(.addNumberToPlotCaption("Expected Number of Events", type, numberInCaptionEnabled)) } else { return(.addNumberToPlotCaption("Expected Sample Size", type, numberInCaptionEnabled)) } } else if (type == 10) { 
return(.addNumberToPlotCaption("Study Duration", type, numberInCaptionEnabled)) } else if (type == 11) { return(.addNumberToPlotCaption("Expected Number of Subjects", type, numberInCaptionEnabled)) } else if (type == 12) { return(.addNumberToPlotCaption("Analysis Time", type, numberInCaptionEnabled)) } else if (type == 13) { return(.addNumberToPlotCaption("Cumulative Distribution Function", type, numberInCaptionEnabled)) } else if (type == 14) { return(.addNumberToPlotCaption("Survival Function", type, numberInCaptionEnabled)) } } else if (inherits(obj, "TrialDesign") || inherits(obj, "TrialDesignSet")) { if (type == 1) { return(.addNumberToPlotCaption("Boundaries", type, numberInCaptionEnabled)) } else if (type == 3) { return(.addNumberToPlotCaption("Stage Levels", type, numberInCaptionEnabled)) } else if (type == 4) { return(.addNumberToPlotCaption("Error Spending", type, numberInCaptionEnabled)) } else if (type == 5) { return(.addNumberToPlotCaption("Power and Early Stopping", type, numberInCaptionEnabled)) } else if (type == 6) { return(.addNumberToPlotCaption( "Average Sample Size and Power / Early Stop", type, numberInCaptionEnabled )) } else if (type == 7) { return(.addNumberToPlotCaption("Power", type, numberInCaptionEnabled)) } else if (type == 8) { return(.addNumberToPlotCaption("Early Stopping", type, numberInCaptionEnabled)) } else if (type == 9) { return(.addNumberToPlotCaption("Average Sample Size", type, numberInCaptionEnabled)) } } else if (inherits(obj, "AnalysisResults")) { if (type == 1) { return(.addNumberToPlotCaption(C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, type, numberInCaptionEnabled)) } else if (type == 2) { return(.addNumberToPlotCaption("Repeated Confidence Intervals", type, numberInCaptionEnabled)) } } else if (inherits(obj, "StageResults")) { return(.addNumberToPlotCaption(C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, type, numberInCaptionEnabled)) } if (stopIfNotFound) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "could not find plot 
caption for ", .getClassName(obj), " and type ", type) } return(NA_character_) } .getPlotTypeNumber <- function(type, x) { if (missing(type) || is.null(type) || length(type) == 0 || all(is.na(type))) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'type' must be defined") } if (!is.numeric(type) && !is.character(type)) { stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'type' must be an integer or character value or vector (is ", .getClassName(type), ")" ) } if (is.numeric(type)) { .assertIsIntegerVector(type, "type", naAllowed = FALSE, validateType = FALSE) } if (is.character(type)) { if (length(type) == 1 && type == "all") { availablePlotTypes <- getAvailablePlotTypes(x) if (is.null(availablePlotTypes)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "function 'getAvailablePlotTypes' not implemented for ", .getClassName(x)) } return(availablePlotTypes) } types <- getAvailablePlotTypes(x, output = "numeric") captions <- tolower(getAvailablePlotTypes(x, output = "caption")) typeNumbers <- c() for (typeStr in type) { if (grepl("^\\d+$", typeStr)) { typeNumbers <- c(typeNumbers, as.integer(typeStr)) } else { index <- pmatch(tolower(typeStr), captions) if (!is.na(index)) { typeNumbers <- c(typeNumbers, types[index]) } else { index <- grep(tolower(typeStr), captions) if (length(index) > 0) { for (i in index) { typeNumbers <- c(typeNumbers, types[i]) } } } } } if (length(typeNumbers) > 0) { return(unique(typeNumbers)) } message("Available plot types: ", .arrayToString(tolower( getAvailablePlotTypes(x, output = "caption") ), encapsulate = TRUE)) stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", .arrayToString(type), ") could not be identified") } return(type) } .createPlotResultObject <- function(plotList, grid = 1) { .assertIsSingleInteger(grid, "grid", naAllowed = FALSE, validateType = FALSE) .assertIsInClosedInterval(grid, "grid", lower = 0, upper = 100) if (length(plotList) == 0) { if (grid == 0) { return(invisible(plotList)) } return(plotList) } if (!inherits(plotList[[1]], "ggplot") 
    || grid == 1) {
        return(plotList)
    }
    if (grid == 0) {
        # print all plots instead of returning them visibly
        for (p in plotList) {
            suppressMessages(print(p))
        }
        return(invisible(plotList))
    }
    if (length(plotList) > grid) {
        return(plotList)
    }
    plotCmd <- NA_character_
    if (grid > 1) {
        # pick the first available layout package: ggpubr, gridExtra, cowplot
        if ("ggpubr" %in% rownames(installed.packages())) {
            if (length(plotList) < 8 && length(plotList) %% 2 == 1) {
                # odd number of plots: first plot on top, remainder arranged below
                plotCmd <- paste0(
                    "ggpubr::ggarrange(plotList[[1]], ",
                    "ggpubr::ggarrange(plotlist = plotList[2:", length(plotList), "]), ncol = 1)"
                )
            } else if (length(plotList) == 2) {
                plotCmd <- paste0("ggpubr::ggarrange(plotlist = plotList, ncol = 1)")
            } else {
                plotCmd <- paste0("ggpubr::ggarrange(plotlist = plotList)")
            }
        } else if ("gridExtra" %in% rownames(installed.packages())) {
            ncol <- ifelse(length(plotList) == 2, 1, 2)
            plotCmd <- paste0("gridExtra::grid.arrange(grobs = plotList, ncol = ", ncol, ")")
        } else if ("cowplot" %in% rownames(installed.packages())) {
            plotCmd <- "cowplot::plot_grid(plotlist = plotList)"
        } else {
            message(
                "Unable to create grid plot because neither 'ggpubr', 'gridExtra', nor 'cowplot' are installed. ",
                "Install one of these packages to enable grid plots"
            )
        }
    }
    if (!is.na(plotCmd)) {
        # evaluate the assembled command; fall back to the plain list on failure
        tryCatch(
            {
                return(eval(parse(text = plotCmd)))
            },
            error = function(e) {
                warning("Failed to create grid plot using command '", plotCmd, "': ", e$message)
            }
        )
    }
    return(plotList)
}

# Prints a "--" separator between the source listings of successive plot types;
# suppressed for the last type and when 'showSource' is disabled.
.printPlotShowSourceSeparator <- function(showSource, typeNumber, typeNumbers) {
    if (is.logical(showSource) && !showSource) {
        return(invisible())
    }
    if (length(typeNumbers) == 1) {
        return(invisible())
    }
    if (typeNumber == typeNumbers[length(typeNumbers)]) {
        return(invisible())
    }
    cat("--\n")
}

#' @rdname getAvailablePlotTypes
#' @export
plotTypes <- function(obj, output = c("numeric", "caption", "numcap", "capnum"),
        numberInCaptionEnabled = FALSE) {
    # short form of getAvailablePlotTypes()
    return(getAvailablePlotTypes(
        obj = obj,
        output = output,
        numberInCaptionEnabled = numberInCaptionEnabled
    ))
}

# Checks whether the varied parameter vectors of a result object contain enough
# values (> 1, no NA) to draw the parameter-dependent plots (types <= 12).
.isValidVariedParameterVectorForPlotting <- function(resultObject, plotType) {
    if (plotType > 12) {
        return(TRUE)
    }
    for (param in c("alternative", "pi1", "hazardRatio", "muMaxVector", "piMaxVector", "omegaMaxVector")) {
        if (!is.null(resultObject[[param]]) &&
                resultObject$.getParameterType(param) != C_PARAM_NOT_APPLICABLE &&
                (any(is.na(resultObject[[param]])) || length(resultObject[[param]]) <= 1)) {
            return(FALSE)
        }
    }
    # hazardRatio and overallReject must have matching lengths to be plottable
    if (!is.null(resultObject[["hazardRatio"]]) && !is.null(resultObject[["overallReject"]]) &&
            resultObject$.getParameterType("hazardRatio") != C_PARAM_NOT_APPLICABLE &&
            resultObject$.getParameterType("overallReject") != C_PARAM_NOT_APPLICABLE &&
            length(resultObject$hazardRatio) > 0 &&
            length(resultObject$hazardRatio) != length(resultObject$overallReject)) {
        return(FALSE)
    }
    return(TRUE)
}

# Filters 'plotTypes': types listed in 'plotTypesToCheck' are kept only if the
# result object has valid varied parameter vectors for them.
.removeInvalidPlotTypes <- function(resultObject, plotTypes, plotTypesToCheck) {
    if (is.null(plotTypes) || length(plotTypes) == 0) {
        return(integer(0))
    }
    validPlotTypes <- integer(0)
    for (plotType in plotTypes) {
        if (!(plotType %in% plotTypesToCheck)) {
            validPlotTypes <- c(validPlotTypes, plotType)
        } else if
        (.isValidVariedParameterVectorForPlotting(resultObject, plotType)) {
            validPlotTypes <- c(validPlotTypes, plotType)
        }
    }
    return(validPlotTypes)
}

#'
#' @title
#' Get Available Plot Types
#'
#' @description
#' Function to identify the available plot types of an object.
#'
#' @param obj The object for which the plot types shall be identified, e.g. produced by
#'        \code{\link[=getDesignGroupSequential]{getDesignGroupSequential()}} or \code{\link[=getSampleSizeMeans]{getSampleSizeMeans()}}.
#' @param output The output type. Can be one of \code{c("numeric", "caption", "numcap", "capnum")}.
#' @param numberInCaptionEnabled If \code{TRUE}, the number will be added to the
#'        caption, default is \code{FALSE}.
#'
#' @details
#' \code{plotTypes} and \code{getAvailablePlotTypes()} are equivalent, i.e.,
#' \code{plotTypes} is a short form of \code{getAvailablePlotTypes()}.
#'
#' \code{output}:
#' \enumerate{
#'   \item \code{numeric}: numeric output
#'   \item \code{caption}: caption as character output
#'   \item \code{numcap}: list with number and caption
#'   \item \code{capnum}: list with caption and number
#' }
#'
#' @return Returns a list if \code{output} is either \code{capnum} or \code{numcap}
#' or returns a vector that is of character type for \code{output = "caption"} or
#' of numeric type for \code{output = "numeric"}.
#'
#' @examples
#' design <- getDesignInverseNormal(kMax = 2)
#' getAvailablePlotTypes(design, "numeric")
#' plotTypes(design, "caption")
#' getAvailablePlotTypes(design, "numcap")
#' plotTypes(design, "capnum")
#'
#' @export
#'
getAvailablePlotTypes <- function(obj, output = c("numeric", "caption", "numcap", "capnum"),
        numberInCaptionEnabled = FALSE) {
    output <- match.arg(output)
    if (is.null(obj)) {
        if (output == "numeric") {
            return(NA_real_)
        }
        if (output == "caption") {
            return(NA_character_)
        }
        return(list())
    }
    # collect the numeric plot types depending on the class of 'obj'
    types <- integer(0)
    if (inherits(obj, "TrialDesignPlan")) {
        if (obj$.design$kMax > 1) {
            types <- c(types, 1:4)
        }
        types <- c(types, 5)
        if (obj$.isSampleSizeObject()) {
            if (.isTrialDesignPlanSurvival(obj)) {
                types <- c(types, 13, 14)
            }
        } else {
            types <- c(types, 6:9)
            if (.isTrialDesignPlanSurvival(obj)) {
                types <- c(types, 10:14)
            }
        }
        types <- .removeInvalidPlotTypes(obj, types, c(5:14))
    } else if (inherits(obj, "SimulationResults")) {
        # enrichment results without valid effect data support no plots at all
        if (grepl("Enrichment", .getClassName(obj)) && !.getSimulationEnrichmentEffectData(
            obj,
            validatePlotCapability = FALSE
        )$valid) {
            if (output == "numeric") {
                return(NA_real_)
            }
            if (output == "caption") {
                return(NA_character_)
            }
            return(list())
        }
        if (grepl("MultiArm|Enrichment", .getClassName(obj))) {
            types <- c(types, 1)
            if (obj$.design$kMax > 1) {
                types <- c(types, 2:3)
            }
        }
        types <- c(types, 4)
        if (!grepl("MultiArm", .getClassName(obj)) || obj$.design$kMax > 1) {
            types <- c(types, 5:6)
        }
        types <- c(types, 7)
        if (obj$.design$kMax > 1) {
            types <- c(types, 8)
        }
        if (!grepl("MultiArm", .getClassName(obj)) || obj$.design$kMax > 1) {
            types <- c(types, 9)
        }
        if (inherits(obj, "SimulationResultsSurvival")) {
            types <- c(types, 10:14)
        }
        plotTypesToCheck <- c(4:14)
        if (grepl("MultiArm", .getClassName(obj))) {
            plotTypesToCheck <- c(1:14)
        }
        types <- .removeInvalidPlotTypes(obj, types, plotTypesToCheck)
    } else if (inherits(obj, "TrialDesign") || inherits(obj, "TrialDesignSet")) {
        design <- obj
        if (inherits(obj, "TrialDesignSet")) {
            design <- obj$getDesignMaster()
        }
        if (design$kMax > 1) {
            types <- c(types, 1, 3)
        }
        if (inherits(design, "TrialDesignFisher")) {
            types <- c(types, 4)
        } else {
            types <- c(types, 4:9)
        }
    } else if (inherits(obj, "AnalysisResults")) {
        types <- integer(0)
        if (.isConditionalPowerEnabled(obj$nPlanned)) {
            types <- c(1)
        }
        types <- c(types, 2)
    } else if (inherits(obj, "StageResults")) {
        types <- c(1)
    }
    # format the collected types according to 'output'
    if (output == "numeric") {
        return(types)
    }
    if (output == "caption") {
        captions <- character(0)
        for (type in types) {
            captions <- c(captions, .getPlotCaption(obj,
                type = type,
                numberInCaptionEnabled = numberInCaptionEnabled
            ))
        }
        return(captions)
    }
    if (output == "numcap") {
        numcap <- list()
        for (type in types) {
            numcap[[as.character(type)]] <- .getPlotCaption(obj,
                type = type,
                numberInCaptionEnabled = numberInCaptionEnabled
            )
        }
        return(numcap)
    }
    capnum <- list()
    for (type in types) {
        capnum[[.getPlotCaption(obj,
            type = type,
            numberInCaptionEnabled = numberInCaptionEnabled
        )]] <- type
    }
    return(capnum)
}

# Builds the user hint explaining that interim values of a varied parameter
# were interpolated to draw smoother lines.
.getVariedParameterHint <- function(variedParameter, variedParameterName) {
    return(paste0(
        "Note: interim values between ", round(variedParameter[1], 4),
        " and ", round(variedParameter[2], 4),
        " were calculated to get smoother lines; use, e.g., '",
        variedParameterName, " = ", .getVariedParameterVectorSeqCommand(variedParameter),
        "' to get all interim values"
    ))
}

# Escapes '$' and '.' in a character value so it can be used inside a regular
# expression ("regexp-safe" character).
.getRexepSaveCharacter <- function(x) {
    x <- gsub("\\$", "\\\\$", x)
    x <- gsub("\\.", "\\\\.", x)
    return(x)
}

# Prefixes 'parameterName' with 'objectName' ("obj$param") unless it is already
# qualified, is a getter call, or is an rpact:: qualified name.
.createValidParameterName <- function(objectName, parameterName) {
    if (grepl(paste0(.getRexepSaveCharacter(objectName), "\\$"), parameterName) &&
            !grepl("^\\.design", parameterName)) {
        return(parameterName)
    }
    if (is.null(objectName) || length(objectName) == 0 || is.na(objectName)) {
        return(parameterName)
    }
    if (grepl("^-?\\.?get[A-Z]{1}", parameterName)) {
        return(parameterName)
    }
    if (grepl("^rpact::", parameterName)) {
        return(parameterName)
    }
    return(paste0(objectName, "$", parameterName))
}

# Prints the source data (x-axis and y-axis expressions) of a plot together
# with simple base-R plot command examples; behavior is controlled by
# 'showSource' (TRUE, "commands", "axes", "test", or "validate").
.showPlotSourceInformation <-
function(objectName, ..., xParameterName, yParameterNames,
        hint = NA_character_, nMax = NA_integer_, type = NA_integer_,
        showSource = FALSE, xValues = NA_real_, lineType = TRUE) {
    # 'showSource' may be logical (TRUE/FALSE) or one of the character
    # modes in C_PLOT_SHOW_SOURCE_ARGUMENTS; anything else is ignored
    if (is.character(showSource)) {
        if (length(showSource) != 1 || trimws(showSource) == "") {
            return(invisible(NULL))
        }
        if (!(showSource %in% C_PLOT_SHOW_SOURCE_ARGUMENTS)) {
            warning("'showSource' (", showSource, ") is not allowed and will be ignored", call. = FALSE)
            return(invisible())
        }
    } else if (!isTRUE(showSource)) {
        return(invisible(NULL))
    }
    .assertIsSingleCharacter(xParameterName, "xParameterName")
    if (length(yParameterNames) == 0 || !all(is.character(yParameterNames)) || all(is.na(yParameterNames))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'yParameterNames' (", .arrayToString(yParameterNames), ") must be a valid character vector"
        )
    }
    .assertIsSingleCharacter(hint, "hint", naAllowed = TRUE)
    .assertIsSingleNumber(nMax, "nMax", naAllowed = TRUE)
    .assertIsNumericVector(xValues, "xValues", naAllowed = TRUE)
    cat("Source data of the plot", ifelse(!is.na(type), paste0(
        " (type ", type, ")"
    ), ""), ":\n", sep = "")
    # reconstruct the x-axis expression, falling back to "obj$param" notation
    xAxisCmd <- .reconstructSequenceCommand(xValues)
    if (is.na(xAxisCmd)) {
        if (!grepl("(\\$)|(^c\\()", xParameterName) || grepl("^\\.design", xParameterName)) {
            if (length(objectName) == 0 || is.na(objectName)) {
                objectName <- "x"
            }
            xAxisCmd <- paste0(objectName, "$", xParameterName)
        } else {
            xAxisCmd <- xParameterName
        }
    }
    # information rates are rescaled to sample sizes when 'nMax' is given
    if (!is.na(nMax) && length(yParameterNames) < 3 && xParameterName == "informationRates") {
        xAxisCmd <- paste0(xAxisCmd, " * ", round(nMax, 1))
    }
    cat(" x-axis: ", xAxisCmd, "\n", sep = "")
    # futility bounds have one value less than critical values: complete the
    # vector with the last critical value so both series have equal length
    if (all(c("futilityBounds", "criticalValues") %in% yParameterNames)) {
        yParameterNames[1] <- paste0(
            "c(", objectName, "$futilityBounds, ",
            objectName, "$criticalValues[length(", objectName, "$criticalValues)])"
        )
    } else if (identical(yParameterNames, c("futilityBoundsEffectScale", "criticalValuesEffectScale"))) {
        yParameterNames[1] <- paste0(
            "c(", objectName, "$futilityBoundsEffectScale, ",
            objectName, "$criticalValuesEffectScale[length(", objectName, "$criticalValuesEffectScale)])"
        )
    }
    yAxisCmds <- c()
    if (length(yParameterNames) == 1) {
        yAxisCmds <- .createValidParameterName(objectName, yParameterNames)
    } else {
        for (yParameterName in yParameterNames) {
            yAxisCmds <- c(yAxisCmds, .createValidParameterName(objectName, yParameterName))
        }
    }
    if (length(yAxisCmds) == 1) {
        cat(" y-axis: ", yAxisCmds, "\n", sep = "")
    } else {
        cat(" y-axes:\n")
        for (i in 1:length(yAxisCmds)) {
            cat(" y", i, ": ", yAxisCmds[i], "\n", sep = "")
        }
    }
    if (!is.na(hint) && is.character(hint) && nchar(hint) > 0) {
        cat(hint, "\n", sep = "")
    }
    # add simple plot command examples
    cat("Simple plot command example", ifelse(length(yAxisCmds) == 1, "", "s"), ":\n", sep = "")
    plotCmds <- c()
    for (yAxisCmd in yAxisCmds) {
        plotCmd <- paste0("plot(", xAxisCmd, ", ", yAxisCmd)
        if (lineType) {
            plotCmd <- paste0(plotCmd, ", type = \"l\"")
        }
        plotCmd <- paste0(plotCmd, ")")
        plotCmds <- c(plotCmds, plotCmd)
        cat(" ", plotCmd, "\n", sep = "")
    }
    if (showSource == "commands") {
        return(invisible(plotCmds))
    } else if (showSource == "axes") {
        return(invisible(list(x = xAxisCmd, y = yAxisCmds)))
    } else if (showSource == "test") {
        success <- TRUE
        for (plotCmd in plotCmds) {
            if (!.testPlotCommand(plotCmd)) {
                success <- FALSE
            }
        }
        if (success) {
            cat("All plot commands are valid\n")
        } else {
            # fixed typo: "One ore more" -> "One or more"
            cat("One or more plot commands are invalid\n")
        }
        return(invisible(plotCmds))
    } else if (showSource == "validate") {
        for (plotCmd in plotCmds) {
            .testPlotCommand(plotCmd, silent = FALSE)
        }
        return(invisible(plotCmds))
    }
    return(invisible(NULL))
}

# Evaluates a plot command given as character string; returns TRUE on success.
# On error: silent = TRUE prints the message, silent = FALSE stops.
.testPlotCommand <- function(plotCmd, silent = TRUE) {
    tryCatch(
        {
            eval(parse(text = plotCmd))
            return(invisible(TRUE))
        },
        error = function(e) {
            msg <- paste0(
                "failed to evaluate plot command \"", plotCmd, "\" ",
                "('", as.character(e$call), "'): ", e$message
            )
            if (!silent) {
                stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, msg[1])
            }
            cat(.firstCharacterToUpperCase(msg), "\n")
        }
    )
    return(invisible(FALSE))
}

# Converts a parameter set (or TrialDesignSet) to a data.frame suitable for
# plotting and determines the varied parameters used for grouping/legends.
.getParameterSetAsDataFrame <- function(..., parameterSet, designMaster,
        addPowerAndAverageSampleNumber = FALSE, theta = seq(-1, 1, 0.02),
        nMax = NA_integer_, mandatoryParameterNames = character(0),
        yParameterNames = character(0)) {
    if (.isTrialDesignSet(parameterSet) && parameterSet$getSize() > 1 &&
            (is.null(parameterSet$variedParameters) || length(parameterSet$variedParameters) == 0)) {
        stop(
            C_EXCEPTION_TYPE_RUNTIME_ISSUE,
            "'variedParameters' must be not empty; ",
            "use 'DesignSet$addVariedParameters(character)' to add one or more varied parameters"
        )
    }
    if (inherits(parameterSet, "TrialDesignSet")) {
        suppressWarnings(data <- as.data.frame(parameterSet,
            niceColumnNamesEnabled = FALSE,
            includeAllParameters = TRUE,
            addPowerAndAverageSampleNumber = addPowerAndAverageSampleNumber,
            theta = theta, nMax = nMax
        ))
    } else {
        parameterNames <- parameterSet$.getVisibleFieldNamesOrdered()
        suppressWarnings(data <- .getAsDataFrame(
            parameterSet = parameterSet,
            parameterNames = parameterNames,
            niceColumnNamesEnabled = FALSE,
            includeAllParameters = FALSE,
            mandatoryParameterNames = mandatoryParameterNames
        ))
    }
    if (!.isTrialDesignSet(parameterSet)) {
        variedParameters <- logical(0)
        if ("stages" %in% colnames(data)) {
            if ((!.isTrialDesignPlan(parameterSet) && !("overallReject" %in% yParameterNames)) ||
                    any(grepl("rejectPerStage|numberOfSubjects", yParameterNames))) {
                variedParameters <- "stages"
                names(variedParameters) <- "Stage"
            }
        }
        return(list(data = data, variedParameters = variedParameters))
    }
    if (parameterSet$getSize() <= 1) {
        return(list(data = data, variedParameters = parameterSet$variedParameters))
    }
    variedParameters <- parameterSet$variedParameters
    # fill NA gaps in each varied-parameter column with the last seen value
    if (nrow(data) > 1) {
        for (variedParameter in variedParameters) {
            column <- data[[variedParameter]]
            if (length(column) <= 1) {
                stop(
                    C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                    "varied parameter '", variedParameter, "' has length ", length(column)
                )
            }
            valueBefore <-
column[1]
            for (i in 2:length(column)) {
                if (is.na(column[i])) {
                    # carry the previous value forward into NA gaps
                    column[i] <- valueBefore
                } else {
                    valueBefore <- column[i]
                }
            }
            data[[variedParameter]] <- column
        }
    }
    # attach nice display names to the varied parameters for legend titles
    variedParameterNames <- c()
    for (variedParameter in variedParameters) {
        variedParameterNames <- c(
            variedParameterNames,
            .getTableColumnNames(design = designMaster)[[variedParameter]]
        )
    }
    names(variedParameters) <- variedParameterNames
    return(list(data = data, variedParameters = variedParameters))
}

# Builds one category label per data row by combining the existing category
# column (if complete) with the axis label of 'yParameterName'.
.getCategories <- function(data, yParameterName, tableColumnNames) {
    if (is.null(data$categories) || sum(is.na(data$categories)) > 0) {
        return(rep(.getAxisLabel(yParameterName, tableColumnNames), nrow(data)))
    }
    return(paste(data$categories, .getAxisLabel(yParameterName, tableColumnNames), sep = ", "))
}

# Looks up the display label of a parameter; unknown parameters are rendered
# as "%name%" placeholders.
.getAxisLabel <- function(parameterName, tableColumnNames) {
    axisLabel <- tableColumnNames[[parameterName]]
    if (is.null(axisLabel)) {
        return(paste0("%", parameterName, "%"))
    }
    return(axisLabel)
}

# Returns TRUE if the values of 'parameterName' are identical across all
# groups defined by 'groupName' (ignoring all-NA groups).
.allGroupValuesEqual <- function(data, parameterName, groupName) {
    groupedValues <- base::by(data[[parameterName]], data[[groupName]], paste, collapse = ",")
    groupedValues <- groupedValues[!grepl("^NA(,NA)*$", groupedValues)]
    if (length(groupedValues) <= 1) {
        return(TRUE)
    }
    # pairwise comparison of the collapsed per-group value strings
    for (i in 1:(length(groupedValues) - 1)) {
        for (j in (i + 1):length(groupedValues)) {
            if (!is.na(groupedValues[i]) && !is.na(groupedValues[j]) &&
                    groupedValues[i] != groupedValues[j]) {
                return(FALSE)
            }
        }
    }
    return(TRUE)
}

# Central plot engine: turns a parameter set / design set / data.frame into a
# ggplot, handling axis labels, scaling, legends, and mirrored boundaries.
.plotParameterSet <- function(..., parameterSet, designMaster, xParameterName,
        yParameterNames, mainTitle = NA_character_, xlab = NA_character_,
        ylab = NA_character_, palette = "Set1", theta = seq(-1, 1, 0.02),
        nMax = NA_integer_, plotPointsEnabled = NA, legendPosition = NA_integer_,
        variedParameters = logical(0), qnormAlphaLineEnabled = TRUE,
        yAxisScalingEnabled = TRUE, ratioEnabled = NA, plotSettings = NULL) {
    # NOTE(review): variable name contains a typo ("Enmabled") that is kept
    # because later code outside this span references it
    simulationEnrichmentEnmabled <- grepl("SimulationResultsEnrichment", .getClassName(parameterSet))
    if
(.isParameterSet(parameterSet) || .isTrialDesignSet(parameterSet)) { parameterNames <- c(xParameterName, yParameterNames) parameterNames <- parameterNames[!(parameterNames %in% c( "theta", "averageSampleNumber", "overallEarlyStop", "calculatedPower" ))] fieldNames <- c( names(parameterSet$getRefClass()$fields()), names(designMaster$getRefClass()$fields()) ) if (simulationEnrichmentEnmabled) { fieldNames <- c(fieldNames, gsub("s$", "", names(parameterSet$effectList)), "situation") } for (parameterName in parameterNames) { if (!is.na(parameterName) && !(parameterName %in% fieldNames)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", .getClassName(parameterSet), "' and '", .getClassName(designMaster), "' ", "do not contain a field with name '", parameterName, "'" ) } } if (is.null(plotSettings) || !inherits(plotSettings, "PlotSettings")) { plotSettings <- parameterSet$getPlotSettings() } } else { if (is.null(plotSettings) || !inherits(plotSettings, "PlotSettings")) { plotSettings <- PlotSettings() } } if (.isTrialDesignSet(parameterSet)) { parameterSet$assertHaveEqualSidedValues() } addPowerAndAverageSampleNumber <- xParameterName == "theta" && yParameterNames[1] %in% c( "averageSampleNumber", "calculatedPower", "overallEarlyStop", "overallReject", "overallFutility" ) if (!addPowerAndAverageSampleNumber) { addPowerAndAverageSampleNumber <- xParameterName %in% c("effect", "effectMatrix") && yParameterNames[1] %in% c( "overallReject", "futilityStop", "earlyStop", "expectedNumberOfSubjects", "expectedNumberOfEvents" ) } if (addPowerAndAverageSampleNumber && .isMultiArmSimulationResults(parameterSet)) { addPowerAndAverageSampleNumber <- FALSE } if (.isParameterSet(parameterSet) || .isTrialDesignSet(parameterSet)) { df <- .getParameterSetAsDataFrame( parameterSet = parameterSet, designMaster = designMaster, addPowerAndAverageSampleNumber = addPowerAndAverageSampleNumber, theta = theta, nMax = nMax, mandatoryParameterNames = c(xParameterName, yParameterNames), 
yParameterNames = yParameterNames ) data <- df$data variedParameters <- df$variedParameters variedParameters <- na.omit(variedParameters) variedParameters <- variedParameters[variedParameters != "NA"] if (length(variedParameters) == 1 && length(yParameterNames) == 1) { if (.allGroupValuesEqual(data, parameterName = yParameterNames, groupName = variedParameters)) { variedParameters <- logical(0) } } } else if (is.data.frame(parameterSet)) { data <- parameterSet } else { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'parameterSet' (", .getClassName(parameterSet), ") must be a data.frame, a 'TrialDesignSet' ", "or an object that inherits from 'ParameterSet'" ) } if (length(variedParameters) > 0) { legendTitle <- .firstCharacterToUpperCase(paste(names(variedParameters), collapse = "\n")) categoryParameterName <- variedParameters[1] } else { legendTitle <- NA_character_ categoryParameterName <- NA_character_ } yParameterName1 <- yParameterNames[1] yParameterName2 <- NULL yParameterName3 <- NULL if (length(yParameterNames) >= 2) { yParameterName2 <- yParameterNames[2] } if (length(yParameterNames) >= 3) { yParameterName3 <- yParameterNames[3] } mirrorModeEnabled <- any(grepl("Mirrored$", yParameterNames)) tableColumnNames <- .getTableColumnNames(design = designMaster) xAxisLabel <- .getAxisLabel(xParameterName, tableColumnNames) yAxisLabel1 <- .getAxisLabel(yParameterName1, tableColumnNames) yAxisLabel2 <- NULL if (!is.null(yParameterName2) && !is.null(yParameterName3)) { if (!is.na(yParameterName2)) { pn2 <- .getAxisLabel(yParameterName2, tableColumnNames) if (yParameterName2 == "overallEarlyStop") { pn2 <- "Stopping Probability" } yAxisLabel2 <- paste(pn2, .getAxisLabel(yParameterName3, tableColumnNames), sep = " and ") } else { yAxisLabel2 <- .getAxisLabel(yParameterName3, tableColumnNames) } } else if (xParameterName == "effectMatrix" && !is.null(yParameterName2) && !is.na(yParameterName2) && yParameterName1 %in% c("expectedNumberOfEvents", "expectedNumberOfSubjects") && 
yParameterName2 == "rejectAtLeastOne") { # special case: simulation results, plot type 6 (expected number of subjects and power) yAxisLabel2 <- .getAxisLabel(yParameterName2, tableColumnNames) yParameterName3 <- yParameterName2 yParameterName2 <- NA_character_ } else if (!is.null(yParameterName2) && !mirrorModeEnabled) { yAxisLabel1 <- paste(yAxisLabel1, .getAxisLabel(yParameterName2, tableColumnNames), sep = " and ") } if (yParameterName1 %in% c("alphaSpent", "betaSpent")) { yAxisLabel1 <- "Cumulative Error" if (is.null(yParameterName2)) { yAxisLabel1 <- paste0(yAxisLabel1, " (", .getAxisLabel(yParameterName1, tableColumnNames), ")") } } yAxisLabel1 <- sub(paste0(C_PARAMETER_NAMES[["futilityBoundsDelayedInformation"]], " and"), "Lower and", yAxisLabel1, fixed = TRUE ) yAxisLabel1 <- sub(paste0(C_PARAMETER_NAMES[["futilityBoundsDelayedInformationNonBinding"]], " and"), "Lower and", yAxisLabel1, fixed = TRUE ) if (!("xValues" %in% colnames(data)) || !("yValues" %in% colnames(data))) { if (!(xParameterName %in% colnames(data))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, sQuote(xParameterName), " is not available in dataset") } if (!(yParameterName1 %in% colnames(data))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, sQuote(yParameterName1), " is not available in dataset") } data$xValues <- data[[xParameterName]] data$yValues <- data[[yParameterName1]] if (yParameterName1 == "futilityBounds") { data$yValues[!is.na(data$yValues) & (is.infinite(data$yValues) | data$yValues == C_FUTILITY_BOUNDS_DEFAULT)] <- NA_real_ } else if (yParameterName1 == "alpha0Vec") { data$yValues[!is.na(data$yValues) & data$yValues == C_ALPHA_0_VEC_DEFAULT] <- NA_real_ } if (is.null(yParameterName2) || is.na(yParameterName2)) { data$yValues2 <- rep(NA_real_, nrow(data)) } else { if (!(yParameterName2 %in% colnames(data))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, sQuote(yParameterName2), " is not available in dataset") } data$yValues2 <- data[[yParameterName2]] } if (is.null(yParameterName3)) { data$yValues3 
<- rep(NA_real_, nrow(data)) } else { if (!(yParameterName3 %in% colnames(data))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, sQuote(yParameterName3), " is not available in dataset") } data$yValues3 <- data[[yParameterName3]] } if (!is.na(categoryParameterName)) { data$categories <- data[[categoryParameterName]] if (length(variedParameters) > 1) { data$categories <- paste0( variedParameters[1], " = ", data$categories, ", ", variedParameters[2], " = ", data[[variedParameters[2]]] ) } } else { data$categories <- rep(NA_character_, nrow(data)) } } if (!is.na(nMax) && is.null(yParameterName3) && xParameterName == "informationRates") { xAxisLabel <- "Sample Size" data$xValues <- data$xValues * nMax tryCatch( { data$xValues <- as.numeric(.formatSampleSizes(data$xValues)) }, error = function(e) { warning("Failed to format sample sizes on x-axis: ", e$message) } ) } # add zero point to data if (yParameterName1 %in% c("alphaSpent", "betaSpent")) { data <- data[, c("xValues", "yValues", "yValues2", "categories")] uc <- unique(data$categories) data <- rbind(data.frame( xValues = rep(-0.00001, length(uc)), yValues = rep(0, length(uc)), yValues2 = rep(0, length(uc)), categories = uc ), data) } scalingFactor1 <- 1 scalingFactor2 <- 1 if (!is.null(yParameterName2) && "yValues2" %in% colnames(data) && "yValues3" %in% colnames(data)) { if (yAxisScalingEnabled && !is.null(yParameterName3)) { if (is.na(yParameterName2)) { scalingFactors <- .getScalingFactors(data$yValues, data$yValues3) } else { scalingFactors <- .getScalingFactors(data$yValues, c(data$yValues2, data$yValues3)) } scalingFactor1 <- scalingFactors$scalingFactor1 scalingFactor2 <- scalingFactors$scalingFactor2 } df1 <- data.frame( xValues = data$xValues, yValues = data$yValues * scalingFactor1, categories = .getCategories(data, yParameterName1, tableColumnNames) ) if (!is.na(yParameterName2)) { df2 <- data.frame( xValues = data$xValues, yValues = data$yValues2 * scalingFactor2, categories = .getCategories(data, 
yParameterName2, tableColumnNames) ) } if (!is.null(yParameterName3)) { df3 <- data.frame( xValues = data$xValues, yValues = data$yValues3 * scalingFactor2, categories = .getCategories(data, yParameterName3, tableColumnNames) ) if (is.na(yParameterName2)) { data <- rbind(df1, df3) } else { data <- rbind(df1, df2, df3) } } else { data <- rbind(df1, df2) } # sort categories for pairwise printing of the legend unqiueValues <- unique(as.character(data$categories)) decreasing <- addPowerAndAverageSampleNumber && xParameterName %in% c("effect", "effectMatrix") catLevels <- unqiueValues[order(unqiueValues, decreasing = decreasing)] data$categories <- factor(data$categories, levels = catLevels) if (!is.na(legendTitle) && yParameterName1 == "alphaSpent" && yParameterName2 == "betaSpent") { sep <- ifelse(length(legendTitle) > 0 && nchar(legendTitle) > 0, "\n", "") legendTitle <- paste(legendTitle, "Type of error", sep = sep) } } if (is.na(legendPosition)) { legendPosition <- .getLegendPosition( plotSettings, designMaster, data, yParameterName1, yParameterName2, addPowerAndAverageSampleNumber ) } if (is.na(ratioEnabled)) { ratioEnabled <- .isTrialDesignPlanSurvival(parameterSet) || (.isTrialDesignPlanMeans(parameterSet) && parameterSet$meanRatio) || (.isTrialDesignPlanRates(parameterSet) && parameterSet$riskRatio) } plotDashedHorizontalLine <- "criticalValuesEffectScale" %in% yParameterNames && designMaster$sided == 2 p <- .plotDataFrame(data, mainTitle = mainTitle, xlab = xlab, ylab = ylab, xAxisLabel = xAxisLabel, yAxisLabel1 = yAxisLabel1, yAxisLabel2 = yAxisLabel2, palette = palette, plotPointsEnabled = plotPointsEnabled, legendTitle = legendTitle, legendPosition = legendPosition, scalingFactor1 = scalingFactor1, scalingFactor2 = scalingFactor2, addPowerAndAverageSampleNumber = addPowerAndAverageSampleNumber, mirrorModeEnabled = mirrorModeEnabled, plotDashedHorizontalLine = plotDashedHorizontalLine, ratioEnabled = ratioEnabled, plotSettings = plotSettings, sided = 
designMaster$sided, ... ) if (xParameterName == "informationRates") { p <- p + ggplot2::scale_x_continuous(breaks = c(0, round(data$xValues, 3))) } else if (xParameterName == "situation") { # simulation enrichment p <- p + ggplot2::scale_x_continuous(breaks = round(data$xValues)) } # add mirrored lines if (!is.data.frame(parameterSet) && designMaster$sided == 2 && ((yParameterName1 == "criticalValues" || yParameterName1 == "criticalValuesEffectScale") || (!is.null(yParameterName2) && !is.na(yParameterName2) && (yParameterName2 == "criticalValues" || yParameterName2 == "criticalValuesEffectScale")))) { p <- plotSettings$mirrorYValues(p, yValues = data$yValues, plotPointsEnabled = !addPowerAndAverageSampleNumber, pointBorder = .getPointBorder(data, plotSettings) ) # add zero line for Pampallona Tsiatis design p <- p + ggplot2::geom_hline(yintercept = 0, linetype = "solid") # longdash } if (!.isTrialDesignFisher(designMaster) && qnormAlphaLineEnabled && ( ( !is.data.frame(parameterSet) && ( yParameterName1 == "criticalValues" || ( yParameterName1 == "futilityBounds" && !is.null(yParameterName2) && yParameterName2 == "criticalValues" ) ) ) || ( !is.null(yParameterName2) && grepl("futilityBounds|criticalValues", yParameterName1) && grepl("criticalValues", yParameterName2) ) ) ) { p <- .addQnormAlphaLine(p, designMaster, plotSettings, data) } if (!.isTrialDesignFisher(designMaster) && (xParameterName == "informationRates" || xParameterName == "eventsPerStage") && yParameterName1 == "stageLevels") { yValue <- designMaster$alpha if (designMaster$sided == 2) { yValue <- yValue / 2 } p <- p + ggplot2::geom_hline(yintercept = yValue, linetype = "dashed") yValueLabel <- paste0("alpha == ", round(yValue, 4)) hjust <- plotSettings$scaleSize(-0.2) p <- p + ggplot2::annotate("label", x = -Inf, hjust = hjust, y = yValue, label = yValueLabel, size = plotSettings$scaleSize(2.5), parse = TRUE, colour = "white", fill = "white" ) p <- p + ggplot2::annotate("text", x = -Inf, hjust = 
.naAndNaNOmit <- function(x) {
    # Removes NA and NaN entries from a numeric vector.
    # NULL and zero-length input are returned unchanged.
    # Note: na.omit() already drops NaN (is.na(NaN) is TRUE); the additional
    # is.nan() subset also strips the "na.action" attribute from the result.
    if (is.null(x) || length(x) == 0) {
        return(x)
    }
    x <- na.omit(x)
    return(x[!is.nan(x)])
}

.getScalingFactors <- function(leftAxisValues, rightAxisValues) {
    # Calculates the two factors that map the value ranges of a primary (left)
    # and secondary (right) y-axis onto a common scale: the axis with the
    # smaller maximum is scaled up to match the larger one.
    #
    # @param leftAxisValues numeric vector of values shown on the left y-axis.
    # @param rightAxisValues numeric vector of values shown on the right y-axis.
    # @return list with elements 'scalingFactor1' (left) and 'scalingFactor2' (right).

    # compute the cleaned vectors once; the original evaluated .naAndNaNOmit()
    # twice per axis and used the vectorized ifelse() for a scalar condition
    leftValues <- .naAndNaNOmit(leftAxisValues)
    rightValues <- .naAndNaNOmit(rightAxisValues)
    m1 <- if (length(leftValues) == 0) 1 else max(leftValues)
    m2 <- if (length(rightValues) == 0) 1 else max(rightValues)
    if (is.na(m1)) {
        stop(
            C_EXCEPTION_TYPE_RUNTIME_ISSUE,
            "y-values, left (", .arrayToString(leftAxisValues),
            ") are not specified correctly"
        )
    }
    if (is.na(m2)) {
        stop(
            C_EXCEPTION_TYPE_RUNTIME_ISSUE,
            "y-values, right (", .arrayToString(rightAxisValues),
            ") are not specified correctly"
        )
    }
    if (m1 > m2) {
        scalingFactor1 <- 1
        scalingFactor2 <- if (m2 == 0) m1 else m1 / m2
    } else if (m1 < m2) {
        scalingFactor1 <- if (m1 == 0) m2 else m2 / m1
        scalingFactor2 <- 1
    } else {
        scalingFactor1 <- 1
        scalingFactor2 <- 1
    }
    # NOTE(review): only 'scalingFactor2' is checked for infinity; an infinite
    # 'scalingFactor1' passes through unchecked - confirm whether that is intended
    if (is.infinite(scalingFactor2)) {
        stop(
            "Failed to calculate 'scalingFactor2' (", scalingFactor2, ") for ",
            .arrayToString(leftAxisValues, maxLength = 15),
            " and ", .arrayToString(rightAxisValues, maxLength = 15)
        )
    }
    return(list(scalingFactor1 = scalingFactor1, scalingFactor2 = scalingFactor2))
}
.plotDataFrame <- function(data, ..., mainTitle = NA_character_, xlab = NA_character_, ylab = NA_character_,
        xAxisLabel = NA_character_, yAxisLabel1 = NA_character_, yAxisLabel2 = NA_character_,
        palette = "Set1", plotPointsEnabled = NA, legendTitle = NA_character_,
        legendPosition = NA_integer_, scalingFactor1 = 1, scalingFactor2 = 1,
        addPowerAndAverageSampleNumber = FALSE, mirrorModeEnabled = FALSE,
        plotDashedHorizontalLine = FALSE, ratioEnabled = FALSE, plotSettings = NULL,
        sided = 1, discreteXAxis = FALSE) {
    # Core plotting engine: builds a ggplot2 object from a pre-assembled data.frame
    # with the generic columns 'xValues' and 'yValues' and the optional grouping
    # columns 'categories' and 'groups'.
    # NOTE(review): the 'sided' argument is accepted but never referenced in this
    # body - confirm whether callers rely only on it being part of the signature.
    if (!is.data.frame(data)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'data' must be a data.frame (is ", .getClassName(data), ")")
    }
    if (is.null(plotSettings)) {
        plotSettings <- PlotSettings()
    }

    # drop (0, 0) points and rows without y-value; warn about it on log level "warn"
    nRow <- nrow(data)
    data <- data[!(data$xValues == 0 & data$xValues == data$yValues), ]
    removedRows1 <- nRow - nrow(data)
    nRow <- nrow(data)
    data <- data[!is.na(data$yValues), ]
    removedRows2 <- nRow - nrow(data)
    if (getLogLevel() == C_LOG_LEVEL_WARN && (removedRows1 > 0 || removedRows2 > 0)) {
        warning(sprintf(
            "Removed %s rows containing (0, 0)-points and %s rows containing missing values",
            removedRows1, removedRows2
        ), call. = FALSE)
    }

    # reduce the data.frame to the columns that are actually plotted
    categoryEnabled <- !is.null(data[["categories"]]) && !all(is.na(data$categories))
    groupEnabled <- !is.null(data[["groups"]]) && !all(is.na(data$groups))
    if (categoryEnabled && groupEnabled) {
        data <- data[, c("xValues", "yValues", "categories", "groups")]
    } else if (categoryEnabled) {
        data <- data[, c("xValues", "yValues", "categories")]
    } else if (groupEnabled) {
        data <- data[, c("xValues", "yValues", "groups")]
    } else {
        data <- data[, c("xValues", "yValues")]
    }

    # treat infinite y-values like missing values and remove them
    data$yValues[!is.na(data$yValues) & is.infinite(data$yValues)] <- NA_real_
    data <- data[!is.na(data$yValues), ]

    # base plot: colour/fill aesthetics depend on the available grouping columns
    if (categoryEnabled && groupEnabled) {
        p <- ggplot2::ggplot(data, ggplot2::aes(
            x = .data[["xValues"]], y = .data[["yValues"]],
            colour = factor(.data[["groups"]]), fill = factor(.data[["categories"]])
        ))
    } else if (mirrorModeEnabled) {
        p <- ggplot2::ggplot(data, ggplot2::aes(
            x = .data[["xValues"]], y = .data[["yValues"]],
            fill = factor(.data[["categories"]])
        ))
    } else if (categoryEnabled) {
        p <- ggplot2::ggplot(data, ggplot2::aes(
            x = .data[["xValues"]], y = .data[["yValues"]],
            colour = factor(.data[["categories"]])
        ))
    } else {
        p <- ggplot2::ggplot(data, ggplot2::aes(x = .data[["xValues"]], y = .data[["yValues"]]))
    }
    p <- plotSettings$setTheme(p)
    p <- plotSettings$hideGridLines(p)
    if (discreteXAxis) {
        p <- p + ggplot2::scale_x_continuous(breaks = round(data$xValues))
    }

    # set main title
    p <- plotSettings$setMainTitle(p, mainTitle)

    # set legend; legendPosition == -1 means "hide legend"
    if (!categoryEnabled || mirrorModeEnabled ||
            (!is.na(legendPosition) && legendPosition == -1)) {
        p <- p + ggplot2::theme(legend.position = "none")
    } else {
        p <- plotSettings$setLegendPosition(p, legendPosition = legendPosition)
        p <- plotSettings$setLegendBorder(p)
        p <- plotSettings$setLegendTitle(p, legendTitle)
        p <- plotSettings$setLegendLabelSize(p)
    }

    # set optional scale limits taken from the 'xlim'/'ylim' arguments in '...'
    xLim <- .getOptionalArgument("xlim", ...)
    yLim <- .getOptionalArgument("ylim", ...)
    if (is.null(yLim) && !missing(yAxisLabel1) && !is.na(yAxisLabel1) && yAxisLabel1 == "Critical value") {
        # small critical values: widen the y-range so the curve is not glued to the axis
        yMax <- max(na.omit(data$yValues))
        if (length(yMax) == 1 && yMax < 0.1) {
            yLim <- c(0, 2 * yMax)
        }
    }
    if ((!is.null(xLim) && is.numeric(xLim) && length(xLim) == 2) ||
            (!is.null(yLim) && is.numeric(yLim) && length(yLim) == 2)) {
        p <- p + ggplot2::coord_cartesian(
            xlim = xLim, ylim = yLim, expand = TRUE, default = FALSE, clip = "on"
        )
    }

    # add dashed line at y = 0 (or y = 1 when a ratio is displayed)
    if (mirrorModeEnabled || plotDashedHorizontalLine) {
        p <- p + ggplot2::geom_hline(yintercept = ifelse(ratioEnabled, 1, 0), linetype = "dashed")
    }

    xAxisLabel <- .toCapitalized(xAxisLabel)
    yAxisLabel1 <- .toCapitalized(yAxisLabel1)
    yAxisLabel2 <- .toCapitalized(yAxisLabel2)
    p <- plotSettings$setAxesLabels(p,
        xAxisLabel = xAxisLabel, yAxisLabel1 = yAxisLabel1,
        yAxisLabel2 = yAxisLabel2, xlab = xlab, ylab = ylab,
        scalingFactor1 = scalingFactor1, scalingFactor2 = scalingFactor2
    )

    # plot lines and points; points are suppressed for dense x-grids (> 20 distinct values)
    plotPointsEnabled <- ifelse(is.na(plotPointsEnabled),
        !addPowerAndAverageSampleNumber,
        plotPointsEnabled
    )
    if (length(unique(data$xValues)) > 20) {
        plotPointsEnabled <- FALSE
    }
    p <- plotSettings$plotValues(p,
        plotPointsEnabled = plotPointsEnabled,
        pointBorder = .getPointBorder(data, plotSettings)
    )
    p <- plotSettings$setAxesAppearance(p)
    p <- plotSettings$setColorPalette(p, palette)
    p <- plotSettings$enlargeAxisTicks(p)

    # optional company annotation, enabled via 'companyAnnotationEnabled' in '...'
    companyAnnotationEnabled <- .getOptionalArgument("companyAnnotationEnabled", ...)
    if (is.null(companyAnnotationEnabled) || !is.logical(companyAnnotationEnabled)) {
        companyAnnotationEnabled <- FALSE
    }
    p <- plotSettings$addCompanyAnnotation(p, enabled = companyAnnotationEnabled)

    return(p)
}
.getPointBorder <- function(data, plotSettings) {
    # Determines the point border width (4 = widest, 1 = thinnest) from the
    # number of plotted categories and the point density per category.
    # Side effect: for very dense plots the point size in 'plotSettings'
    # is reduced to a third.
    categoryCount <- 1
    if (any(!is.na(data$categories))) {
        categoryCount <- length(unique(as.character(data$categories)))
    }
    pointsPerCategory <- length(unique(data$xValues)) / categoryCount
    if (pointsPerCategory > 10) {
        # many points per line: thinnest border plus smaller points
        plotSettings$adjustPointSize(0.333)
        return(1)
    }
    if (categoryCount > 8) {
        return(1)
    }
    if (categoryCount > 6) {
        return(2)
    }
    if (categoryCount > 4) {
        return(3)
    }
    return(4)
}

.getLegendPosition <- function(plotSettings, designMaster, data,
        yParameterName1, yParameterName2, addPowerAndAverageSampleNumber) {
    # Chooses a default legend position (one of the C_POSITION_* constants)
    # based on the design and the plotted parameters.
    numberOfLegendEntries <- length(unique(data$categories))
    if (numberOfLegendEntries > 6) {
        # crowded legend: shrink points and fonts, move the legend outside
        plotSettings$adjustPointSize(0.8)
        plotSettings$adjustLegendFontSize(0.8)
        return(C_POSITION_OUTSIDE_PLOT)
    }
    if (.isTrialDesignWithValidFutilityBounds(designMaster) &&
            yParameterName1 == "futilityBounds" &&
            yParameterName2 == "criticalValues") {
        return(C_POSITION_RIGHT_BOTTOM)
    }
    if (.isTrialDesignWithValidAlpha0Vec(designMaster) &&
            yParameterName1 == "alpha0Vec" &&
            yParameterName2 == "criticalValues") {
        return(C_POSITION_RIGHT_TOP)
    }
    if (yParameterName1 == "criticalValues") {
        return(C_POSITION_RIGHT_TOP)
    }
    if (yParameterName1 %in% c("stageLevels", "alphaSpent", "betaSpent")) {
        return(C_POSITION_LEFT_TOP)
    }
    if (addPowerAndAverageSampleNumber) {
        return(C_POSITION_LEFT_CENTER)
    }
    return(C_POSITION_OUTSIDE_PLOT)
}
.addQnormAlphaLine <- function(p, designMaster, plotSettings, data, annotationEnabled = TRUE) {
    # Adds a dashed horizontal reference line at qnorm(1 - alpha) (and its
    # mirror at -qnorm(1 - alpha) for two-sided designs) to a ggplot object,
    # optionally labelled with the corresponding alpha expression.
    #
    # @param p The ggplot object to extend.
    # @param designMaster A trial design providing 'alpha' and 'sided'.
    # @param plotSettings Plot settings object used for size scaling.
    # @param data data.frame with 'yValues' (and optionally 'yValues2') used to expand the y-range.
    # @param annotationEnabled If TRUE, a parsed label is drawn at the reference line.
    # @return The extended ggplot object.
    alpha <- designMaster$alpha
    if (designMaster$sided == 2) {
        alpha <- alpha / 2
    }
    yValue <- .getOneMinusQNorm(alpha)
    yValueLabel <- paste0("qnorm(1 - ", alpha, " ) == ", round(yValue, 4))

    # the positive reference line is drawn for one- and two-sided designs alike
    p <- p + ggplot2::geom_hline(yintercept = yValue, linetype = "dashed")
    if (designMaster$sided != 1) {
        p <- p + ggplot2::geom_hline(yintercept = -yValue, linetype = "dashed")
    }
    if (annotationEnabled) {
        # the white "label" acts as background, the text is drawn on top of it
        p <- p + ggplot2::annotate("label",
            x = -Inf, hjust = plotSettings$scaleSize(-0.1),
            y = yValue, label = yValueLabel,
            size = plotSettings$scaleSize(2.5), parse = TRUE,
            colour = "white", fill = "white"
        )
        p <- p + ggplot2::annotate("text",
            x = -Inf, hjust = plotSettings$scaleSize(-0.15),
            y = yValue, label = yValueLabel,
            size = plotSettings$scaleSize(2.5), parse = TRUE
        )
    }

    # expand the y-axis range so the reference line is not plotted at the border
    if (designMaster$sided == 1) {
        yMax <- max(stats::na.omit(data$yValues))
        if (!is.null(data$yValues2) && length(data$yValues2) > 0) {
            yMax <- max(yMax, stats::na.omit(data$yValues2))
        }
        eps <- (yMax - yValue) * 0.15
        p <- plotSettings$expandAxesRange(p, y = yValue - eps)
    }
    return(p)
}

.getLambdaStepFunctionByTime <- function(time, piecewiseSurvivalTime, lambda2) {
    # Returns the hazard rate belonging to the piecewise interval containing 'time':
    # the i-th rate applies up to (and including) the i-th start time; times beyond
    # the last start time get the last rate. Without (valid) start times the first
    # rate applies everywhere.
    if (length(piecewiseSurvivalTime) == 0 || any(is.na(piecewiseSurvivalTime))) {
        return(lambda2[1])
    }
    for (i in seq_along(piecewiseSurvivalTime)) {
        if (time <= piecewiseSurvivalTime[i]) {
            return(lambda2[i])
        }
    }
    return(lambda2[length(lambda2)])
}

.getLambdaStepFunction <- function(timeValues, piecewiseSurvivalTime, piecewiseLambda) {
    # Evaluates the piecewise constant hazard (lambda) step function at 'timeValues'.
    #
    # @param timeValues numeric vector of evaluation times.
    # @param piecewiseSurvivalTime numeric vector of interval start times (incl. the leading 0).
    # @param piecewiseLambda numeric vector with one hazard rate per interval.
    # @return numeric vector of hazard rates, one per entry of 'timeValues'.
    if (length(piecewiseSurvivalTime) != length(piecewiseLambda)) {
        # bug fix: the message wrongly stated "... - 1" although the check
        # requires both vectors to have exactly the same length
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "length of 'piecewiseSurvivalTime' (", length(piecewiseSurvivalTime),
            ") must be equal to length of 'piecewiseLambda' (", length(piecewiseLambda), ")"
        )
    }
    piecewiseSurvivalTime <- .getPiecewiseExpStartTimesWithoutLeadingZero(piecewiseSurvivalTime)
    if (length(piecewiseSurvivalTime) == 0) {
        return(piecewiseLambda[1])
    }
    if (length(timeValues) == 0) {
        return(NULL) # preserve historical behavior for empty input
    }
    # pre-allocate instead of growing the result vector inside the loop (was O(n^2))
    lambdaValues <- numeric(length(timeValues))
    for (i in seq_along(timeValues)) {
        lambdaValues[i] <- .getLambdaStepFunctionByTime(
            timeValues[i], piecewiseSurvivalTime, piecewiseLambda
        )
    }
    return(lambdaValues)
}
#'
#' @title
#' Get Lambda Step Function
#'
#' @description
#' Calculates the lambda step values for a given time vector.
#'
#' @param timeValues A numeric vector that specifies the time values for which the lambda step values shall be calculated.
#' @param piecewiseSurvivalTime A numeric vector that specifies the time intervals for the piecewise
#' definition of the exponential survival time cumulative distribution function (see details).
#' @param piecewiseLambda A numeric vector that specifies the assumed hazard rate in the treatment group.
#' @inheritParams param_three_dots
#'
#' @details
#' The first element of the vector \code{piecewiseSurvivalTime} must be equal to \code{0}.
#' This function is used for plotting of sample size survival results
#' (cf., \code{\link[=plot.TrialDesignPlan]{plot}}, \code{type = 13} and \code{type = 14}).
#'
#' @return A numeric vector containing the lambda step values that correspond to the specified time values.
#'
#' @export
#'
#' @keywords internal
#'
getLambdaStepFunction <- function(timeValues, ..., piecewiseSurvivalTime, piecewiseLambda) {
    # validate the user-facing arguments before delegating to the internal implementation
    .assertIsNumericVector(timeValues, "timeValues")
    .assertIsNumericVector(piecewiseSurvivalTime, "piecewiseSurvivalTime")
    .assertIsNumericVector(piecewiseLambda, "piecewiseLambda")
    .warnInCaseOfUnknownArguments(functionName = "getLambdaStepFunction", ...)
    .getLambdaStepFunction(
        timeValues = timeValues,
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        piecewiseLambda = piecewiseLambda
    )
}

.getRelativeFigureOutputPath <- function(subDir = NULL) {
    # Builds (and creates, if necessary) the example figure output directory
    # '[getwd()]/_examples/output/figures/[subDir]'; 'subDir' defaults to the
    # current date in the format "YYYY-MM-DD".
    if (is.null(subDir)) {
        subDir <- format(Sys.Date(), format = "%Y-%m-%d")
    }
    figPath <- file.path(getwd(), "_examples", "output", "figures", subDir)
    if (!dir.exists(figPath)) {
        dir.create(figPath, showWarnings = FALSE, recursive = TRUE)
    }
    return(figPath)
}
# @title
# Save Last Plot
#
# @description
# Saves the last plot to a PNG file located in
# '[getwd()]/_examples/output/figures/[current date]/[filename].png'.
#
# @param filename The filename (without extension!).
#
# @details
# This is a wrapper function that creates an output path and uses \code{ggsave}
# to save the last plot.
#
# @examples
#
# # saveLastPlot('my_plot')
#
# @keywords internal
#
saveLastPlot <- function(filename, outputPath = .getRelativeFigureOutputPath()) {
    .assertGgplotIsInstalled()
    # reject path separators: the directory must be given via 'outputPath'
    if (grepl("\\\\|/", filename)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'filename' seems to be a path. ",
            "Please specify 'outputPath' separately"
        )
    }
    # append the .png extension unless it is already present
    if (!grepl("\\.png$", filename)) {
        filename <- paste0(filename, ".png")
    }
    targetFile <- file.path(outputPath, filename)
    ggplot2::ggsave(
        filename = targetFile, plot = ggplot2::last_plot(),
        device = NULL, path = NULL, scale = 1.2, width = 16,
        height = 15, units = "cm", dpi = 600, limitsize = TRUE
    )
    cat("Last plot was saved to '", targetFile, "'\n")
}

.getGridPlotSettings <- function(x, typeNumbers, grid) {
    # Returns a clone of the plot settings of 'x' tuned for a multi-plot grid
    # layout, or NULL when no real grid (<= 3 plots or a single column) is shown.
    if (length(typeNumbers) <= 3 || grid <= 1) {
        return(NULL)
    }
    if (is.null(x[[".plotSettings"]])) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'x' (", .getClassName(x), ") does not contain field .plotSettings")
    }
    settings <- x$.plotSettings
    settings <- if (is.null(settings)) PlotSettings() else settings$clone()
    # shrink the default scaling so the single plots fit into the grid
    if (settings$scalingFactor == 1) {
        settings$scalingFactor <- 0.6
    }
    return(settings)
}

.getGridLegendPosition <- function(legendPosition, typeNumbers, grid) {
    # Outside a real grid layout the position stays undefined
    if (length(typeNumbers) <= 3 || grid <= 1) {
        return(NA_integer_)
    }
    # -1 hides the legend when no explicit position was requested
    if (is.na(legendPosition)) {
        return(-1L)
    }
    return(legendPosition)
}
.formatSubTitleValue <- function(value, paramName) {
    # Formats a parameter value for display in a plot sub-title; the rounding
    # precision depends on the parameter, multi-valued parameters are wrapped
    # in parentheses, e.g. "(0.1, 0.2)".
    wrapValues <- function(v, digits) {
        paste0("(", .arrayToString(round(v, digits), encapsulate = FALSE), ")")
    }
    if (paramName == "allocationRatioPlanned") {
        return(round(value, 2))
    }
    if (paramName %in% c("assumedStDev", "assumedStDevs")) {
        if (length(value) > 1) {
            return(wrapValues(value, 1))
        }
        return(round(value, 2))
    }
    if (paramName %in% c("piControls", "pi2")) {
        if (length(value) > 1) {
            return(wrapValues(value, 3))
        }
        return(round(value, 3))
    }
    return(.arrayToString(round(value, 2)))
}
C_KEY_WORDS_OVERALL_MEANS <- .getAllParameterNameVariants(c("overallMeans", "overallMean")) C_KEY_WORDS_OVERALL_ST_DEVS <- .getAllParameterNameVariants(c( "overallStDevs", "overallStDev", "overall.sd", "overall_sd" )) C_KEY_WORDS_ALLOCATION_RATIOS <- .getAllParameterNameVariants(c("ar", "allocationRatios", "allocationRatio")) C_KEY_WORDS_LOG_RANKS <- .getAllParameterNameVariants(c("logRanks", "logRank", "lr")) C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS <- .getAllParameterNameVariants(c( "oar", "car", "overallAllocationRatios", "overallAllocationRatio" )) C_KEY_WORDS_OVERALL_LOG_RANKS <- .getAllParameterNameVariants(c("olr", "clr", "overallLogRanks", "overallLogRank")) C_KEY_WORDS <- c( C_KEY_WORDS_GROUPS, C_KEY_WORDS_STAGES, C_KEY_WORDS_SUBSETS, C_KEY_WORDS_SAMPLE_SIZES, C_KEY_WORDS_MEANS, C_KEY_WORDS_ST_DEVS, C_KEY_WORDS_EVENTS, C_KEY_WORDS_OVERALL_EVENTS, C_KEY_WORDS_OVERALL_SAMPLE_SIZES, C_KEY_WORDS_OVERALL_MEANS, C_KEY_WORDS_OVERALL_ST_DEVS, C_KEY_WORDS_ALLOCATION_RATIOS, C_KEY_WORDS_LOG_RANKS, C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, C_KEY_WORDS_OVERALL_LOG_RANKS ) #' @title #' Read Dataset #' #' @description #' Reads a data file and returns it as dataset object. #' #' @param file A CSV file (see \code{\link[utils]{read.table}}). #' @param header A logical value indicating whether the file contains the names of #' the variables as its first line. #' @param sep The field separator character. Values on each line of the file are separated #' by this character. If sep = "," (the default for \code{readDataset}) the separator is a comma. #' @param quote The set of quoting characters. To disable quoting altogether, use #' quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only #' considered for columns read as character, which is all of them unless \code{colClasses} is specified. #' @param dec The character used in the file for decimal points. #' @param fill logical. 
If \code{TRUE} then in case the rows have unequal length, blank fields #' are implicitly added. #' @param comment.char character: a character vector of length one containing a single character #' or an empty string. Use "" to turn off the interpretation of comments altogether. #' @param fileEncoding character string: if non-empty declares the encoding used on a file #' (not a connection) so the character data can be re-encoded. #' See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'. #' @param ... Further arguments to be passed to code{\link[utils]{read.table}}. #' #' @details #' \code{readDataset} is a wrapper function that uses \code{\link[utils]{read.table}} to read the #' CSV file into a data frame, transfers it from long to wide format with \code{\link[stats]{reshape}} #' and puts the data to \code{\link[=getDataset]{getDataset()}}. #' #' @template return_object_dataset #' #' @seealso #' \itemize{ #' \item \code{\link[=readDatasets]{readDatasets()}} for reading multiple datasets, #' \item \code{\link[=writeDataset]{writeDataset()}} for writing a single dataset, #' \item \code{\link[=writeDatasets]{writeDatasets()}} for writing multiple datasets. 
#' } #' #' @examples #' \dontrun{ #' dataFileRates <- system.file("extdata", #' "dataset_rates.csv", #' package = "rpact" #' ) #' if (dataFileRates != "") { #' datasetRates <- readDataset(dataFileRates) #' datasetRates #' } #' #' dataFileMeansMultiArm <- system.file("extdata", #' "dataset_means_multi-arm.csv", #' package = "rpact" #' ) #' if (dataFileMeansMultiArm != "") { #' datasetMeansMultiArm <- readDataset(dataFileMeansMultiArm) #' datasetMeansMultiArm #' } #' #' dataFileRatesMultiArm <- system.file("extdata", #' "dataset_rates_multi-arm.csv", #' package = "rpact" #' ) #' if (dataFileRatesMultiArm != "") { #' datasetRatesMultiArm <- readDataset(dataFileRatesMultiArm) #' datasetRatesMultiArm #' } #' #' dataFileSurvivalMultiArm <- system.file("extdata", #' "dataset_survival_multi-arm.csv", #' package = "rpact" #' ) #' if (dataFileSurvivalMultiArm != "") { #' datasetSurvivalMultiArm <- readDataset(dataFileSurvivalMultiArm) #' datasetSurvivalMultiArm #' } #' } #' #' @export #' readDataset <- function(file, ..., header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "", fileEncoding = "UTF-8") { if (!file.exists(file)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the file '", file, "' does not exist") } data <- utils::read.table( file = file, header = header, sep = sep, quote = quote, dec = dec, fill = fill, fileEncoding = fileEncoding, ... ) dataWide <- stats::reshape(data = data, direction = "wide", idvar = "stages", timevar = "groups") colnames(dataWide) <- gsub("\\.", "", colnames(dataWide)) return(getDataset(dataWide)) } #' @title #' Write Dataset #' #' @description #' Writes a dataset to a CSV file. #' #' @param dataset A dataset. #' @param file The target CSV file. #' @param append Logical. Only relevant if file is a character string. #' If \code{TRUE}, the output is appended to the file. If \code{FALSE}, any existing file of the name is destroyed. #' @param sep The field separator character. 
Values on each line of the file are separated #' by this character. If sep = "," (the default for \code{writeDataset}) the separator is a comma. #' @param quote The set of quoting characters. To disable quoting altogether, use #' quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only #' considered for columns read as character, which is all of them unless \code{colClasses} is specified. #' @param dec The character used in the file for decimal points. #' @param eol The character(s) to print at the end of each line (row). #' @param na The string to use for missing values in the data. #' @param row.names Either a logical value indicating whether the row names of \code{dataset} are #' to be written along with \code{dataset}, or a character vector of row names to be written. #' @param col.names Either a logical value indicating whether the column names of \code{dataset} are #' to be written along with \code{dataset}, or a character vector of column names to be written. #' See the section on 'CSV files' for the meaning of \code{col.names = NA}. #' @param qmethod A character string specifying how to deal with embedded double quote characters #' when quoting strings. Must be one of "double" (default in \code{writeDataset}) or "escape". #' @param fileEncoding Character string: if non-empty declares the encoding used on a file #' (not a connection) so the character data can be re-encoded. #' See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'. #' @param ... Further arguments to be passed to \code{\link[utils]{write.table}}. #' #' @details #' \code{\link[=writeDataset]{writeDataset()}} is a wrapper function that coerces the dataset to a data frame and uses \cr #' \code{\link[utils]{write.table}} to write it to a CSV file. 
#' #' @seealso #' \itemize{ #' \item \code{\link[=writeDatasets]{writeDatasets()}} for writing multiple datasets, #' \item \code{\link[=readDataset]{readDataset()}} for reading a single dataset, #' \item \code{\link[=readDatasets]{readDatasets()}} for reading multiple datasets. #' } #' #' @examples #' \dontrun{ #' datasetOfRates <- getDataset( #' n1 = c(11, 13, 12, 13), #' n2 = c(8, 10, 9, 11), #' events1 = c(10, 10, 12, 12), #' events2 = c(3, 5, 5, 6) #' ) #' writeDataset(datasetOfRates, "dataset_rates.csv") #' } #' #' @export #' writeDataset <- function(dataset, file, ..., append = FALSE, quote = TRUE, sep = ",", eol = "\n", na = "NA", dec = ".", row.names = TRUE, col.names = NA, qmethod = "double", fileEncoding = "UTF-8") { .assertIsDataset(dataset) x <- as.data.frame(dataset, niceColumnNamesEnabled = FALSE) utils::write.table( x = x, file = file, append = append, quote = quote, sep = sep, eol = eol, na = na, dec = dec, row.names = FALSE, col.names = TRUE, qmethod = qmethod, fileEncoding = fileEncoding ) } #' @title #' Read Multiple Datasets #' #' @description #' Reads a data file and returns it as a list of dataset objects. #' #' @param file A CSV file (see \code{\link[utils]{read.table}}). #' @param header A logical value indicating whether the file contains the names of #' the variables as its first line. #' @param sep The field separator character. Values on each line of the file are separated #' by this character. If sep = "," (the default for \code{readDatasets}) the separator is a comma. #' @param quote The set of quoting characters. To disable quoting altogether, use #' quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only #' considered for columns read as character, which is all of them unless \code{colClasses} is specified. #' @param dec The character used in the file for decimal points. #' @param fill logical. If \code{TRUE} then in case the rows have unequal length, blank fields #' are implicitly added. 
#' @param comment.char character: a character vector of length one containing a single character #' or an empty string. Use "" to turn off the interpretation of comments altogether. #' @param fileEncoding character string: if non-empty declares the encoding used on a file #' (not a connection) so the character data can be re-encoded. #' See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'. #' @param ... Further arguments to be passed to \code{\link[utils]{read.table}}. #' #' @details #' Reads a file that was written by \code{\link[=writeDatasets]{writeDatasets()}} before. #' #' @return Returns a \code{\link[base]{list}} of \code{\link{Dataset}} objects. #' #' @seealso #' \itemize{ #' \item \code{\link[=readDataset]{readDataset()}} for reading a single dataset, #' \item \code{\link[=writeDatasets]{writeDatasets()}} for writing multiple datasets, #' \item \code{\link[=writeDataset]{writeDataset()}} for writing a single dataset. #' } #' #' @examples #' dataFile <- system.file("extdata", "datasets_rates.csv", package = "rpact") #' if (dataFile != "") { #' datasets <- readDatasets(dataFile) #' datasets #' } #' @export #' readDatasets <- function(file, ..., header = TRUE, sep = ",", quote = "\"", dec = ".", fill = TRUE, comment.char = "", fileEncoding = "UTF-8") { if (!file.exists(file)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the file '", file, "' does not exist") } data <- utils::read.table( file = file, header = header, sep = sep, quote = quote, dec = dec, fill = fill, fileEncoding = fileEncoding, ... 
) if (is.null(data[["datasetId"]])) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "data file must contain the column 'datasetId'") } datasets <- list() for (datasetId in unique(data$datasetId)) { subData <- data[data$datasetId == datasetId, ] dataFrame <- subset(subData, select = -datasetId) description <- NA_character_ if (!is.null(dataFrame[["description"]])) { description <- as.character(dataFrame$description[1]) dataFrame <- subset(dataFrame, select = -description) } if (length(unique(subData$groups)) == 2) { dataWide <- stats::reshape(dataFrame, direction = "wide", idvar = "stages", timevar = "groups") colnames(dataWide) <- gsub("\\.", "", colnames(dataWide)) dataset <- getDataset(dataWide) } else { dataset <- getDataset(dataFrame) } dataset$setDescription(description) datasets <- c(datasets, dataset) } return(datasets) } #' @title #' Write Multiple Datasets #' #' @description #' Writes a list of datasets to a CSV file. #' #' @param datasets A list of datasets. #' @param file The target CSV file. #' @param append Logical. Only relevant if file is a character string. #' If \code{TRUE}, the output is appended to the file. If FALSE, any existing file of the name is destroyed. #' @param sep The field separator character. Values on each line of the file are separated #' by this character. If sep = "," (the default for \code{writeDatasets}) the separator is a comma. #' @param quote The set of quoting characters. To disable quoting altogether, use #' quote = "". See scan for the behavior on quotes embedded in quotes. Quoting is only #' considered for columns read as character, which is all of them unless \code{colClasses} is specified. #' @param dec The character used in the file for decimal points. #' @param eol The character(s) to print at the end of each line (row). #' @param na The string to use for missing values in the data. 
#' @param row.names Either a logical value indicating whether the row names of \code{dataset} are
#' to be written along with \code{dataset}, or a character vector of row names to be written.
#' @param col.names Either a logical value indicating whether the column names of \code{dataset} are
#' to be written along with \code{dataset}, or a character vector of column names to be written.
#' See the section on 'CSV files' for the meaning of \code{col.names = NA}.
#' @param qmethod A character string specifying how to deal with embedded double quote characters
#' when quoting strings. Must be one of "double" (default in \code{writeDatasets}) or "escape".
#' @param fileEncoding Character string: if non-empty declares the encoding used on a file
#' (not a connection) so the character data can be re-encoded.
#' See the 'Encoding' section of the help for file, the 'R Data Import/Export Manual' and 'Note'.
#' @param ... Further arguments to be passed to \code{\link[utils]{write.table}}.
#'
#' @details
#' The format of the CSV file is optimized for usage of \code{\link[=readDatasets]{readDatasets()}}.
#'
#' @seealso
#' \itemize{
#'   \item \code{\link[=writeDataset]{writeDataset()}} for writing a single dataset,
#'   \item \code{\link[=readDatasets]{readDatasets()}} for reading multiple datasets,
#'   \item \code{\link[=readDataset]{readDataset()}} for reading a single dataset.
#' }
#'
#' @examples
#' \dontrun{
#' d1 <- getDataset(
#'     n1 = c(11, 13, 12, 13),
#'     n2 = c(8, 10, 9, 11),
#'     events1 = c(10, 10, 12, 12),
#'     events2 = c(3, 5, 5, 6)
#' )
#' d2 <- getDataset(
#'     n1 = c(9, 13, 12, 13),
#'     n2 = c(6, 10, 9, 11),
#'     events1 = c(10, 10, 12, 12),
#'     events2 = c(4, 5, 5, 6)
#' )
#' datasets <- list(d1, d2)
#' writeDatasets(datasets, "datasets_rates.csv")
#' }
#'
#' @export
#'
writeDatasets <- function(datasets, file, ..., append = FALSE, quote = TRUE,
        sep = ",", eol = "\n", na = "NA", dec = ".", row.names = TRUE,
        col.names = NA, qmethod = "double", fileEncoding = "UTF-8") {
    # Validate input: a non-empty list of Dataset objects is required.
    if (!is.list(datasets)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'datasets' must be a list of datasets")
    }
    if (length(datasets) == 0) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'datasets' is empty")
    }

    # Flatten all datasets into a single data.frame, tagging every row with its
    # 'datasetId' (and optional 'description') so that readDatasets() can split
    # the file back into separate datasets.
    datasetType <- NA_character_
    dataFrames <- NULL
    for (i in 1:length(datasets)) {
        dataset <- datasets[[i]]
        .assertIsDataset(dataset)

        # All list elements must share one concrete Dataset subclass.
        if (is.na(datasetType)) {
            datasetType <- .getClassName(dataset)
        } else if (.getClassName(dataset) != datasetType) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all datasets must have the same type")
        }

        data <- as.data.frame(dataset, niceColumnNamesEnabled = FALSE)

        # Fall back to the list index when the dataset has no id of its own.
        datasetId <- ifelse(!is.null(dataset$getId()) && !is.na(dataset$getId()), dataset$getId(), i)
        data <- cbind(rep(datasetId, nrow(data)), data)
        colnames(data)[1] <- "datasetId"
        if (!is.null(dataset$getDescription()) && !is.na(dataset$getDescription())) {
            data <- cbind(data, rep(dataset$getDescription(), nrow(data)))
            colnames(data)[ncol(data)] <- "description"
        }
        if (is.null(dataFrames)) {
            dataFrames <- data
        } else {
            dataFrames <- rbind(dataFrames, data)
        }
    }
    if (is.null(dataFrames)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to bind datasets")
    }

    # NOTE(review): the 'row.names' and 'col.names' arguments are accepted (and
    # documented above) but are NOT forwarded here; the output always uses
    # row.names = FALSE and col.names = TRUE, presumably to keep the file
    # readable by readDatasets() -- confirm whether this is intended.
    utils::write.table(
        x = dataFrames, file = file, append = append, quote = quote,
        sep = sep, eol = eol, na = na, dec = dec,
        row.names = FALSE, col.names = TRUE,
        qmethod = qmethod, fileEncoding = fileEncoding
    )
}

# Internal dispatcher behind getDataset()/getDataSet():
# decides from the '...' arguments whether to return an existing dataset, build
# an enrichment dataset from subsets, create an example dataset, derive a means
# dataset from 'emmeans' result objects, or construct a Dataset* object from a
# data.frame / named data vectors.
.getDataset <- function(..., floatingPointNumbersEnabled = FALSE) {
    args <- list(...)
    if (length(args) == 0) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data.frame, data vectors, or datasets expected")
    }

    # Case 1: one or more Dataset objects were passed in directly.
    if (.optionalArgsContainsDatasets(...)) {
        if (length(args) == 1) {
            return(args[[1]])
        }

        # A single dataset plus a trial design: clone the dataset (deep copy)
        # and attach the design to the copy.
        design <- .getDesignFromArgs(...)
        if (length(args) == 2 && !is.null(design)) {
            dataset <- .getDatasetFromArgs(...)
            if (!is.null(dataset)) {
                dataset <- dataset$copy(shallow = FALSE)
                dataset$.design <- design
                return(dataset)
            }
        }

        # Several datasets: interpret them as enrichment subsets (S1, S2, ..., F/R).
        return(.getEnrichmentDatasetFromArgs(...))
    }

    # Case 2: a built-in example dataset was requested via example = "...".
    exampleType <- args[["example"]]
    if (!is.null(exampleType) && exampleType %in% c("means", "rates", "survival")) {
        return(.getDatasetExample(exampleType = exampleType))
    }

    # Case 3: a plain list (not a data.frame) is assumed to hold per-stage
    # 'emmeans' results from which a means dataset is derived.
    if (length(args) == 1 && !is.null(args[[1]]) && is.list(args[[1]]) && !is.data.frame(args[[1]])) {
        return(.getDatasetMeansFromModelsByStage(emmeansResults = args[[1]]))
    }
    emmeansResults <- .getDatasetMeansModelObjectsList(args)
    if (!is.null(emmeansResults) && length(emmeansResults) > 0) {
        return(.getDatasetMeansFromModelsByStage(emmeansResults = emmeansResults))
    }

    # Case 4: a data.frame or named stage-wise data vectors.
    dataFrame <- .getDataFrameFromArgs(...)
    design <- .getDesignFromArgs(...)
    if (is.null(dataFrame)) {
        args <- .removeDesignFromArgs(args)
        paramNames <- names(args)
        paramNames <- paramNames[paramNames != ""]
        numberOfParameters <- length(args)
        # An unnamed leading trial design argument does not count as a data parameter.
        if (numberOfParameters > 0 && names(args)[1] == "" && .isTrialDesign(args[[1]])) {
            numberOfParameters <- numberOfParameters - 1
        }
        if (length(paramNames) != numberOfParameters) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all parameters must be named")
        }
        if (length(paramNames) != length(unique(paramNames))) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the parameter names must be unique")
        }
        dataFrame <- .createDataFrame(...)
    }

    # Identify the endpoint type from the argument names and construct the
    # matching Dataset subclass.
    enrichmentEnabled <- .isDataObjectEnrichment(...)
    if (.isDataObjectMeans(...)) {
        return(DatasetMeans(
            dataFrame = dataFrame,
            floatingPointNumbersEnabled = floatingPointNumbersEnabled,
            enrichmentEnabled = enrichmentEnabled,
            .design = design
        ))
    }
    if (.isDataObjectRates(...)) {
        return(DatasetRates(
            dataFrame = dataFrame,
            floatingPointNumbersEnabled = floatingPointNumbersEnabled,
            enrichmentEnabled = enrichmentEnabled,
            .design = design
        ))
    }
    if (.isDataObjectNonStratifiedEnrichmentSurvival(...)) {
        return(DatasetEnrichmentSurvival(
            dataFrame = dataFrame,
            floatingPointNumbersEnabled = floatingPointNumbersEnabled,
            enrichmentEnabled = enrichmentEnabled,
            .design = design
        ))
    }
    if (.isDataObjectSurvival(...)) {
        return(DatasetSurvival(
            dataFrame = dataFrame,
            floatingPointNumbersEnabled = floatingPointNumbersEnabled,
            enrichmentEnabled = enrichmentEnabled,
            .design = design
        ))
    }

    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "failed to identify dataset type")
}

#' @title
#' Get Dataset
#'
#' @description
#' Creates a dataset object and returns it.
#'
#' @param ... A \code{data.frame} or some data vectors defining the dataset.
#' @param floatingPointNumbersEnabled If \code{TRUE},
#' sample sizes and event numbers can be specified as floating-point numbers
#' (this makes sense, e.g., for theoretical comparisons); \cr
#' by default \code{floatingPointNumbersEnabled = FALSE}, i.e.,
#' sample sizes and event numbers defined as floating-point numbers will be truncated.
#'
#' @details
#' The different dataset types \code{DatasetMeans}, \code{DatasetRates}, or
#' \code{DatasetSurvival} can be created as follows:
#' \itemize{
#'   \item An element of \code{\link{DatasetMeans}} for one sample is created by \cr
#'   \code{getDataset(sampleSizes =, means =, stDevs =)} where \cr
#'   \code{sampleSizes}, \code{means}, \code{stDevs} are vectors with stage-wise sample sizes,
#'   means and standard deviations of length given by the number of available stages.
#'   \item An element of \code{\link{DatasetMeans}} for two samples is created by \cr
#'   \code{getDataset(sampleSizes1 =, sampleSizes2 =, means1 =, means2 =, } \cr
#'   \code{stDevs1 =, stDevs2 =)} where
#'   \code{sampleSizes1}, \code{sampleSizes2}, \code{means1}, \code{means2},
#'   \code{stDevs1}, \code{stDevs2} are vectors with
#'   stage-wise sample sizes, means and standard deviations for the two treatment groups
#'   of length given by the number of available stages.
#'   \item An element of \code{\link{DatasetRates}} for one sample is created by \cr
#'   \code{getDataset(sampleSizes =, events =)} where \code{sampleSizes}, \code{events} are vectors
#'   with stage-wise sample sizes and events of length given by the number of available stages.
#'   \item An element of \code{\link{DatasetRates}} for two samples is created by \cr
#'   \code{getDataset(sampleSizes1 =, sampleSizes2 =, events1 =, events2 =)} where
#'   \code{sampleSizes1}, \code{sampleSizes2}, \code{events1}, \code{events2}
#'   are vectors with stage-wise sample sizes
#'   and events for the two treatment groups of length given by the number of available stages.
#'   \item An element of \code{\link{DatasetSurvival}} is created by \cr
#'   \code{getDataset(events =, logRanks =, allocationRatios =)} where
#'   \code{events}, \code{logRanks}, and \code{allocation ratios} are the stage-wise events,
#'   (one-sided) logrank statistics, and allocation ratios.
#'   \item An element of \code{\link{DatasetMeans}}, \code{\link{DatasetRates}}, and \code{\link{DatasetSurvival}}
#'   for more than one comparison is created by adding subsequent digits to the variable names.
#'   The system can analyze these data in a multi-arm many-to-one comparison setting where the
#'   group with the highest index represents the control group.
#' }
#' Prefix \code{overall[Capital case of first letter of variable name]...} for the variable
#' names enables entering the overall (cumulative) results and calculates stage-wise statistics.
#' Since rpact version 3.2, the prefix \code{cumulative[Capital case of first letter of variable name]...} or
#' \code{cum[Capital case of first letter of variable name]...} can alternatively be used for this.
#'
#' \code{n} can be used in place of \code{samplesizes}.
#'
#' Note that in survival design usually the overall (cumulative) events and logrank test statistics are provided
#' in the output, so \cr
#' \code{getDataset(cumulativeEvents=, cumulativeLogRanks =, cumulativeAllocationRatios =)} \cr
#' is the usual command for entering survival data. Note also that for \code{cumulativeLogranks} also the
#' z scores from a Cox regression can be used.
#'
#' For multi-arm designs, the index refers to the considered comparison. For example,\cr
#' \code{
#' getDataset(events1 = c(13, 33), logRanks1 = c(1.23, 1.55), events2 = c(16, NA), logRanks2 = c(1.55, NA))
#' } \cr
#' refers to the case where one active arm (1) is considered at both stages whereas active arm 2
#' was dropped at interim. Number of events and logrank statistics are entered for the corresponding
#' comparison to control (see Examples).
#'
#' For enrichment designs, the comparison of two samples is provided for an unstratified
#' (sub-population wise) or stratified data input.\cr
#' For unstratified (sub-population wise) data input the data sets are defined for the sub-populations
#' S1, S2, ..., F, where F refers to the full populations. Use of \code{getDataset(S1 = , S2, ..., F = )}
#' defines the data set to be used in \code{\link[=getAnalysisResults]{getAnalysisResults()}} (see examples)\cr
#' For stratified data input the data sets are defined for the strata S1, S12, S2, ..., R, where R
#' refers to the remainder of the strata such that the union of all sets is the full population.
#' Use of \code{getDataset(S1 = , S12 = , S2, ..., R = )} defines the data set to be used in
#' \code{\link[=getAnalysisResults]{getAnalysisResults()}} (see examples)\cr
#' For survival data, for enrichment designs the log-rank statistics should be entered as stratified
#' log-rank statistics in order to provide strong control of Type I error rate. For stratified data input,
#' the variables to be specified in \code{getDataset()} are \code{events}, \code{expectedEvents},
#' \code{varianceEvents}, and \code{allocationRatios} or \code{overallEvents}, \code{overallExpectedEvents},
#' \code{overallVarianceEvents}, and \code{overallAllocationRatios}. From this, (stratified) log-rank tests are
#' calculated.
#'
#' @template return_object_dataset
#'
#' @template examples_get_dataset
#'
#' @include f_analysis_base.R
#' @include f_analysis_utilities.R
#'
#' @export
#'
getDataset <- function(..., floatingPointNumbersEnabled = FALSE) {
    dataset <- .getDataset(floatingPointNumbersEnabled = floatingPointNumbersEnabled, ...)
    # Enrichment analyses are only supported for exactly two treatment groups.
    if (dataset$.enrichmentEnabled && dataset$getNumberOfGroups() != 2) {
        warning("Only population enrichment data with 2 groups can be analyzed but ",
            dataset$getNumberOfGroups(), " group",
            ifelse(dataset$getNumberOfGroups() == 1, " is", "s are"), " defined",
            call. = FALSE
        )
    }
    return(dataset)
}

#' @rdname getDataset
#' @export
getDataSet <- function(..., floatingPointNumbersEnabled = FALSE) {
    return(getDataset(floatingPointNumbersEnabled = floatingPointNumbersEnabled, ...))
}

# Collects all 'emmGrid' (emmeans) objects from an argument list.
# Returns NULL if the list contains no such objects; otherwise returns the
# 'emmGrid' objects and warns about every non-'emmGrid' argument that is ignored.
.getDatasetMeansModelObjectsList <- function(args) {
    if (is.null(args) || length(args) == 0 || !is.list(args)) {
        return(NULL)
    }
    emmeansResults <- list()
    for (arg in args) {
        if (inherits(arg, "emmGrid")) {
            emmeansResults[[length(emmeansResults) + 1]] <- arg
        }
    }
    if (length(emmeansResults) == 0) {
        return(NULL)
    }
    argNames <- names(args)
    for (i in 1:length(args)) {
        arg <- args[[i]]
        if (!inherits(arg, "emmGrid")) {
            argName <- argNames[i]
            argInfo <- ""
            if (length(argName) == 1 && argName != "") {
                argInfo <- paste0(sQuote(argName), " ")
            }
            argInfo <- paste0(argInfo, "(", .arrayToString(arg), ")")
            warning("Argument ", argInfo, " will be ignored because only 'emmGrid' objects will be respected")
        }
    }
    return(emmeansResults)
}

# Derives a standard deviation from a standard error and a sample size.
# If stDevCalcMode == "t" (or the model is not an lm fit) and a valid positive,
# finite df is available, a t-quantile based scaling is used; otherwise the
# plain normal approximation stDev = SE * sqrt(n) is applied.
.getStandardDeviationFromStandardError <- function(sampleSize, standardError, ...,
        dfValue = NA_real_, alpha = 0.05, lmEnabled = TRUE, stDevCalcMode = "auto") {
    # Fixed misspelled local variable name (was 'qtCalcEnablbled').
    qtCalcEnabled <- length(stDevCalcMode) == 1 && !is.na(stDevCalcMode) && stDevCalcMode == "t"
    if ((qtCalcEnabled || !lmEnabled) && !is.na(dfValue) && !is.infinite(dfValue) && dfValue > 0) {
        qValue <- stats::qt(1 - alpha / 2, df = dfValue)
        # NOTE(review): the factor 2 / qValue rescales the normal-based SE by the
        # ratio of a two-sided t quantile -- confirm the intended formula.
        stDev <- standardError * 2 / qValue * sqrt(sampleSize)
    } else {
        stDev <- standardError * sqrt(sampleSize)
    }
    return(stDev)
}

# Builds a stage-wise DatasetMeans from a list of per-stage 'emmeans' results.
.getDatasetMeansFromModelsByStage <- function(emmeansResults, correctGroupOrder = TRUE) {
    # Bug fix: the error messages quoted the argument *value*
    # (sQuote(emmeansResults)) instead of the argument *name*, producing
    # garbled or empty text; now sQuote("emmeansResults") is used.
    if (is.null(emmeansResults)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("emmeansResults"), " must be a non-empty list")
    }
    if (!is.list(emmeansResults)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("emmeansResults"), " must be a list")
    }
    if (length(emmeansResults) == 0) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("emmeansResults"), " must be not empty")
    }
    for (stage in 1:length(emmeansResults)) {
        if
(!inherits(emmeansResults[[stage]], "emmGrid")) { stop(sprintf( paste0( "%s%s must contain %s objects created by emmeans(x), ", "where x is a linear model result (one object per stage; class is %s at stage %s)" ), C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("emmeansResults"), sQuote("emmGrid"), .getClassName(emmeansResults[[stage]]), stage )) } } stages <- integer(0) groups <- integer(0) means <- numeric(0) stDevs <- numeric(0) sampleSizes <- numeric(0) lmEnabled <- TRUE tryCatch( { modelCall <- emmeansResults[[1]]@model.info$call modelFunction <- as.character(modelCall)[1] lmEnabled <- modelFunction == "lm" if (!grepl(paste0("::", modelFunction), modelFunction)) { packageName <- .getPackageName(modelFunction) if (!is.na(packageName)) { modelFunction <- paste0(packageName, "::", modelFunction) } } if (lmEnabled) { warning("When using ", modelFunction, "() ", "the estimated marginal means and standard deviations can be inaccurate ", "and analysis results based on this values may be imprecise", call. = FALSE ) } else { warning("Using ", modelFunction, " emmeans result objects as ", "arguments of getDataset() is experminental in this rpact version and not fully validated", call. = FALSE ) } }, error = function(e) { warning("Using emmeans result objects as ", "arguments of getDataset() is experminental in this rpact version and not fully validated", call. 
= FALSE ) } ) stDevCalcMode <- getOption("rpact.dataset.stdev.calc.mode", "auto") # auto, sigma, norm, t for (stage in 1:length(emmeansResults)) { emmeansResult <- emmeansResults[[stage]] emmeansResultsSummary <- summary(emmeansResult) emmeansResultsList <- as.list(emmeansResult) if (is.null(emmeansResultsSummary[["emmean"]])) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "the objects in summary(emmeansResults) must contain the field 'emmean'" ) } for (expectedField in c("sigma", "extras")) { if (is.null(emmeansResultsList[[expectedField]])) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "the objects in as.list(emmeansResults) must contain the field ", sQuote(expectedField) ) } } numberOfGroups <- length(emmeansResultsSummary$emmean) rpactGroupNumbers <- 1:numberOfGroups if (correctGroupOrder) { rpactGroupNumbers <- 1 if (numberOfGroups > 1) { rpactGroupNumbers <- c(2:numberOfGroups, rpactGroupNumbers) } } for (group in 1:length(emmeansResultsSummary$emmean)) { stages <- c(stages, stage) groups <- c(groups, group) rpactGroupNumber <- rpactGroupNumbers[group] standardError <- emmeansResultsSummary$SE[rpactGroupNumber] sampleSize <- emmeansResultsList$extras[rpactGroupNumber, ] meanValue <- emmeansResultsSummary$emmean[rpactGroupNumber] dfValue <- emmeansResultsSummary$df[rpactGroupNumber] if (length(stDevCalcMode) == 1 && !is.na(stDevCalcMode) && stDevCalcMode == "sigma") { # pooled standard deviation from emmeans stDev <- emmeansResultsList$sigma } else { stDev <- .getStandardDeviationFromStandardError(sampleSize, standardError, dfValue = dfValue, lmEnabled = lmEnabled, stDevCalcMode = stDevCalcMode ) } means <- c(means, meanValue) stDevs <- c(stDevs, stDev) sampleSizes <- c(sampleSizes, sampleSize) } } data <- data.frame( stages = stages, groups = groups, means = means, stDevs = stDevs, sampleSizes = sampleSizes ) data <- data[order(data$stages, data$groups), ] dataWide <- stats::reshape(data = data, direction = "wide", idvar = "stages", timevar = "groups") colnames(dataWide) 
<- gsub("\\.", "", colnames(dataWide)) return(getDataset(dataWide)) } .optionalArgsContainsDatasets <- function(...) { args <- list(...) if (length(args) == 0) { return(FALSE) } for (arg in args) { if (inherits(arg, "Dataset")) { return(TRUE) } } return(FALSE) } .getSubsetsFromArgs <- function(...) { args <- list(...) if (length(args) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "one or more subset datasets expected") } subsetNames <- names(args) if (is.null(subsetNames)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all subsets must be named") } if (!("R" %in% subsetNames) && !("F" %in% subsetNames)) { stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, '"R" (stratified analysis)" or "F" (non-stratified analysis) must be defined as subset' ) } subsetNumbers <- gsub("\\D", "", subsetNames) subsetNumbers <- subsetNumbers[subsetNumbers != ""] # & nchar(subsetNumbers) == 1 if (length(subsetNumbers) == 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all subset names (", .arrayToString(subsetNames), ") must be \"S[n]\", \"R\", or \"F\", ", "where [n] is a number with increasing digits (starting with 1)" ) } stratifiedInput <- "R" %in% subsetNames subsetNumbers <- paste0(subsetNumbers, collapse = "") subsetNumbers <- strsplit(subsetNumbers, "")[[1]] subsetNumbers <- as.integer(subsetNumbers) gMax <- max(subsetNumbers) + 1 validSubsetNames <- .createSubsetsByGMax(gMax, stratifiedInput = stratifiedInput, all = FALSE) for (subsetName in subsetNames) { if (subsetName == "") { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all subsets must be named") } if (!(subsetName %in% validSubsetNames)) { suffix <- ifelse(stratifiedInput, " (stratified analysis)", " (non-stratified analysis)") if (length(validSubsetNames) < 10) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "invalid subset name (", subsetName, "); ", "valid names are ", .arrayToString(validSubsetNames), suffix ) } else { restFull <- ifelse(stratifiedInput, '"R"', '"F"') stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "invalid subset name (", 
subsetName, "): ", "all subset names must be \"S[n]\" or ", restFull, ", ", "where [n] is a number with increasing digits", suffix ) } } } subsets <- NULL subsetType <- NA_character_ emptySubsetNames <- validSubsetNames[!(validSubsetNames %in% subsetNames)] for (subsetName in subsetNames) { subset <- args[[subsetName]] if (is.null(subset) || (!isS4(subset) && is.na(subset))) { emptySubsetNames <- c(emptySubsetNames, subsetName) } else { if (!.isDataset(subset)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "subset ", subsetName, " is not a dataset (is ", .getClassName(subset), ")" ) } if (!is.na(subsetType) && subsetType != .getClassName(subset)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all subsets must have the same type (found ", subsetType, " and ", .getClassName(subset), ")" ) } subsetType <- .getClassName(subset) if (is.null(subset[[".data"]])) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "subset ", subsetName, " does not contain field '.data'" ) } subset <- subset$.data subset$subset <- rep(subsetName, nrow(subset)) if (is.null(subsets)) { subsets <- subset } else { subsets <- rbind(subsets, subset) } } } if (length(emptySubsetNames) > 0) { emptySubsetNames <- unique(emptySubsetNames) template <- subsets[subsets$subset == ifelse(stratifiedInput, "R", "F"), ] colNames <- colnames(template) colNames <- colNames[!(colNames %in% c("stage", "group", "subset"))] for (colName in colNames) { template[[colName]] <- rep(NA_real_, nrow(template)) } for (subsetName in emptySubsetNames) { template$subset <- rep(subsetName, nrow(template)) subsets <- rbind(subsets, template) } if (length(emptySubsetNames) == 1) { warning("The undefined subset ", emptySubsetNames, " was defined as empty subset", call. = FALSE ) } else { warning(gettextf( "The %s undefined subsets %s were defined as empty subsets", length(emptySubsetNames), .arrayToString(emptySubsetNames) ), call. 
            = FALSE)
        }
    }
    return(subsets)
}

# Checks that every endpoint parameter is valid (not NULL/Inf) at stage 1;
# NA at stage 1 is only tolerated for subsets that are entirely undefined.
.validateEnrichmentDataFrameAtFirstStage <- function(dataFrame, params) {
    dataFrameStage1 <- dataFrame[dataFrame$stage == 1, ]
    for (param in params) {
        paramValue <- dataFrameStage1[[param]]
        # NOTE(review): wrapping is.null() (always length 1) in any() together
        # with the any(is.infinite(...)) term is redundant -- behavior is kept.
        if (any(is.null(paramValue) || any(is.infinite(paramValue)))) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                gettextf(
                    "all %s values (%s) at first stage must be valid",
                    sQuote(param), .arrayToString(paramValue, maxLength = 10)
                )
            )
        }
        if (any(is.na(paramValue))) {
            subsets <- unique(dataFrame$subset)
            for (s in subsets) {
                subData <- dataFrame[dataFrame$subset == s, ]
                subsetParamValues <- subData[[param]]
                # A partially defined subset must not have NA at the first stage.
                if (!all(is.na(subsetParamValues)) && any(is.na(subsetParamValues[subData$stage == 1]))) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        gettextf(
                            "all %s values (%s) at first stage must be valid (NA is not allowed)",
                            sQuote(param), .arrayToString(paramValue, maxLength = 10)
                        )
                    )
                }
            }
        }
    }
}

# Returns the endpoint-specific column names, i.e. everything except the
# structural columns 'stage', 'group' and 'subset'.
.getEndpointSpecificDataFrameParameterNames <- function(dataFrame) {
    paramNames <- colnames(dataFrame)
    paramNames <- paramNames[!(paramNames %in% c("stage", "group", "subset"))]
    return(paramNames)
}

# Validates deselection (NA) consistency: within a row either all or none of
# the endpoint parameters may be NA, and once a subset is deselected at some
# stage it must stay deselected at all later stages.
.validateEnrichmentDataFrameDeselection <- function(dataFrame) {
    paramNames <- .getEndpointSpecificDataFrameParameterNames(dataFrame)
    for (i in 1:nrow(dataFrame)) {
        row <- dataFrame[i, paramNames]
        if (any(is.na(row)) && !all(is.na(row))) {
            stop(
                C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                gettextf(
                    paste0(
                        "inconsistent deselection in group %s at stage %s (",
                        "%s: all or none must be NA)"
                    ),
                    dataFrame$group[i], dataFrame$stage[i],
                    .arrayToString(paramNames, maxCharacters = 40)
                )
            )
        }
    }
    subsets <- unique(dataFrame$subset)
    for (s in subsets) {
        deselectedStage <- 0
        for (stage in unique(dataFrame$stage)) {
            subData <- dataFrame[dataFrame$subset == s & dataFrame$stage == stage, paramNames]
            if (deselectedStage > 0 && !all(is.na(subData))) {
                stop(
                    C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                    gettextf(paste0(
                        "%s was deselected at stage %s ",
                        "and therefore must be also deselected in the following stages, ",
                        "but is no longer deselected in stage %s"
                    ), s, deselectedStage, stage)
                )
            }
            if (any(is.na(subData))) {
                deselectedStage <- stage
            }
        }
    }
}

# Validates an enrichment data frame for a means endpoint: positive standard
# deviations and sample sizes, valid first stage, consistent deselection, and
# plausibility of each subset against the full population F
# (subset stDev < full stDev, subset sample size <= full sample size).
.validateEnrichmentDataFrameMeans <- function(dataFrame) {
    if (any(na.omit(dataFrame$stDev) <= 0) || any(na.omit(dataFrame$overallStDev) <= 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all standard deviations must be > 0")
    }
    if (any(na.omit(dataFrame$sampleSize) <= 0) || any(na.omit(dataFrame$overallSampleSize) <= 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all sample sizes must be > 0")
    }
    .validateEnrichmentDataFrameAtFirstStage(dataFrame,
        params = c("sampleSize", "overallSampleSize", "mean", "overallMean", "stDev", "overallStDev")
    )
    .validateEnrichmentDataFrameDeselection(dataFrame)
    subsets <- unique(dataFrame$subset)
    if ("F" %in% subsets) {
        subsets <- subsets[subsets != "F"]
        fullData <- dataFrame[dataFrame$subset == "F", ]
        for (s in subsets) {
            for (stage in unique(dataFrame$stage)) {
                for (group in unique(dataFrame$group)) {
                    subData <- dataFrame[dataFrame$subset == s &
                        dataFrame$stage == stage & dataFrame$group == group, ]
                    stDevFull <- na.omit(fullData$stDev[fullData$stage == stage & fullData$group == group])
                    stDevSubset <- na.omit(subData$stDev)
                    if (length(stDevFull) > 0 && length(stDevSubset) > 0 && any(stDevFull <= stDevSubset)) {
                        stop(
                            C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                            gettextf(
                                "'stDev' F (%s) must be > 'stDev' %s (%s) in group %s at stage %s",
                                .arrayToString(stDevFull), s, .arrayToString(stDevSubset), group, stage
                            )
                        )
                    }
                    sampleSizeFull <- na.omit(fullData$sampleSize[fullData$stage == stage & fullData$group == group])
                    sampleSizeSubset <- na.omit(subData$sampleSize)
                    if (length(sampleSizeFull) > 0 && length(sampleSizeSubset) > 0 &&
                            any(sampleSizeFull < sampleSizeSubset)) {
                        stop(
                            C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                            gettextf(
                                "'sampleSize' F (%s) must be >= 'sampleSize' %s (%s) in group %s at stage %s",
                                .arrayToString(sampleSizeFull), s, .arrayToString(sampleSizeSubset), group, stage
                            )
                        )
                    }
                }
            }
        }
    }
}
# Validates an enrichment data frame for a survival endpoint: non-negative
# events, valid first stage, consistent deselection, and subset events not
# exceeding the full-population (F) events.
.validateEnrichmentDataFrameSurvival <- function(dataFrame) {
    if (any(na.omit(dataFrame$event) < 0) || any(na.omit(dataFrame$overallEvent) < 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all events must be >= 0")
    }
    .validateEnrichmentDataFrameAtFirstStage(dataFrame,
        params = c("event", "overallEvent")
    )
    .validateEnrichmentDataFrameDeselection(dataFrame)
    subsets <- unique(dataFrame$subset)
    if ("F" %in% subsets) {
        subsets <- subsets[subsets != "F"]
        fullData <- dataFrame[dataFrame$subset == "F", ]
        for (s in subsets) {
            for (stage in unique(dataFrame$stage)) {
                for (group in unique(dataFrame$group)) {
                    subData <- dataFrame[dataFrame$subset == s &
                        dataFrame$stage == stage & dataFrame$group == group, ]
                    eventFull <- na.omit(fullData$event[fullData$stage == stage & fullData$group == group])
                    eventSubset <- na.omit(subData$event)
                    if (length(eventFull) > 0 && length(eventSubset) > 0 && any(eventFull < eventSubset)) {
                        stop(
                            C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                            gettextf(
                                "'event' F (%s) must be >= 'event' %s (%s) in group %s at stage %s",
                                .arrayToString(eventFull), s, .arrayToString(eventSubset), group, stage
                            )
                        )
                    }
                }
            }
        }
    }
}

# Validates an enrichment data frame for a rates endpoint: positive sample
# sizes, valid first stage, consistent deselection, subset sample sizes not
# exceeding F; event-related checks are delegated to the survival validator.
.validateEnrichmentDataFrameRates <- function(dataFrame) {
    if (any(na.omit(dataFrame$sampleSize) <= 0) || any(na.omit(dataFrame$overallSampleSize) <= 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all sample sizes must be > 0")
    }
    .validateEnrichmentDataFrameAtFirstStage(dataFrame,
        params = c("sampleSize", "overallSampleSize")
    )
    .validateEnrichmentDataFrameDeselection(dataFrame)
    subsets <- unique(dataFrame$subset)
    if ("F" %in% subsets) {
        subsets <- subsets[subsets != "F"]
        fullData <- dataFrame[dataFrame$subset == "F", ]
        for (s in subsets) {
            for (stage in unique(dataFrame$stage)) {
                for (group in unique(dataFrame$group)) {
                    subData <- dataFrame[dataFrame$subset == s &
                        dataFrame$stage == stage & dataFrame$group == group, ]
                    sampleSizeFull <- na.omit(fullData$sampleSize[fullData$stage == stage & fullData$group == group])
                    sampleSizeSubset <- na.omit(subData$sampleSize)
                    if (length(sampleSizeFull) > 0 && length(sampleSizeSubset) > 0 &&
                            any(sampleSizeFull < sampleSizeSubset)) {
                        stop(
                            C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                            gettextf(
                                "'sampleSize' F (%s) must be >= 'sampleSize' %s (%s) in group %s at stage %s",
                                .arrayToString(sampleSizeFull), s, .arrayToString(sampleSizeSubset), group, stage
                            )
                        )
                    }
                }
            }
        }
    }
    # Rates data also contain event columns; validate them with the survival checks.
    .validateEnrichmentDataFrameSurvival(dataFrame)
}

# Checks that every subset defines the complete stage sequence 1..kMax and
# that all subsets agree on the same kMax.
.validateEnrichmentDataFrameHasConsistentNumberOfStages <- function(dataFrame) {
    subsets <- unique(dataFrame$subset)
    kMaxList <- list()
    for (s in subsets) {
        # NOTE(review): sorting the stage numbers as *character* before the
        # as.integer() conversion orders "10" before "2"; for designs with 10 or
        # more stages the identical(1:kMax, ...) check below would fail
        # spuriously -- confirm whether kMax is bounded below 10 elsewhere.
        subsetStages <- as.integer(sort(unique(na.omit(as.character(dataFrame$stage[dataFrame$subset == s])))))
        kMax <- max(subsetStages)
        if (!identical(1:kMax, subsetStages)) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                gettextf("subset %s has incomplete stages (%s)", s, .arrayToString(subsetStages))
            )
        }
        kMaxList[[s]] <- kMax
    }
    kMax <- unique(unlist(kMaxList))
    if (length(kMax) > 1) {
        stop(
            C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
            "all subsets must have the identical number of stages defined (kMax: ",
            .listToString(kMaxList), ")"
        )
    }
}

# Dispatches the endpoint-specific validation based on the column names and
# additionally checks, for stratified input (R present), that deselection of a
# subset S[n] implies deselection of the remainder R.
.validateEnrichmentDataFrame <- function(dataFrame) {
    paramNames <- colnames(dataFrame)
    if (any(grepl("(S|s)tDev", paramNames))) {
        .validateEnrichmentDataFrameMeans(dataFrame)
    } else if (any(grepl("(S|s)ampleSize", paramNames)) && any(grepl("(E|e)vent", paramNames))) {
        .validateEnrichmentDataFrameRates(dataFrame)
    } else if (any(grepl("(L|l)ogRank", paramNames)) || any(grepl("(E|e)xpectedEvent", paramNames))) {
        .validateEnrichmentDataFrameSurvival(dataFrame)
    } else {
        # NOTE(review): this print() looks like leftover debug output that goes
        # to the console before the error is raised -- confirm and remove.
        print(paramNames)
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "could not identify the endpoint of the specified dataset")
    }
    subsets <- unique(dataFrame$subset)
    if ("R" %in% subsets) {
        paramNames <- .getEndpointSpecificDataFrameParameterNames(dataFrame)
        paramName <- paramNames[1]
        subsets <- subsets[subsets != "R"]
        subsets <- subsets[grepl("^S\\d$", subsets)]
        if (length(subsets) > 0) {
            restData <- dataFrame[dataFrame$subset ==
                "R", ]
            for (s in subsets) {
                stages <- unique(dataFrame$stage)
                stages <- stages[stages != 1]
                if (length(stages) > 0) {
                    for (stage in stages) {
                        for (group in unique(dataFrame$group)) {
                            subData <- dataFrame[dataFrame$subset == s &
                                dataFrame$stage == stage & dataFrame$group == group, ]
                            paramValueRest <- restData[[paramName]][restData$stage == stage & restData$group == group]
                            paramValueSubset <- subData[[paramName]]
                            # If a subset is deselected (NA) at a later stage, the
                            # remainder R must be deselected there as well.
                            if (length(paramValueRest) > 0 && length(paramValueSubset) > 0 &&
                                    any(is.na(paramValueSubset)) && !all(is.na(paramValueRest))) {
                                stop(
                                    C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                                    gettextf(
                                        paste0(
                                            "if %s is deselected (NA) then R also must be deselected (NA) but, e.g., ",
                                            "%s R is %s in group %s at stage %s"
                                        ),
                                        s, sQuote(paramName),
                                        .arrayToString(paramValueRest, vectorLookAndFeelEnabled = TRUE),
                                        group, stage
                                    )
                                )
                            }
                        }
                    }
                }
            }
        }
    }
    .validateEnrichmentDataFrameHasConsistentNumberOfStages(dataFrame)
}

# Builds the combined subset data frame and drops columns that are NA for
# every row (parameters not provided by any subset).
# NOTE(review): the '^S\\d$' filter in .validateEnrichmentDataFrame above only
# matches single-digit subset names (S1..S9) -- confirm that more than 9
# sub-populations are not supported.
.getEnrichmentDataFrameFromArgs <- function(...) {
    dataFrame <- .getSubsetsFromArgs(...)
    validColumns <- c()
    for (j in 1:ncol(dataFrame)) {
        if (!all(is.na(dataFrame[, j]))) {
            validColumns <- c(validColumns, j)
        }
    }
    if (length(validColumns) > 0) {
        dataFrame <- dataFrame[, validColumns]
    }
    return(dataFrame)
}

# Creates an enrichment dataset from named subset arguments:
# combine -> validate -> reshape to wide format -> construct the dataset.
.getEnrichmentDatasetFromArgs <- function(...) {
    dataFrame <- .getEnrichmentDataFrameFromArgs(...)
    .validateEnrichmentDataFrame(dataFrame)
    dataFrame <- .getWideFormat(dataFrame)
    return(.getDataset(dataFrame = dataFrame))
}

# Returns one of three hard-coded multi-arm example datasets
# ("means", "rates", or "survival") used by getDataset(example = ...).
.getDatasetExample <- function(exampleType) {
    if (exampleType == "means") {
        return(getDataset(
            n1 = c(13, 25),
            n2 = c(15, NA),
            n3 = c(14, 27),
            n4 = c(12, 29),
            means1 = c(24.2, 22.2),
            means2 = c(18.8, NA),
            means3 = c(26.7, 27.7),
            means4 = c(9.2, 12.2),
            stDevs1 = c(24.4, 22.1),
            stDevs2 = c(21.2, NA),
            stDevs3 = c(25.6, 23.2),
            stDevs4 = c(21.5, 22.7)
        ))
    } else if (exampleType == "rates") {
        return(getDataset(
            n1 = c(23, 25),
            n2 = c(25, NA),
            n3 = c(24, 27),
            n4 = c(22, 29),
            events1 = c(15, 12),
            events2 = c(19, NA),
            events3 = c(18, 22),
            events4 = c(12, 13)
        ))
    } else if (exampleType == "survival") {
        return(getDataset(
            events1 = c(25, 32),
            events2 = c(18, NA),
            events3 = c(22, 36),
            logRanks1 = c(2.2, 1.8),
            logRanks2 = c(1.99, NA),
            logRanks3 = c(2.32, 2.11)
        ))
    }
    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'exampleType' (", exampleType, ") is not allowed")
}

#'
#' @name Dataset
#'
#' @title
#' Dataset
#'
#' @description
#' Basic class for datasets.
#'
#' @template field_stages
#' @template field_groups
#'
#' @details
#' \code{Dataset} is the basic class for
#' \itemize{
#'   \item \code{\link{DatasetMeans}},
#'   \item \code{\link{DatasetRates}},
#'   \item \code{\link{DatasetSurvival}}, and
#'   \item \code{\link{DatasetEnrichmentSurvival}}.
#' }
#' This basic class contains the fields \code{stages} and \code{groups} and several commonly used
#' functions.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include f_core_assertions.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
Dataset <- setRefClass("Dataset",
    contains = "ParameterSet",
    fields = list(
        .data = "data.frame",                     # canonical long-format storage (stage/group/subset + values)
        .plotSettings = "PlotSettings",
        .id = "integer",
        .description = "character",
        .floatingPointNumbersEnabled = "logical", # if FALSE, sample sizes are truncated to integers
        .kMax = "integer",                        # number of stages at construction time
        .enrichmentEnabled = "logical",           # TRUE for enrichment designs (subset column in use)
        .inputType = "character",                 # "stagewise" or "overall", set by subclasses
        .design = "ANY",
        stages = "integer",
        groups = "integer",
        subsets = "character"
    ),
    methods = list(
        # Constructor: initializes parameter bookkeeping and, when a data frame
        # is given, fills the fields from it and validates the result
        # (validation is skipped for enrichment datasets).
        initialize = function(dataFrame, ..., floatingPointNumbersEnabled = FALSE, enrichmentEnabled = FALSE) {
            callSuper(
                .floatingPointNumbersEnabled = floatingPointNumbersEnabled,
                .enrichmentEnabled = enrichmentEnabled, ...
            )
            .plotSettings <<- PlotSettings()
            .parameterNames <<- .getParameterNames(dataset = .self)
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS
            .id <<- NA_integer_
            .description <<- NA_character_
            .inputType <<- NA_character_
            if (!missing(dataFrame)) {
                .initByDataFrame(dataFrame)
                .kMax <<- getNumberOfStages()
                if (!.enrichmentEnabled) {
                    .validateDataset()
                }
            }
        },
        # Accessor for the plot settings object.
        getPlotSettings = function() {
            return(.plotSettings)
        },
        show = function(showType = 1, digits = NA_integer_) {
            "Method for automatically printing dataset objects"
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        # Internal print dispatcher: "rcmd" prints the reproducible R command,
        # showType == 2 delegates to the parent class, otherwise a formatted
        # parameter listing is produced.
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            .resetCat()
            if (!is.null(showType) && length(showType) == 1 && !is.na(showType) &&
                    is.character(showType) && showType == "rcmd") {
                # re-wrap the generated R code so each argument group starts on its own line
                s <- strsplit(getObjectRCode(.self, stringWrapParagraphWidth = NULL), "), *")[[1]]
                s[2:length(s)] <- paste0("\t", s[2:length(s)])
                s <- paste0(s, collapse = "),\n")
                cat(s, "\n")
            } else if (showType == 2) {
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                .showParametersOfOneGroup(.getUserDefinedParameters(),
                    title = .toString(startWithUpperCase = TRUE), orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getGeneratedParameters(),
                    title = "Calculated data", orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
                if (!is.na(.description) && nchar(.description) > 0) {
                    .cat("Description: ", .description, "\n\n",
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                }
            }
        },
        # Populates 'stages', 'groups' and 'subsets' from the user data frame.
        # Accepts 'stage' or 'stages' as column name; enforces unique stage
        # numbers for non-enrichment data.
        .initByDataFrame = function(dataFrame) {
            if (!is.data.frame(dataFrame)) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'dataFrame' must be a data.frame (is an instance of class ",
                    .getClassName(dataFrame), ")"
                )
            }
            if (!.paramExists(dataFrame, "stage") && !.paramExists(dataFrame, "stages")) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'dataFrame' must contain parameter 'stages' or 'stage'"
                )
            }
            stages <<- as.integer(.getValuesByParameterName(dataFrame, c("stages", "stage")))
            if (!.enrichmentEnabled && length(unique(stages)) < length(stages)) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'stages' (", .arrayToString(stages),
                    ") must be a unique vector of stage numbers"
                )
            }
            # default: a single group; subclasses overwrite for multi-group input
            groups <<- rep(1L, length(stages))
            .setParameterType("groups", C_PARAM_USER_DEFINED)
            .setParameterType("stages", C_PARAM_USER_DEFINED)
            if (any(grepl("^subsets?\\d*$", colnames(dataFrame)))) {
                numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame,
                    c(C_KEY_WORDS_SAMPLE_SIZES, C_KEY_WORDS_LOG_RANKS))
                subsets <<- character(0)
                for (group in 1:numberOfTreatmentGroups) {
                    # numbered subset columns (subset1, subset2, ...) get the group suffix
                    suffix <- ifelse(any(grepl("^subsets?\\d+$", colnames(dataFrame))), group, "")
                    subsets <<- c(subsets, .getValuesByParameterName(dataFrame, C_KEY_WORDS_SUBSETS, suffix = suffix))
                }
                .setParameterType("subsets", C_PARAM_USER_DEFINED)
            } else {
                subsets <<- rep(NA_character_, length(stages))
            }
        },
        # Sanity check after construction: valid kMax and no NaN/Inf in any field.
        .validateDataset = function() {
            .assertIsValidKMax(kMax = getNumberOfStages())
            for (var in names(.self)) {
                values <- .self[[var]]
                if (any(is.nan(values)) || any(is.infinite(values))) {
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                        "'", var, "' (", .arrayToString(values),
                        ") contains illegal values, i.e., something went wrong"
                    )
                }
            }
        },
        # Checks that a value vector has one entry per stage (skipped for
        # enrichment datasets, where lengths differ by design).
        .validateValues = function(values, name) {
            if (.enrichmentEnabled) {
                return(invisible())
            }
            l1 <- length(unique(stages))
            l2 <- length(values)
            if (l1 != l2) {
                stop(
                    C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                    "there ", ifelse(l1 == 1,
                        paste("is", l1, "stage"),
                        paste("are", l1, "stages")
                    ), " defined",
                    " (", .arrayToString(unique(stages)), ") and '", name, "' has length ", l2
                )
            }
        },
        # Rebuilds the internal .data frame from the field vectors.
        .recreateDataFrame = function() {
            .data <<- data.frame(
                stage = factor(stages),
                group = factor(groups),
                subset = factor(subsets)
            )
        },
        # Inverse of .recreateDataFrame: copies .data columns back into fields.
        .setDataToVariables = function() {
            stages <<- as.integer(.data$stage)
            groups <<- as.integer(.data$group)
            subsets <<- as.character(.data$subset)
        },
        # Extends the stage/group/subset vectors with placeholder rows up to
        # kMax stages; subclasses append matching NA value rows afterwards.
        .fillWithNAs = function(kMax) {
            numberOfStages <- getNumberOfStages()
            .kMax <<- numberOfStages
            if (numberOfStages >= kMax) {
                return(invisible())
            }
            numberOfGroups <- getNumberOfGroups(survivalCorrectionEnabled = FALSE)
            if (.enrichmentEnabled) {
                for (stage in (numberOfStages + 1):kMax) {
                    for (group in 1:numberOfGroups) {
                        for (subset in levels(.data$subset)) {
                            stages <<- c(stages, stage)
                            groups <<- c(groups, group)
                            subsets <<- c(subsets, subset)
                        }
                    }
                }
            } else {
                for (stage in (numberOfStages + 1):kMax) {
                    for (group in 1:numberOfGroups) {
                        stages <<- c(stages, stage)
                        groups <<- c(groups, group)
                        subsets <<- c(subsets, NA_character_)
                    }
                }
            }
        },
        # Removes all rows with stage > kMax; returns the kept indices
        # (numeric(0) if nothing was trimmed) so subclasses can trim their
        # own value vectors with the same index set.
        .trim = function(kMax) {
            if (is.na(kMax)) {
                kMax <- .kMax
            }
            numberOfStages <- getNumberOfStages(FALSE)
            if (numberOfStages <= kMax) {
                return(invisible(numeric(0)))
            }
            indices <- which(stages <= kMax)
            stages <<- stages[indices]
            groups <<- groups[indices]
            subsets <<- subsets[indices]
            return(indices)
        },
        # Orders .data by stage and group; for enrichment data, subsets are
        # additionally ordered with "R" and "F" sorted last (mapped to "Z").
        .orderDataByStageAndGroup = function() {
            if (.enrichmentEnabled) {
                dat <- .data
                dat$char <- gsub("\\d", "", as.character(.data$subset))
                dat$char[dat$char == "R"] <- "Z"
                dat$char[dat$char == "F"] <- "Z"
                dat$num <- as.integer(gsub("\\D", "", as.character(.data$subset)))
                .data <<- .data[order(.data$stage, .data$group, dat$char, dat$num), ]
            } else {
                .data <<- .data[order(.data$stage, .data$group), ]
            }
        },
        # Number of NA rows needed to pad each value vector up to kMax stages.
        .getNumberOfNAsToAdd = function(kMax) {
            n <- kMax - getNumberOfStages()
            if (n <= 0) {
                return(0)
            }
            n <- n * getNumberOfGroups(survivalCorrectionEnabled = FALSE)
            if (.enrichmentEnabled) {
                n <- n * getNumberOfSubsets()
            }
            return(n)
        },
        # TRUE if any of the given column names exists (non-NULL) in dataFrame.
        .paramExists = function(dataFrame, parameterName) {
            for (p in parameterName) {
                value <- dataFrame[[p]]
                if (!is.null(value)) {
                    return(TRUE)
                }
            }
            return(FALSE)
        },
        # Returns the first matching column (name variant + optional suffix),
        # falling back to defaultValues; errors if nothing matches.
        .getValuesByParameterName = function(dataFrame, parameterNameVariants, ...,
                defaultValues = NULL, suffix = "") {
            for (parameterName in parameterNameVariants) {
                key <- paste0(parameterName, suffix)
                if (.paramExists(dataFrame, key)) {
                    return(dataFrame[[key]])
                }
            }
            if (!is.null(defaultValues)) {
                return(defaultValues)
            }
            stop(
                C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                "parameter '", paste0(parameterNameVariants[1], suffix),
                "' is missing or not correctly specified"
            )
        },
        # Distinct defined values of a column (factor levels, or sorted unique
        # non-NA values otherwise).
        .getValueLevels = function(values) {
            if (is.factor(values)) {
                return(levels(values))
            }
            return(sort(unique(na.omit(values))))
        },
        # Returns the .data column after checking the requested values are
        # within the column's defined range.
        .getValues = function(paramName, paramValues) {
            values <- .data[[paramName]]
            valueLevels <- .getValueLevels(values)
            if (!all(paramValues %in% valueLevels)) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'", paramName, "' (", .arrayToString(paramValues),
                    ") out of range [", .arrayToString(valueLevels), "]"
                )
            }
            return(values)
        },
        # Row indices matching the given values of one column, optionally
        # restricted to a subset.
        .getIndexValues = function(paramName, paramValues, subset = NA_character_) {
            values <- .getValues(paramName, paramValues)
            if (all(is.na(subset))) {
                return(which(values %in% paramValues))
            }
            .assertIsValidSubset(subset)
            return(which(values %in% paramValues & .data$subset %in% subset))
        },
        # Validates that each requested subset name is a defined subset level.
        .assertIsValidSubset = function(subset) {
            for (s in subset) {
                if (!(s %in% levels(.data$subset))) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "'subset' (", s, ") is not a defined value [",
                        .arrayToString(levels(.data$subset)), "]"
                    )
                }
            }
        },
        # Central row selector: negative stage/group values mean "all except";
        # NA stage (or group) selects by the other dimension only; subset is
        # an optional additional filter.
        .getIndices = function(..., stage, group, subset = NA_character_) {
            if (is.null(.data)) {
                stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'.data' must be defined")
            }
            if (!is.null(stage) && !any(is.na(stage)) && all(stage < 0)) {
                index <- 1:getNumberOfStages()
                stage <- index[!(index %in% abs(stage))]
            }
            if (!is.null(group) && !any(is.na(group)) && all(group < 0)) {
                index <- 1:getNumberOfGroups(survivalCorrectionEnabled = FALSE)
                group <- index[!(index %in% abs(group))]
            }
            # stage only and optional subset
            if (!is.null(group) && length(group) == 1 && is.na(group)) {
                return(.getIndexValues("stage", stage, subset))
            }
            # group only and optional subset
            if (!is.null(stage) && length(stage) == 1 && is.na(stage)) {
                return(.getIndexValues("group", group, subset))
            }
            # stage and group and optional subset
            stageValues <- .getValues("stage", stage)
            groupValues <- .getValues("group", group)
            if (all(is.na(subset))) {
                return(which(stageValues %in% stage & groupValues %in% group))
            }
            .assertIsValidSubset(subset)
            return(which(stageValues %in% stage & groupValues %in% group & .data$subset %in% subset))
        },
        # Truncates non-integer values to integers (with a warning) unless
        # floating-point numbers are explicitly enabled for this dataset.
        .getValidatedFloatingPointNumbers = function(x, parameterName = "Sample sizes") {
            if (.floatingPointNumbersEnabled) {
                return(x)
            }
            nToCheck <- stats::na.omit(x)
            if (any(nToCheck != as.integer(nToCheck))) {
                warning(parameterName, " specified as floating-point numbers were truncated",
                    call. = FALSE)
            }
            x[!is.na(x)] <- as.integer(x[!is.na(x)])
            return(x)
        },
        # TRUE if any keyword + suffix combination exists as a column.
        .keyWordExists = function(dataFrame, keyWords, suffix = "") {
            for (key in keyWords) {
                if (.paramExists(dataFrame, paste0(key, suffix))) {
                    return(TRUE)
                }
            }
            return(FALSE)
        },
        # Counts numbered column groups (e.g. n1, n2, ...) by scanning suffixes.
        # NOTE(review): the scan stops at suffix 1000; a dataset with >= 1000
        # groups would fall through to return(1) — presumably never reached.
        .getNumberOfGroups = function(dataFrame, keyWords) {
            for (group in 2:1000) {
                if (!.keyWordExists(dataFrame, keyWords, group)) {
                    return(group - 1)
                }
            }
            return(1)
        },
        # NA stage selection expands to all stages.
        .getValidatedStage = function(stage = NA_integer_) {
            if (all(is.na(stage))) {
                stage <- c(1:getNumberOfStages())
            }
            return(stage)
        },
        # Number of treatment groups; for survival datasets one is added
        # (control arm correction) unless survivalCorrectionEnabled = FALSE.
        getNumberOfGroups = function(survivalCorrectionEnabled = TRUE) {
            data <- stats::na.omit(.data)
            if (!survivalCorrectionEnabled) {
                return(length(levels(data$group)))
            }
            return(length(levels(data$group)) + ifelse(inherits(.self, "DatasetSurvival"), 1, 0))
        },
        # Number of stages; with naOmitEnabled only stages that have complete
        # (non-NA) data in all non-empty columns are counted.
        getNumberOfStages = function(naOmitEnabled = TRUE) {
            if (naOmitEnabled) {
                colNames <- colnames(.data)
                validColNames <- character(0)
                for (colName in colNames) {
                    colValues <- .data[, colName]
                    if (length(colValues) > 0 && !all(is.na(colValues))) {
                        validColNames <- c(validColNames, colName)
                    }
                }
                subData <- stats::na.omit(.data[, validColNames])
                numberOfStages <- length(unique(as.character(subData$stage)))
                if (numberOfStages == 0) {
                    print(.data[, validColNames])
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                        ".data seems to contain an invalid column"
                    )
                }
                return(numberOfStages)
            }
            return(length(levels(.data$stage)))
        },
        getNumberOfSubsets = function() {
            return(length(levels(.data$subset)))
        },
        isDatasetMeans = function() {
            return(inherits(.self, "DatasetMeans"))
        },
        isDatasetRates = function() {
            return(inherits(.self, "DatasetRates"))
        },
        isDatasetSurvival = function() {
            return(inherits(.self, "DatasetSurvival"))
        },
        # Stratified enrichment data is indicated by an "R" subset level.
        isStratified = function() {
            return(.enrichmentEnabled && "R" %in% levels(.data$subset))
        },
        setId = function(id) {
            .id <<- as.integer(id)
        },
        getId = function() {
            return(.id)
        },
        setDescription = function(description) {
            .description <<- description
        },
        getDescription = function() {
            return(.description)
        },
        # Human-readable description, e.g. "dataset of multi-arm means".
        .toString = function(startWithUpperCase = FALSE) {
            s <- "dataset of "
            if (.enrichmentEnabled) {
                s <- paste0(s, "enrichment ")
            } else if (.self$getNumberOfGroups() > 2) {
                s <- paste0(s, "multi-arm ")
            }
            if (isDatasetMeans()) {
                s <- paste0(s, "means")
            } else if (isDatasetRates()) {
                s <- paste0(s, "rates")
            } else if (isDatasetSurvival()) {
                s <- paste0(s, "survival data")
            } else {
                s <- paste0(s, "unknown endpoint")
            }
            return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s))
        }
    )
)

#'
#' @name DatasetMeans
#'
#' @title
#' Dataset of Means
#'
#' @description
#' Class for a dataset of means.
#'
#' @template field_groups
#' @template field_stages
#' @template field_sampleSizes
#' @template field_means
#' @template field_stDevs
#' @template field_overallSampleSizes
#' @template field_overallMeans
#' @template field_overallStDevs
#'
#' @details
#' This object cannot be created directly; better use \code{\link{getDataset}}
#' with suitable arguments to create a dataset of means.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
DatasetMeans <- setRefClass("DatasetMeans",
    contains = "Dataset",
    fields = list(
        sampleSizes = "numeric",        # stage-wise sample sizes
        means = "numeric",              # stage-wise means
        stDevs = "numeric",             # stage-wise standard deviations
        overallSampleSizes = "numeric", # cumulative sample sizes
        overallMeans = "numeric",       # cumulative means
        overallStDevs = "numeric"       # cumulative standard deviations
    ),
    methods = list(
        # --- stage-wise accessors (single stage / vectorized / cumulative "up to") ---
        getSampleSize = function(stage, group = 1, subset = NA_character_) {
            return(.data$sampleSize[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getMean = function(stage, group = 1, subset = NA_character_) {
            return(.data$mean[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getStDev = function(stage, group = 1, subset = NA_character_) {
            return(.data$stDev[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getSampleSizes = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$sampleSize[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getMeans = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$mean[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getStDevs = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$stDev[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getSampleSizesUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$sampleSize[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        getMeansUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$mean[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        getStDevsUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$stDev[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        # --- cumulative ("overall") accessors, same access patterns as above ---
        getOverallSampleSize = function(stage, group = 1, subset = NA_character_) {
            return(.data$overallSampleSize[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getOverallMean = function(stage, group = 1, subset = NA_character_) {
            return(.data$overallMean[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getOverallStDev = function(stage, group = 1, subset = NA_character_) {
            return(.data$overallStDev[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getOverallSampleSizes = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$overallSampleSize[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getOverallMeans = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$overallMean[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getOverallStDevs = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$overallStDev[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getOverallSampleSizesUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$overallSampleSize[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        getOverallMeansUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$overallMean[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        getOverallStDevsUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$overallStDev[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        # Fills the means-specific fields from the user data frame. Four input
        # layouts are supported: one or several groups, each given either
        # stage-wise (n/means/stDevs) or cumulatively (overall*). The missing
        # representation is derived afterwards.
        .initByDataFrame = function(dataFrame) {
            callSuper(dataFrame)
            # case: one mean - stage wise
            if (.paramExists(dataFrame, C_KEY_WORDS_SAMPLE_SIZES)) {
                .inputType <<- "stagewise"
                sampleSizes <<- .getValidatedFloatingPointNumbers(.getValuesByParameterName(
                    dataFrame, C_KEY_WORDS_SAMPLE_SIZES
                ), parameterName = "Sample sizes")
                .validateValues(sampleSizes, "n")
                if (any(stats::na.omit(sampleSizes) <= 0)) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "all sample sizes must be > 0, but 'n' = ",
                        .arrayToString(sampleSizes, vectorLookAndFeelEnabled = TRUE)
                    )
                }
                means <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_MEANS)
                .validateValues(means, "means")
                stDevs <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_ST_DEVS)
                .validateValues(stDevs, "stDevs")
            }
            # case: one mean - overall
            else if (.paramExists(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES)) {
                .inputType <<- "overall"
                overallSampleSizes <<- .getValidatedFloatingPointNumbers(.getValuesByParameterName(
                    dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES
                ), parameterName = "Cumulative sample sizes ")
                .validateValues(overallSampleSizes, "overallSampleSizes")
                overallMeans <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_MEANS)
                .validateValues(overallMeans, "overallMeans")
                overallStDevs <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_ST_DEVS)
                .validateValues(overallStDevs, "overallStDevs")
            }
            # case: two or more means - stage wise
            else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_SAMPLE_SIZES, 1)) &&
                    .paramExists(dataFrame, paste0(C_KEY_WORDS_SAMPLE_SIZES, 2))) {
                .inputType <<- "stagewise"
                numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_SAMPLE_SIZES)
                # long format: replicate the stage vector once per group
                stages <<- rep(stages, numberOfTreatmentGroups)
                groups <<- integer(0)
                sampleSizes <<- numeric(0)
                means <<- numeric(0)
                stDevs <<- numeric(0)
                for (group in 1:numberOfTreatmentGroups) {
                    sampleSizesTemp <- .getValidatedFloatingPointNumbers(.getValuesByParameterName(
                        dataFrame, C_KEY_WORDS_SAMPLE_SIZES,
                        suffix = group
                    ), parameterName = "Sample sizes")
                    .validateValues(sampleSizesTemp, paste0("n", group))
                    if (any(stats::na.omit(sampleSizesTemp) <= 0)) {
                        stop(
                            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                            "all sample sizes must be > 0, but 'n", group, "' = ",
                            .arrayToString(sampleSizesTemp, vectorLookAndFeelEnabled = TRUE)
                        )
                    }
                    sampleSizes <<- c(sampleSizes, sampleSizesTemp)
                    meansTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_MEANS, suffix = group)
                    .validateValues(meansTemp, paste0("means", group))
                    means <<- c(means, meansTemp)
                    stDevsTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_ST_DEVS, suffix = group)
                    .validateValues(stDevsTemp, paste0("stDevs", group))
                    stDevs <<- c(stDevs, stDevsTemp)
                    groups <<- c(groups, rep(as.integer(group), length(sampleSizesTemp)))
                }
            }
            # case: two or more means - overall
            else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_SAMPLE_SIZES, 1)) &&
                    .paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_SAMPLE_SIZES, 2))) {
                .inputType <<- "overall"
                numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES)
                stages <<- rep(stages, numberOfTreatmentGroups)
                groups <<- integer(0)
                sampleSizes <<- numeric(0)
                means <<- numeric(0)
                stDevs <<- numeric(0)
                overallSampleSizes <<- numeric(0)
                overallMeans <<- numeric(0)
                overallStDevs <<- numeric(0)
                for (group in 1:numberOfTreatmentGroups) {
                    overallSampleSizesTemp <- .getValidatedFloatingPointNumbers(.getValuesByParameterName(
                        dataFrame,
                        C_KEY_WORDS_OVERALL_SAMPLE_SIZES,
                        suffix = group
                    ), parameterName = "Cumulative sample sizes")
                    .validateValues(overallSampleSizesTemp, paste0("overallSampleSizes", group))
                    overallSampleSizes <<- c(overallSampleSizes, overallSampleSizesTemp)
                    overallMeansTemp <- .getValuesByParameterName(dataFrame,
                        C_KEY_WORDS_OVERALL_MEANS,
                        suffix = group
                    )
                    .validateValues(overallMeansTemp, paste0("overallMeans", group))
                    overallMeans <<- c(overallMeans, overallMeansTemp)
                    overallStDevsTemp <- .getValuesByParameterName(dataFrame,
                        C_KEY_WORDS_OVERALL_ST_DEVS,
                        suffix = group
                    )
                    .validateValues(overallStDevsTemp, paste0("overallStDevs", group))
                    overallStDevs <<- c(overallStDevs, overallStDevsTemp)
                    groups <<- c(groups, rep(as.integer(group), length(overallSampleSizesTemp)))
                }
            } else {
                stop(
                    C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                    "sample sizes are missing or not correctly specified"
                )
            }
            # derive the representation that was not provided by the user
            if (.inputType == "stagewise") {
                n <- length(sampleSizes)
                overallSampleSizes <<- rep(NA_real_, n)
                overallMeans <<- rep(NA_real_, n)
                overallStDevs <<- rep(NA_real_, n)
                .setParameterType("sampleSizes", C_PARAM_USER_DEFINED)
                .setParameterType("means", C_PARAM_USER_DEFINED)
                .setParameterType("stDevs", C_PARAM_USER_DEFINED)
                .setParameterType("overallSampleSizes", C_PARAM_GENERATED)
                .setParameterType("overallMeans", C_PARAM_GENERATED)
                .setParameterType("overallStDevs", C_PARAM_GENERATED)
                .recreateDataFrame()
                .createOverallData()
            } else {
                n <- length(overallSampleSizes)
                sampleSizes <<- rep(NA_real_, n)
                means <<- rep(NA_real_, n)
                stDevs <<- rep(NA_real_, n)
                .setParameterType("sampleSizes", C_PARAM_GENERATED)
                .setParameterType("means", C_PARAM_GENERATED)
                .setParameterType("stDevs", C_PARAM_GENERATED)
                .setParameterType("overallSampleSizes", C_PARAM_USER_DEFINED)
                .setParameterType("overallMeans", C_PARAM_USER_DEFINED)
                .setParameterType("overallStDevs", C_PARAM_USER_DEFINED)
                .recreateDataFrame()
                .createStageWiseData()
            }
            if (sum(stats::na.omit(sampleSizes) < 0) > 0) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all sample sizes must be >= 0")
            }
            if (sum(stats::na.omit(stDevs) < 0) > 0) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all standard deviations must be >= 0")
            }
        },
        # Rebuilds .data with the means-specific value columns appended,
        # then re-orders and syncs the fields.
        .recreateDataFrame = function() {
            callSuper()
            .data <<- cbind(.data, data.frame(
                sampleSize = sampleSizes,
                mean = means,
                stDev = stDevs,
                overallSampleSize = overallSampleSizes,
                overallMean = overallMeans,
                overallStDev = overallStDevs
            ))
            .orderDataByStageAndGroup()
            .setDataToVariables()
        },
        .setDataToVariables = function() {
            callSuper()
            sampleSizes <<- .data$sampleSize
            means <<- .data$mean
            stDevs <<- .data$stDev
            overallSampleSizes <<- .data$overallSampleSize
            overallMeans <<- .data$overallMean
            overallStDevs <<- .data$overallStDev
        },
        # Pads all value vectors with NA rows up to kMax stages.
        .fillWithNAs = function(kMax) {
            callSuper(kMax)
            n <- .getNumberOfNAsToAdd(kMax)
            naRealsToAdd <- rep(NA_real_, n)
            sampleSizes <<- c(sampleSizes, naRealsToAdd)
            means <<- c(means, naRealsToAdd)
            stDevs <<- c(stDevs, naRealsToAdd)
            overallSampleSizes <<- c(overallSampleSizes, naRealsToAdd)
            overallMeans <<- c(overallMeans, naRealsToAdd)
            overallStDevs <<- c(overallStDevs, naRealsToAdd)
            .recreateDataFrame()
        },
        # Trims all value vectors to the indices kept by the base class.
        .trim = function(kMax = NA_integer_) {
            indices <- callSuper(kMax)
            if (length(indices) == 0) {
                return(invisible(FALSE))
            }
            sampleSizes <<- sampleSizes[indices]
            means <<- means[indices]
            stDevs <<- stDevs[indices]
            overallSampleSizes <<- overallSampleSizes[indices]
            overallMeans <<- overallMeans[indices]
            overallStDevs <<- overallStDevs[indices]
            .recreateDataFrame()
            return(invisible(TRUE))
        },
        # Cumulative means as sample-size weighted running averages.
        .getOverallMeans = function(sampleSizes, means) {
            return(cumsum(sampleSizes * means) / cumsum(sampleSizes))
        },
        # Cumulative (pooled) standard deviations combining within-stage
        # variances and between-stage mean shifts.
        .getOverallStDevs = function(sampleSizes, means, stDevs, overallMeans) {
            kMax <- length(sampleSizes)
            overallStDev <- rep(NA_real_, kMax)
            for (k in 1:kMax) {
                overallStDev[k] <- sqrt((sum((sampleSizes[1:k] - 1) * stDevs[1:k]^2) +
                    sum(sampleSizes[1:k] * (means[1:k] - overallMeans[k])^2)) /
                    (sum(sampleSizes[1:k]) - 1))
            }
            return(overallStDev)
        },
        # Derives the cumulative columns from stage-wise input, per
        # group (and per subset for enrichment data).
        .createOverallData = function() {
            .data$overallSampleSize <<- rep(NA_real_, nrow(.data))
            .data$overallMean <<- rep(NA_real_, nrow(.data))
            .data$overallStDev <<- rep(NA_real_, nrow(.data))
            subsetLevels <- NA_character_
            if (.enrichmentEnabled) {
                subsetLevels <- levels(.data$subset)
            }
            for (s in subsetLevels) {
                for (g in levels(.data$group)) {
                    if (!is.na(s)) {
                        indices <- which(.data$subset == s & .data$group == g)
                    } else {
                        indices <- which(.data$group == g)
                    }
                    .data$overallSampleSize[indices] <<- cumsum(.data$sampleSize[indices])
                    .data$overallMean[indices] <<- .getOverallMeans(
                        .data$sampleSize[indices], .data$mean[indices]
                    )
                    .data$overallStDev[indices] <<- .getOverallStDevs(
                        .data$sampleSize[indices], .data$mean[indices],
                        .data$stDev[indices], .data$overallMean[indices]
                    )
                }
            }
            .setDataToVariables()
        },
        # Stage-wise sample sizes as first differences of the cumulative ones.
        .getStageWiseSampleSizes = function(overallSampleSizes) {
            result <- overallSampleSizes
            if (length(overallSampleSizes) == 1) {
                return(result)
            }
            kMax <- length(overallSampleSizes)
            result[2:kMax] <- overallSampleSizes[2:kMax] - overallSampleSizes[1:(kMax - 1)]
            return(result)
        },
        # Stage-wise means back-calculated from cumulative means and sizes.
        .getStageWiseMeans = function(sampleSizes, overallSampleSizes, overallMeans) {
            result <- overallMeans
            if (length(overallMeans) == 1) {
                return(result)
            }
            for (k in 2:length(overallMeans)) {
                result[k] <- (overallSampleSizes[k] * overallMeans[k] -
                    overallSampleSizes[k - 1] * overallMeans[k - 1]) / sampleSizes[k]
            }
            return(result)
        },
        # Stage-wise standard deviation at stage k, inverted from the pooled
        # formula; returns NA with a warning if the back-calculation is not
        # possible (e.g. negative variance from inconsistent input).
        .getStageWiseStDev = function(overallStDevs, sampleSizes,
                overallSampleSizes, means, overallMeans, k) {
            numBeforeK <- (overallSampleSizes[k - 1] - 1) * overallStDevs[k - 1]^2
            numK <- (overallSampleSizes[k] - 1) * overallStDevs[k]^2
            numSumBeforeK <- sum(sampleSizes[1:(k - 1)] * (means[1:(k - 1)] - overallMeans[k - 1])^2)
            numSumK <- sum(sampleSizes[1:k] * (means[1:k] - overallMeans[k])^2)
            denom <- (sampleSizes[k] - 1)
            value <- (numK - numBeforeK + numSumBeforeK - numSumK) / denom
            if (is.null(value) || length(value) != 1 || is.na(value) || value < 0) {
                warning("No calculation of stage-wise standard deviation from ",
                    "overall standard deviations possible at stage ", k,
                    call. = FALSE
                )
                return(NA_real_)
            }
            return(sqrt(value))
        },
        .getStageWiseStDevs = function(overallStDevs, sampleSizes,
                overallSampleSizes, means, overallMeans) {
            result <- overallStDevs
            if (length(overallStDevs) == 1) {
                return(result)
            }
            for (k in 2:length(overallStDevs)) {
                result[k] <- .getStageWiseStDev(overallStDevs, sampleSizes,
                    overallSampleSizes, means, overallMeans, k)
            }
            return(result)
        },
        .createStageWiseData = function() {
            "Calculates stage-wise means and standard deviation if cumulative data is available"
            .data$sampleSize <<- rep(NA_real_, nrow(.data))
            .data$mean <<- rep(NA_real_, nrow(.data))
            .data$stDev <<- rep(NA_real_, nrow(.data))
            subsetLevels <- NA_character_
            if (.enrichmentEnabled) {
                subsetLevels <- levels(.data$subset)
            }
            for (s in subsetLevels) {
                for (g in levels(.data$group)) {
                    if (!is.na(s)) {
                        indices <- which(.data$subset == s & .data$group == g)
                    } else {
                        indices <- which(.data$group == g)
                    }
                    # cumulative sample sizes must strictly increase (trailing NAs allowed)
                    .assertValuesAreStrictlyIncreasing(.data$overallSampleSize[indices],
                        paste0("overallSampleSizes", g),
                        endingNasAllowed = TRUE
                    )
                    .data$sampleSize[indices] <<- .getStageWiseSampleSizes(.data$overallSampleSize[indices])
                    .data$mean[indices] <<- .getStageWiseMeans(
                        .data$sampleSize[indices],
                        .data$overallSampleSize[indices],
                        .data$overallMean[indices]
                    )
                    .data$stDev[indices] <<- .getStageWiseStDevs(
                        .data$overallStDev[indices],
                        .data$sampleSize[indices],
                        .data$overallSampleSize[indices],
                        .data$mean[indices],
                        .data$overallMean[indices]
                    )
                }
            }
            .setDataToVariables()
        },
        # Generates simulated subject-level data matching this dataset.
        getRandomData = function() {
            return(.getRandomDataMeans(.self))
        }
    )
)

#' @examples
#'
#' datasetExample <- getDataset(
#'     means1 = c(112.3, 105.1, 121.3),
#'     means2 = c(98.1, 99.3, 100.1),
#'     means3 = c(98.1, 99.3, 100.1),
#'     stDevs1 = c(44.4, 42.9, 41.4),
#'     stDevs2 = c(46.7, 41.1, 39.5),
#'     stDevs3 = c(46.7, 41.1, 39.5),
#'     n1 = c(84, 81, 82),
#'     n2 = c(87, 83, 81),
#'     n3 = c(87, 82, 84)
#' )
#' .getRandomDataMeans(datasetExample,
#'     randomDataParamName = "outcome",
numberOfVisits = 3, #' fixedCovariates = list(gender = c("f", "m"), bmi = c(17, 40)) #' ) #' #' @noRd #' .getRandomDataMeans <- function(dataset, ..., treatmentName = "Treatment group", controlName = "Control group", randomDataParamName = "randomData", numberOfVisits = 1L, fixedCovariates = NULL, covariateEffects = NULL, seed = NA_real_) { if (!is.null(fixedCovariates)) { if (!is.list(fixedCovariates)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("fixedCovariates"), " must be a named list") } } if (!is.null(covariateEffects)) { if (!is.list(covariateEffects)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("covariateEffects"), " must be a named list") } } .assertIsSingleCharacter(treatmentName, "treatmentName") .assertIsSingleCharacter(controlName, "controlName") .assertIsSingleCharacter(randomDataParamName, "randomDataParamName") .assertIsSinglePositiveInteger(numberOfVisits, "numberOfVisits", validateType = FALSE) .assertIsSingleNumber(seed, "seed", naAllowed = TRUE) seed <- .setSeed(seed) numberOfGroups <- dataset$getNumberOfGroups() sampleSize <- 0 for (stage in 1:dataset$getNumberOfStages()) { for (group in 1:numberOfGroups) { if (dataset$.enrichmentEnabled) { for (subset in levels(dataset$.data$subset)) { n <- dataset$getSampleSize(stage = stage, group = group, subset = subset) if (n > sampleSize) { sampleSize <- n } } } else { n <- dataset$getSampleSize(stage = stage, group = group) n <- round(n / numberOfVisits) if (n > sampleSize) { sampleSize <- n } } } } idFactor <- 10^nchar(as.character(sampleSize)) data <- NULL for (stage in 1:dataset$getNumberOfStages()) { for (group in 1:numberOfGroups) { for (visit in 1:numberOfVisits) { if (dataset$.enrichmentEnabled) { for (subset in levels(dataset$.data$subset)) { n <- dataset$getSampleSize(stage = stage, group = group, subset = subset) randomData <- stats::rnorm( n = n, mean = dataset$getMean(stage = stage, group = group, subset = subset), sd = dataset$getStDev(stage = stage, group = group, subset = subset) 
) row <- data.frame( subject = idFactor * group + c(1:n), stage = rep(stage, n), group = rep(group, n), subset = rep(subset, n), randomData = randomData ) if (is.null(data)) { data <- row } else { data <- rbind(data, row) } } } else { n <- dataset$getSampleSize(stage = stage, group = group) n <- floor(n / numberOfVisits) randomData <- stats::rnorm( n = sampleSize, mean = dataset$getMean(stage = stage, group = group), sd = dataset$getStDev(stage = stage, group = group) ) subjectIds <- (idFactor * 10 * stage) + (idFactor * group) + c(1:sampleSize) indices <- 1:sampleSize randomDataBefore <- NULL numberOfDropOutsBefore <- 0 if (visit > 1 && !is.null(data)) { randomDataBefore <- data$randomData[data$stage == visit - 1 & data$subject %in% subjectIds] numberOfDropOutsBefore <- sum(is.na(randomDataBefore)) indices <- which(!is.na(randomDataBefore)) } sampleSizeBefore <- sampleSize - numberOfDropOutsBefore if (n < sampleSizeBefore) { numberOfDropOuts <- sampleSizeBefore - n dropOuts <- sample(c(rep(1, n - numberOfDropOuts), rep(0, numberOfDropOuts))) randomData[indices[dropOuts == 0]] <- NA_real_ if (!is.null(randomDataBefore)) { randomData[is.na(randomDataBefore)] <- NA_real_ } } row <- data.frame( subject = subjectIds, stage = rep(stage, sampleSize), group = rep(group, sampleSize), visit = rep(visit - 1, sampleSize), randomData = randomData ) if (is.null(data)) { data <- row } else { data <- rbind(data, row) } } } } } data$stage <- factor(data$stage) groupLevels <- paste(treatmentName, c(1:numberOfGroups)) if (numberOfGroups > 1) { if (numberOfGroups == 2) { groupLevels[1] <- treatmentName } groupLevels[numberOfGroups] <- controlName } data$group <- factor(data$group, labels = groupLevels) if (dataset$.enrichmentEnabled) { data$subset <- factor(data$subset) } if (!is.null(randomDataParamName) && length(randomDataParamName) == 1 && !is.na(randomDataParamName)) { colNames <- colnames(data) colNames[colNames == "randomData"] <- randomDataParamName colnames(data) <- colNames 
} if (!is.null(fixedCovariates)) { fixedCovariateNames <- names(fixedCovariates) if (is.null(fixedCovariateNames) || any(nchar(trimws(fixedCovariateNames)) == 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("fixedCovariates"), " must be a named list") } subjects <- sort(unique(data$subject)) for (fixedCovariateName in fixedCovariateNames) { data[[fixedCovariateName]] <- rep(NA, nrow(data)) values <- fixedCovariates[[fixedCovariateName]] if (is.null(values) || length(values) < 2 || any(is.na(values))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote(paste0("fixedCovariates$", fixedCovariateName)), " (", .arrayToString(values), ") must be a valid numeric or character vector with a minimum of 2 values" ) } if (is.character(values)) { if (length(unique(values)) < length(values)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote(paste0("fixedCovariates$", fixedCovariateName)), " (", .arrayToString(values, maxLength = 20), ") must be a unique vector" ) } fixedCovariateSample <- sample(values, length(subjects), replace = TRUE) for (i in 1:length(subjects)) { data[[fixedCovariateName]][data$subject == subjects[i]] <- fixedCovariateSample[i] } } else if (is.numeric(values)) { if (length(values) == 2) { minValue <- min(values) maxValue <- max(values) covMean <- runif(1, minValue, maxValue) covSD <- covMean * 0.1 showMessage <- TRUE for (i in 1:length(subjects)) { groupName <- as.character(data$group[data$subject == subjects[i]])[1] covEffect <- 1 if (groupName == controlName && !is.null(covariateEffects)) { covEffect <- covariateEffects[[fixedCovariateName]] if (is.null(covEffect)) { covEffect <- 1 } else { .assertIsNumericVector(covEffect, paste0("covariateEffects$", fixedCovariateName)) if (showMessage) { message( "Add effect ", covEffect, " to ", sQuote(fixedCovariateName), " of ", sQuote(groupName) ) showMessage <- FALSE } } } continuesExample <- rnorm(sum(data$subject == subjects[i]), covMean * covEffect, covSD) data[[fixedCovariateName]][data$subject == 
subjects[i]] <- continuesExample
                    }
                }
            }
        }
    }

    data$seed <- rep(seed, nrow(data))

    return(data)
}

#'
#' @title
#' Dataset Plotting
#'
#' @description
#' Plots a dataset.
#'
#' @param x The \code{\link{Dataset}} object to plot.
#' @param y Not available for this kind of plot (is only defined to be compatible
#' to the generic plot function).
#' @param main The main title, default is \code{"Dataset"}.
#' @param xlab The x-axis label, default is \code{"Stage"}.
#' @param ylab The y-axis label.
#' @param legendTitle The legend title, default is \code{"Group"}.
#' @inheritParams param_palette
#' @inheritParams param_showSource
#' @inheritParams param_plotSettings
#' @inheritParams param_three_dots_plot
#'
#' @details
#' Generic function to plot all kinds of datasets.
#'
#' @template return_object_ggplot
#'
#' @examples
#' # Plot a dataset of means
#' dataExample <- getDataset(
#'     n1 = c(22, 11, 22, 11),
#'     n2 = c(22, 13, 22, 13),
#'     means1 = c(1, 1.1, 1, 1),
#'     means2 = c(1.4, 1.5, 3, 2.5),
#'     stDevs1 = c(1, 2, 2, 1.3),
#'     stDevs2 = c(1, 2, 2, 1.3)
#' )
#' \dontrun{
#' if (require(ggplot2)) plot(dataExample, main = "Comparison of Means")
#' }
#'
#' # Plot a dataset of rates
#' dataExample <- getDataset(
#'     n1 = c(8, 10, 9, 11),
#'     n2 = c(11, 13, 12, 13),
#'     events1 = c(3, 5, 5, 6),
#'     events2 = c(8, 10, 12, 12)
#' )
#' \dontrun{
#' if (require(ggplot2)) plot(dataExample, main = "Comparison of Rates")
#' }
#'
#' @export
#'
plot.Dataset <- function(x, y, ...,
        main = "Dataset",
        xlab = "Stage",
        ylab = NA_character_,
        legendTitle = "Group",
        palette = "Set1",
        showSource = FALSE,
        plotSettings = NULL) {
    # Enrichment and survival datasets are not supported (yet); fail early.
    if (x$.enrichmentEnabled) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "plot of enrichment data is not implemented yet")
    }

    .assertGgplotIsInstalled()

    # Select the data to plot and a default y-axis label per dataset kind:
    # means are visualized via simulated per-subject values, rates via the raw counts.
    if (x$isDatasetMeans()) {
        data <- x$getRandomData()
        if (is.na(ylab)) {
            ylab <- "Random data"
        }
    } else if (x$isDatasetRates()) {
        data <- x$.data
        if (is.na(ylab)) {
            ylab <- "Frequency (Events and Sample Size)"
        }
    } else if (x$isDatasetSurvival()) {
        # Open work: implement dataset plot of survival data
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "plot of survival data is not implemented yet")
    }

    if (!is.logical(showSource) || isTRUE(showSource)) {
        warning("'showSource' != FALSE is not implemented yet for class ", .getClassName(x))
    }

    if (is.null(plotSettings)) {
        plotSettings <- x$getPlotSettings()
    }

    if (x$getNumberOfGroups() == 1) {
        # Single-group layouts: color by stage.
        if (x$isDatasetMeans()) {
            p <- ggplot2::ggplot(
                data = data,
                ggplot2::aes(y = .data[["randomData"]], x = factor(.data[["stage"]]))
            )
            p <- p + ggplot2::geom_boxplot(ggplot2::aes(fill = .data[["stage"]]))
            p <- p + ggplot2::geom_point(
                colour = "#0e414e", shape = 20,
                position = ggplot2::position_jitter(width = .1),
                size = plotSettings$pointSize
            )
            # White dot marks the stage-wise mean on top of the box plot.
            p <- p + ggplot2::stat_summary(
                fun = "mean", geom = "point", shape = 21,
                position = ggplot2::position_dodge(.75),
                size = 4, fill = "white", colour = "black", show.legend = FALSE
            )
        } else if (x$isDatasetRates()) {
            p <- ggplot2::ggplot(show.legend = FALSE)
            # plot sample size
            p <- p + ggplot2::geom_bar(
                data = data,
                ggplot2::aes(
                    y = .data[["sampleSize"]],
                    x = factor(.data[["stage"]]),
                    fill = factor(.data[["stage"]])
                ),
                position = "dodge", stat = "identity", alpha = 0.4
            )
            # plot events
            p <- p + ggplot2::geom_bar(
                data = data,
                ggplot2::aes(
                    y = .data[["event"]],
                    x = factor(.data[["stage"]]),
                    fill = factor(.data[["stage"]])
                ),
                position = "dodge", stat = "identity"
            )
        } else if (x$isDatasetSurvival()) {
            # implement survival plot here
        }
    } else {
        # Multi-group layouts: dodge by group within each stage.
        data$stageGroup <- interaction(data$stage, data$group)
        if (x$isDatasetMeans()) {
            p <- ggplot2::ggplot(ggplot2::aes(
                y = .data[["randomData"]],
                x = factor(.data[["stage"]]),
                fill = factor(.data[["group"]])
            ), data = data)
            p <- p + ggplot2::geom_point(ggplot2::aes(colour = .data[["group"]]),
                shape = 20,
                position = ggplot2::position_dodge(.75),
                size = plotSettings$pointSize
            )
            p <- p + ggplot2::geom_boxplot()
            p <- p + ggplot2::stat_summary(ggplot2::aes(colour = .data[["group"]]),
                fun = "mean", geom = "point", shape = 21,
                position = ggplot2::position_dodge(.75),
                size = 4, fill = "white", show.legend = FALSE
            )
        } else if (x$isDatasetRates()) {
            p <- ggplot2::ggplot(show.legend = FALSE)
            # plot sample size
            p <- p + ggplot2::geom_bar(
                ggplot2::aes(
                    y = .data[["sampleSize"]],
                    x = factor(.data[["stage"]]),
                    fill = factor(.data[["group"]])
                ),
                data = data, position = "dodge", stat = "identity", alpha = 0.4
            )
            # plot events
            p <- p + ggplot2::geom_bar(
                data = data,
                ggplot2::aes(
                    y = .data[["event"]],
                    x = factor(.data[["stage"]]),
                    fill = factor(.data[["group"]])
                ),
                position = "dodge", stat = "identity"
            )
        } else if (x$isDatasetSurvival()) {
            # implement survival plot here
        }
    }

    # hide second legend
    # NOTE: 'guides(<aes> = "none")' replaces the logical form 'guides(<aes> = FALSE)',
    # which is deprecated since ggplot2 3.3.4 and emits a warning.
    if (x$getNumberOfGroups() == 1) {
        p <- p + ggplot2::guides(fill = "none", colour = "none")
    } else {
        p <- p + ggplot2::guides(colour = "none")
    }

    # set theme
    p <- plotSettings$setTheme(p)
    # p <- designSet$getPlotSettings()$hideGridLines(p)

    # set main title
    p <- plotSettings$setMainTitle(p, main)

    # set axes labels
    p <- plotSettings$setAxesLabels(p, xlab = xlab, ylab = ylab)

    # set legend
    if (x$getNumberOfGroups() > 1) {
        p <- plotSettings$setLegendPosition(p, legendPosition = C_POSITION_OUTSIDE_PLOT)
        p <- plotSettings$setLegendBorder(p)
        p <- plotSettings$setLegendTitle(p, legendTitle, mode = "fill")
        p <- plotSettings$setLegendLabelSize(p)
    }

    p <- plotSettings$setAxesAppearance(p)
    p <- plotSettings$setColorPalette(p, palette, mode = "all")
    p <- plotSettings$enlargeAxisTicks(p)

    companyAnnotationEnabled <- .getOptionalArgument("companyAnnotationEnabled", ...)
    if (is.null(companyAnnotationEnabled) || !is.logical(companyAnnotationEnabled)) {
        companyAnnotationEnabled <- FALSE
    }
    p <- plotSettings$addCompanyAnnotation(p, enabled = companyAnnotationEnabled)

    p
}

#'
#' @name DatasetRates
#'
#' @title
#' Dataset of Rates
#'
#' @description
#' Class for a dataset of rates.
#'
#' @template field_groups
#' @template field_stages
#' @template field_sampleSizes
#' @template field_overallSampleSizes
#' @template field_events
#' @template field_overallEvents
#'
#' @details
#' This object cannot be created directly; better use \code{\link{getDataset}}
#' with suitable arguments to create a dataset of rates.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
DatasetRates <- setRefClass("DatasetRates",
    contains = "Dataset",
    fields = list(
        sampleSizes = "numeric",          # stage-wise sample sizes (one entry per stage x group)
        events = "numeric",               # stage-wise event counts
        overallSampleSizes = "numeric",   # cumulative sample sizes across stages
        overallEvents = "numeric"         # cumulative event counts across stages
    ),
    methods = list(
        # --- Accessors: stage-wise values -------------------------------------------------
        # All accessors delegate row selection to .getIndices() (defined in the base class
        # 'Dataset'); 'subset' is only meaningful for enrichment datasets.
        getSampleSize = function(stage, group = 1, subset = NA_character_) {
            return(.data$sampleSize[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getSampleSizes = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$sampleSize[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        # Returns the stage-wise sample sizes of stages 1..to.
        getSampleSizesUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$sampleSize[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        getEvent = function(stage, group = 1, subset = NA_character_) {
            return(.data$event[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getEvents = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$event[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getEventsUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$event[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        # --- Accessors: cumulative ("overall") values -------------------------------------
        getOverallSampleSize = function(stage, group = 1, subset = NA_character_) {
            return(.data$overallSampleSize[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getOverallSampleSizes = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$overallSampleSize[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getOverallSampleSizesUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$overallSampleSize[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        getOverallEvent = function(stage, group = 1, subset = NA_character_) {
            return(.data$overallEvent[.getIndices(stage = stage, group = group, subset = subset)])
        },
        getOverallEvents = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            return(.data$overallEvent[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getOverallEventsUpTo = function(to, group = 1, subset = NA_character_) {
            return(.data$overallEvent[.getIndices(stage = c(1:to), group = group, subset = subset)])
        },
        # Initializes the dataset from a user-provided data frame. Four input layouts are
        # supported: one group given stage-wise, one group given cumulatively, and the
        # multi-group variants of both (columns suffixed 1, 2, ...). Whichever
        # representation is user-supplied, the other one is derived via .getOverallData()
        # or .getStageWiseData() and the parameter types are flagged accordingly.
        .initByDataFrame = function(dataFrame) {
            callSuper(dataFrame)

            # case: one rate - stage wise
            if (.paramExists(dataFrame, C_KEY_WORDS_SAMPLE_SIZES)) {
                .inputType <<- "stagewise"
                sampleSizes <<- .getValidatedFloatingPointNumbers(
                    .getValuesByParameterName(dataFrame, C_KEY_WORDS_SAMPLE_SIZES),
                    parameterName = "Sample sizes"
                )
                .validateValues(sampleSizes, "n")
                if (any(stats::na.omit(sampleSizes) <= 0)) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "all sample sizes must be > 0, but 'n' = ",
                        .arrayToString(sampleSizes, vectorLookAndFeelEnabled = TRUE)
                    )
                }

                events <<- .getValidatedFloatingPointNumbers(
                    .getValuesByParameterName(dataFrame, C_KEY_WORDS_EVENTS),
                    parameterName = "Events"
                )
                .validateValues(events, "events")
                if (any(stats::na.omit(events) < 0)) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "all events must be >= 0, but 'events' = ",
                        .arrayToString(events, vectorLookAndFeelEnabled = TRUE)
                    )
                }

                # kMax = planned number of stages; trailing NAs mark not-yet-observed stages.
                kMax <- length(sampleSizes)
                stageNumber <- length(stats::na.omit(sampleSizes))
                dataInput <- data.frame(
                    sampleSizes = sampleSizes,
                    events = events
                )
                dataInput <- .getOverallData(dataInput, kMax, stage = stageNumber)
                overallSampleSizes <<- dataInput$overallSampleSizes
                overallEvents <<- dataInput$overallEvents

                .setParameterType("sampleSizes", C_PARAM_USER_DEFINED)
                .setParameterType("events", C_PARAM_USER_DEFINED)
                .setParameterType("overallSampleSizes", C_PARAM_GENERATED)
                .setParameterType("overallEvents", C_PARAM_GENERATED)
            }

            # case: one rate - overall
            else if (.paramExists(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES)) {
                .inputType <<- "overall"
                overallSampleSizes <<- .getValidatedFloatingPointNumbers(
                    .getValuesByParameterName(
                        dataFrame,
                        C_KEY_WORDS_OVERALL_SAMPLE_SIZES
                    ),
                    parameterName = "Cumulative sample sizes"
                )
                .validateValues(overallSampleSizes, "overallSampleSizes")
                .assertValuesAreStrictlyIncreasing(overallSampleSizes, "overallSampleSizes", endingNasAllowed = TRUE)

                overallEvents <<- .getValidatedFloatingPointNumbers(
                    .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS),
                    parameterName = "Cumulative events"
                )
                .validateValues(overallEvents, "overallEvents")
                .assertValuesAreMonotoneIncreasing(overallEvents, "overallEvents", endingNasAllowed = TRUE)

                kMax <- length(overallSampleSizes)
                stageNumber <- length(stats::na.omit(overallSampleSizes))
                stageWiseData <- .getStageWiseData(data.frame(
                    overallSampleSizes = overallSampleSizes,
                    overallEvents = overallEvents
                ), kMax, stage = stageNumber)
                sampleSizes <<- stageWiseData$sampleSizes
                events <<- stageWiseData$events

                .setParameterType("sampleSizes", C_PARAM_GENERATED)
                .setParameterType("events", C_PARAM_GENERATED)
                .setParameterType("overallSampleSizes", C_PARAM_USER_DEFINED)
                .setParameterType("overallEvents", C_PARAM_USER_DEFINED)
            }

            # case: two or more rates - stage wise
            else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_SAMPLE_SIZES, 1)) &&
                    .paramExists(dataFrame, paste0(C_KEY_WORDS_SAMPLE_SIZES, 2))) {
                .inputType <<- "stagewise"
                numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_SAMPLE_SIZES)
                # Replicate the stage column once per group; per-group values are appended below.
                stages <<- rep(stages, numberOfTreatmentGroups)
                groups <<- integer(0)
                sampleSizes <<- numeric(0)
                events <<- numeric(0)
                overallSampleSizes <<- numeric(0)
                overallEvents <<- numeric(0)
                for (group in 1:numberOfTreatmentGroups) {
                    sampleSizesTemp <- .getValidatedFloatingPointNumbers(
                        .getValuesByParameterName(
                            dataFrame, C_KEY_WORDS_SAMPLE_SIZES,
                            suffix = group
                        ),
                        parameterName = "Sample sizes"
                    )
                    .validateValues(sampleSizesTemp, paste0("n", group))
                    if (any(stats::na.omit(sampleSizesTemp) <= 0)) {
                        stop(
                            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                            "all sample sizes must be > 0, but 'n", group, "' = ",
                            .arrayToString(sampleSizesTemp, vectorLookAndFeelEnabled = TRUE)
                        )
                    }
                    sampleSizes <<- c(sampleSizes, sampleSizesTemp)

                    eventsTemp <- .getValidatedFloatingPointNumbers(
                        .getValuesByParameterName(dataFrame, C_KEY_WORDS_EVENTS, suffix = group),
                        parameterName = "Events"
                    )
                    .validateValues(eventsTemp, paste0("events", group))
                    if (any(stats::na.omit(eventsTemp) < 0)) {
                        stop(
                            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                            "all events must be >= 0, but 'events", group, "' = ",
                            .arrayToString(eventsTemp, vectorLookAndFeelEnabled = TRUE)
                        )
                    }
                    events <<- c(events, eventsTemp)

                    groups <<- c(groups, rep(as.integer(group), length(sampleSizesTemp)))

                    kMax <- length(sampleSizesTemp)
                    numberOfValidStages <- length(stats::na.omit(sampleSizesTemp))
                    overallData <- .getOverallData(data.frame(
                        sampleSizes = sampleSizesTemp,
                        events = eventsTemp
                    ), kMax, stage = numberOfValidStages)
                    overallSampleSizes <<- c(overallSampleSizes, overallData$overallSampleSizes)
                    overallEvents <<- c(overallEvents, overallData$overallEvents)
                }
                if (sum(stats::na.omit(sampleSizes) < 0) > 0) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all sample sizes must be >= 0")
                }
                .setParameterType("sampleSizes", C_PARAM_USER_DEFINED)
                .setParameterType("events", C_PARAM_USER_DEFINED)
                .setParameterType("overallSampleSizes", C_PARAM_GENERATED)
                .setParameterType("overallEvents", C_PARAM_GENERATED)
            }

            # case: two or more rates - overall
            else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_SAMPLE_SIZES, 1)) &&
                    .paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_SAMPLE_SIZES, 2))) {
                .inputType <<- "overall"
                numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES)
                stages <<- rep(stages, numberOfTreatmentGroups)
                groups <<- integer(0)
                sampleSizes <<- numeric(0)
                events <<- numeric(0)
                overallSampleSizes <<- numeric(0)
                overallEvents <<- numeric(0)
                for (group in 1:numberOfTreatmentGroups) {
                    overallSampleSizesTemp <- .getValidatedFloatingPointNumbers(
                        .getValuesByParameterName(
                            dataFrame, C_KEY_WORDS_OVERALL_SAMPLE_SIZES,
                            suffix = group
                        ),
                        parameterName = "Cumulative sample sizes"
                    )
                    .validateValues(overallSampleSizesTemp, paste0("overallSampleSizes", group))
                    .assertValuesAreStrictlyIncreasing(overallSampleSizesTemp,
                        paste0("overallSampleSizes", group),
                        endingNasAllowed = TRUE
                    )
                    overallSampleSizes <<- c(overallSampleSizes, overallSampleSizesTemp)

                    overallEventsTemp <- .getValidatedFloatingPointNumbers(
                        .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS,
                            suffix = group
                        ),
                        parameterName = "Cumulative events"
                    )
                    .validateValues(overallEventsTemp, paste0("overallEvents", group))
                    .assertValuesAreMonotoneIncreasing(overallEventsTemp,
                        paste0("overallEvents", group),
                        endingNasAllowed = TRUE
                    )
                    overallEvents <<- c(overallEvents, overallEventsTemp)

                    groups <<- c(groups, rep(as.integer(group), length(overallSampleSizesTemp)))

                    kMax <- length(overallSampleSizesTemp)
                    numberOfValidStages <- length(stats::na.omit(overallSampleSizesTemp))
                    stageWiseData <- .getStageWiseData(data.frame(
                        overallSampleSizes = overallSampleSizesTemp,
                        overallEvents = overallEventsTemp
                    ), kMax, stage = numberOfValidStages)
                    validatedSampleSizes <- stageWiseData$sampleSizes
                    .validateValues(validatedSampleSizes, paste0("n", group))
                    sampleSizes <<- c(sampleSizes, validatedSampleSizes)
                    events <<- c(events, stageWiseData$events)
                    if (sum(stats::na.omit(sampleSizes) < 0) > 0) {
                        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all sample sizes must be >= 0")
                    }
                }
                .setParameterType("sampleSizes", C_PARAM_GENERATED)
                .setParameterType("events", C_PARAM_GENERATED)
                .setParameterType("overallSampleSizes", C_PARAM_USER_DEFINED)
                .setParameterType("overallEvents", C_PARAM_USER_DEFINED)
            } else {
                stop(
                    C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                    "sample sizes are missing or not correctly specified"
                )
            }

            if (sum(stats::na.omit(events) < 0) > 0) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all events must be >= 0")
            }

            .recreateDataFrame()

            if (.enrichmentEnabled) {
                .createOverallDataEnrichment()
            }
        },
        # Rebuilds the internal '.data' data frame from the field vectors, then re-syncs
        # the fields from the (re-ordered) data frame.
        .recreateDataFrame = function() {
            callSuper()
            .data <<- cbind(.data, data.frame(
                sampleSize = sampleSizes,
                event = events,
                overallSampleSize = overallSampleSizes,
                overallEvent = overallEvents
            ))
            .orderDataByStageAndGroup()
            .setDataToVariables()
        },
        # Copies the (possibly re-ordered) '.data' columns back into the field vectors.
        .setDataToVariables = function() {
            callSuper()
            sampleSizes <<- .data$sampleSize
            events <<- .data$event
            overallSampleSizes <<- .data$overallSampleSize
            overallEvents <<- .data$overallEvent
        },
        # Pads all field vectors with NAs up to 'kMax' stages and rebuilds '.data'.
        .fillWithNAs = function(kMax) {
            callSuper(kMax)
            n <- .getNumberOfNAsToAdd(kMax)

            sampleSizes <<- c(sampleSizes, rep(NA_real_, n))
            events <<- c(events, rep(NA_real_, n))
            overallSampleSizes <<- c(overallSampleSizes, rep(NA_real_, n))
            overallEvents <<- c(overallEvents, rep(NA_real_, n))
            .recreateDataFrame()
        },
        # Trims all field vectors to the row indices returned by the base class
        # (e.g. dropping padded stages); returns invisibly whether trimming happened.
        .trim = function(kMax = NA_integer_) {
            indices <- callSuper(kMax)
            if (length(indices) == 0) {
                return(invisible(FALSE))
            }

            sampleSizes <<- sampleSizes[indices]
            events <<- events[indices]
            overallSampleSizes <<- overallSampleSizes[indices]
            overallEvents <<- overallEvents[indices]

            .recreateDataFrame()

            return(invisible(TRUE))
        },
        # Expands the aggregated counts into one 0/1 row per subject: for each
        # stage/group (/subset) cell, 'event' randomly chosen subjects out of
        # 'sampleSize' get a 1. Uses sample(), so results depend on the RNG state.
        getRandomData = function() {
            data <- NULL
            for (stage in 1:getNumberOfStages()) {
                for (group in 1:getNumberOfGroups()) {
                    if (.enrichmentEnabled) {
                        for (subset in levels(.data$subset)) {
                            n <- getSampleSize(stage = stage, group = group, subset = subset)
                            numberOfEvents <- getEvent(stage = stage, group = group, subset = subset)
                            randomIndizes <- sample(x = c(1:n), size = numberOfEvents, replace = FALSE)
                            randomData <- rep(0, n)
                            randomData[randomIndizes] <- 1
                            row <- data.frame(
                                stage = stage,
                                group = group,
                                subset = subset,
                                randomData = randomData
                            )
                            if (is.null(data)) {
                                data <- row
                            } else {
                                data <- rbind(data, row)
                            }
                        }
                    } else {
                        n <- getSampleSize(stage = stage, group = group)
                        numberOfEvents <- getEvent(stage = stage, group = group)
                        randomIndizes <- sample(x = c(1:n), size = numberOfEvents, replace = FALSE)
                        randomData <- rep(0, n)
                        randomData[randomIndizes] <- 1
                        row <- data.frame(
                            stage = stage,
                            group = group,
                            randomData = randomData
                        )
                        if (is.null(data)) {
                            data <- row
                        } else {
                            data <- rbind(data, row)
                        }
                    }
                }
            }
            data$stage <- factor(data$stage)
            # NOTE(review): 'label' relies on R's partial matching of the 'labels'
            # argument of factor() — works, but 'labels' would be the exact name.
            data$group <- factor(data$group, label = paste("Group", c(1:getNumberOfGroups())))
            return(data)
        },
        # For enrichment datasets: recomputes the cumulative columns per
        # subset x group cell as cumulative sums of the stage-wise columns.
        .createOverallDataEnrichment = function() {
            if (!.enrichmentEnabled) {
                return(invisible())
            }

            .data$overallSampleSize <<- rep(NA_real_, nrow(.data))
            .data$overallEvent <<- rep(NA_real_, nrow(.data))
            for (s in levels(.data$subset)) {
                for (g in levels(.data$group)) {
                    indices <- which(.data$subset == s & .data$group == g)
                    .data$overallSampleSize[indices] <<- cumsum(.data$sampleSize[indices])
                    .data$overallEvent[indices] <<- cumsum(.data$event[indices])
                }
            }
            .setDataToVariables()
        },
        .getOverallData = function(dataInput, kMax, stage) {
            "Calculates cumulative values if stage-wise data is available"
            if (is.null(dataInput[["sampleSizes"]])) {
                stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'sampleSizes'")
            }
            if (is.null(dataInput[["events"]])) {
                stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data input must contain variable 'events'")
            }
            # Cumulate only the observed stages 1..stage; pad planned-but-unobserved
            # stages (stage+1..kMax) with NA.
            dataInput$overallSampleSizes <- c(
                cumsum(dataInput$sampleSizes[1:stage]),
                rep(NA_real_, kMax - stage)
            )
            dataInput$overallEvents <- c(
                cumsum(dataInput$events[1:stage]),
                rep(NA_real_, kMax - stage)
            )
            return(dataInput)
        },
        .getStageWiseData = function(dataInput, kMax, stage) {
            "Calculates stage-wise values if cumulative data is available"
            if (is.null(dataInput[["overallSampleSizes"]])) {
                stop(
                    C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                    "data input must contain variable 'overallSampleSizes'"
                )
            }
            if (is.null(dataInput[["overallEvents"]])) {
                stop(
                    C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                    "data input must contain variable 'overallEvents'"
                )
            }
            # Stage-wise value k (k >= 2) is the difference of consecutive cumulative
            # values; stage 1 equals the first cumulative value. Unobserved stages -> NA.
            dataInput$sampleSizes <- c(dataInput$overallSampleSizes[1:stage], rep(NA_real_, kMax - stage))
            if (stage > 1) {
                dataInput$sampleSizes[2:stage] <- dataInput$overallSampleSizes[2:stage] -
                    dataInput$overallSampleSizes[1:(stage - 1)]
            }
            dataInput$events <- c(dataInput$overallEvents[1:stage], rep(NA_real_, kMax - stage))
            if (stage > 1) {
                dataInput$events[2:stage] <- dataInput$overallEvents[2:stage] -
                    dataInput$overallEvents[1:(stage - 1)]
            }
            return(dataInput)
        }
    )
)

#'
#' @name DatasetSurvival
#'
#' @title
#' Dataset of Survival Data
#'
#' @description
#' Class for a dataset of survival data.
#'
#' @template field_groups
#' @template field_stages
#' @template field_events
#' @template field_overallEvents
#' @template field_allocationRatios
#' @template field_overallAllocationRatios
#' @template field_logRanks
#' @template field_overallLogRanks
#'
#'
#' @details
#' This object cannot be created directly; better use \code{\link{getDataset}}
#' with suitable arguments to create a dataset of survival data.
#' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' DatasetSurvival <- setRefClass("DatasetSurvival", contains = "Dataset", fields = list( overallEvents = "numeric", overallAllocationRatios = "numeric", overallLogRanks = "numeric", events = "numeric", allocationRatios = "numeric", logRanks = "numeric" ), methods = list( getEvent = function(stage, group = 1, subset = NA_character_) { return(.data$event[.getIndices(stage = stage, group = group, subset = subset)]) }, getEvents = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) { return(.data$event[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)]) }, getEventsUpTo = function(to, group = 1, subset = NA_character_) { return(.data$event[.getIndices(stage = c(1:to), group = group, subset = subset)]) }, getAllocationRatio = function(stage, group = 1, subset = NA_character_) { return(.data$allocationRatio[.getIndices(stage = stage, group = group, subset = subset)]) }, getAllocationRatios = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) { return(.data$allocationRatio[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)]) }, getAllocationRatiosUpTo = function(to, group = 1, subset = NA_character_) { return(.data$allocationRatio[.getIndices(stage = c(1:to), group = group, subset = subset)]) }, getLogRank = function(stage, group = 1, subset = NA_character_) { return(.data$logRank[.getIndices(stage = stage, group = group, subset = subset)]) }, getLogRanks = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) { return(.data$logRank[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)]) }, getLogRanksUpTo = function(to, group = 1, subset = NA_character_) { return(.data$logRank[.getIndices(stage = c(1:to), group = group, subset = subset)]) }, 
getOverallEvent = function(stage, group = 1, subset = NA_character_) { return(.data$overallEvent[.getIndices(stage = stage, group = group, subset = subset)]) }, getOverallEvents = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) { return(.data$overallEvent[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)]) }, getOverallEventsUpTo = function(to, group = 1, subset = NA_character_) { return(.data$overallEvent[.getIndices(stage = c(1:to), group = group, subset = subset)]) }, getOverallAllocationRatio = function(stage, group = 1, subset = NA_character_) { return(.data$overallAllocationRatio[.getIndices(stage = stage, group = group, subset = subset)]) }, getOverallAllocationRatios = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) { return(.data$overallAllocationRatio[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)]) }, getOverallAllocationRatiosUpTo = function(to, group = 1, subset = NA_character_) { return(.data$overallAllocationRatio[.getIndices(stage = c(1:to), group = group, subset = subset)]) }, getOverallLogRank = function(stage, group = 1, subset = NA_character_) { return(.data$overallLogRank[.getIndices(stage = stage, group = group, subset = subset)]) }, getOverallLogRanks = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) { return(.data$overallLogRank[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)]) }, getOverallLogRanksUpTo = function(to, group = 1, subset = NA_character_) { return(.data$overallLogRank[.getIndices(stage = c(1:to), group = group, subset = subset)]) }, .getAllocationRatioDefaultValues = function(stages, events, logRanks) { allocationRatioDefaultValues <- rep(C_ALLOCATION_RATIO_DEFAULT, length(stages)) indices <- which(is.na(events) | is.na(logRanks)) allocationRatioDefaultValues[indices] <- NA_real_ return(allocationRatioDefaultValues) }, .initByDataFrame = 
function(dataFrame) { callSuper(dataFrame) if (inherits(.self, "DatasetEnrichmentSurvival")) { if (.paramExists(dataFrame, C_KEY_WORDS_EXPECTED_EVENTS) || .paramExists(dataFrame, C_KEY_WORDS_VARIANCE_EVENTS)) { .inputType <<- "stagewise" events <<- .getValidatedFloatingPointNumbers( .getValuesByParameterName(dataFrame, C_KEY_WORDS_EVENTS), parameterName = "Events" ) .validateValues(events, "events") allocationRatios <<- .getValuesByParameterName( dataFrame, C_KEY_WORDS_ALLOCATION_RATIOS, defaultValues = .getAllocationRatioDefaultValues(stages, events, expectedEvents) ) .validateValues(allocationRatios, "allocationRatios") } else if (.paramExists(dataFrame, C_KEY_WORDS_OVERALL_EXPECTED_EVENTS) || .paramExists(dataFrame, C_KEY_WORDS_OVERALL_VARIANCE_EVENTS)) { .inputType <<- "overall" overallEvents <<- .getValidatedFloatingPointNumbers( .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS), parameterName = "Cumulative events" ) .validateValues(overallEvents, "overallEvents") overallAllocationRatios <<- .getValuesByParameterName( dataFrame, parameterNameVariants = C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, defaultValues = .getAllocationRatioDefaultValues(stages, overallEvents, overallExpectedEvents) ) .validateValues(overallAllocationRatios, "overallAllocationRatios") } # stratified enrichment: do nothing more here } # case: survival, two groups - overall else if (.paramExists(dataFrame, C_KEY_WORDS_OVERALL_LOG_RANKS)) { .inputType <<- "overall" overallEvents <<- .getValidatedFloatingPointNumbers( .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS), parameterName = "Cumulative events" ) .validateValues(overallEvents, "overallEvents") if (!.enrichmentEnabled) { .assertValuesAreStrictlyIncreasing(overallEvents, "overallEvents", endingNasAllowed = TRUE) } overallLogRanks <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_LOG_RANKS) .validateValues(overallLogRanks, "overallLogRanks") overallAllocationRatios <<- .getValuesByParameterName( 
dataFrame, parameterNameVariants = C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, defaultValues = .getAllocationRatioDefaultValues(stages, overallEvents, overallLogRanks) ) .validateValues(overallAllocationRatios, "overallAllocationRatios") .setParameterType("groups", C_PARAM_NOT_APPLICABLE) } # case: survival, two groups - stage wise else if (.paramExists(dataFrame, C_KEY_WORDS_LOG_RANKS)) { .inputType <<- "stagewise" events <<- .getValidatedFloatingPointNumbers(.getValuesByParameterName( dataFrame, C_KEY_WORDS_EVENTS ), parameterName = "Events") .validateValues(events, "events") if (any(stats::na.omit(events) < 0)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all events must be >= 0") } logRanks <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_LOG_RANKS) .validateValues(logRanks, "logRanks") allocationRatios <<- .getValuesByParameterName( dataFrame, C_KEY_WORDS_ALLOCATION_RATIOS, defaultValues = .getAllocationRatioDefaultValues(stages, events, logRanks) ) .validateValues(allocationRatios, "allocationRatios") .setParameterType("groups", C_PARAM_NOT_APPLICABLE) } # case: survival, three ore more groups - overall else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_LOG_RANKS, 1)) && .paramExists(dataFrame, paste0(C_KEY_WORDS_OVERALL_LOG_RANKS, 2))) { .inputType <<- "overall" numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_OVERALL_LOG_RANKS) stages <<- rep(stages, numberOfTreatmentGroups) groups <<- integer(0) overallEvents <<- numeric(0) overallAllocationRatios <<- numeric(0) overallLogRanks <<- numeric(0) for (group in 1:numberOfTreatmentGroups) { overallEventsTemp <- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS, suffix = group ) .validateValues(overallEventsTemp, paste0("overallEvents", group)) if (is.null(dataFrame[["subset"]]) || length(unique(dataFrame[["subset"]])) <= 1) { .assertValuesAreStrictlyIncreasing(overallEventsTemp, paste0("overallEvents", group), endingNasAllowed = TRUE ) } overallEvents <<- c(overallEvents, 
overallEventsTemp) overallLogRanksTemp <- .getValuesByParameterName( dataFrame, C_KEY_WORDS_OVERALL_LOG_RANKS, suffix = group ) .validateValues(overallLogRanksTemp, paste0("overallLogRanks", group)) overallLogRanks <<- c(overallLogRanks, overallLogRanksTemp) overallAllocationRatiosTemp <- .getValuesByParameterName( dataFrame, C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, suffix = group, defaultValues = .getAllocationRatioDefaultValues( overallEventsTemp, overallEventsTemp, overallLogRanksTemp ) ) .validateValues(overallAllocationRatiosTemp, paste0("overallAllocationRatios", group)) overallAllocationRatios <<- c(overallAllocationRatios, overallAllocationRatiosTemp) groups <<- c(groups, rep(as.integer(group), length(overallLogRanksTemp))) } } # case: survival, three ore more groups - stage wise else if (.paramExists(dataFrame, paste0(C_KEY_WORDS_LOG_RANKS, 1)) && .paramExists(dataFrame, paste0(C_KEY_WORDS_LOG_RANKS, 2))) { .inputType <<- "stagewise" numberOfTreatmentGroups <- .getNumberOfGroups(dataFrame, C_KEY_WORDS_LOG_RANKS) stages <<- rep(stages, numberOfTreatmentGroups) groups <<- integer(0) events <<- numeric(0) allocationRatios <<- numeric(0) logRanks <<- numeric(0) for (group in 1:numberOfTreatmentGroups) { eventsTemp <- .getValidatedFloatingPointNumbers(.getValuesByParameterName( dataFrame, C_KEY_WORDS_EVENTS, suffix = group ), parameterName = "Events") if (any(stats::na.omit(eventsTemp) < 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all events must be >= 0, but 'events", group, "' = ", .arrayToString(eventsTemp, vectorLookAndFeelEnabled = TRUE) ) } events <<- c(events, eventsTemp) logRanksTemp <- .getValuesByParameterName( dataFrame, C_KEY_WORDS_LOG_RANKS, suffix = group ) .validateValues(logRanksTemp, paste0("n", group)) logRanks <<- c(logRanks, logRanksTemp) allocationRatiosTemp <- .getValuesByParameterName( dataFrame, C_KEY_WORDS_ALLOCATION_RATIOS, suffix = group, defaultValues = .getAllocationRatioDefaultValues( eventsTemp, eventsTemp, logRanksTemp ) ) 
.validateValues(allocationRatiosTemp, paste0("allocationRatios", group)) allocationRatios <<- c(allocationRatios, allocationRatiosTemp) groups <<- c(groups, rep(as.integer(group), length(eventsTemp))) } } else { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "unable to identify case for ", .getClassName(.self), " and columns ", .arrayToString(colnames(dataFrame)) ) } if (.inputType == "stagewise") { n <- length(events) overallEvents <<- rep(NA_real_, n) overallAllocationRatios <<- rep(NA_real_, n) overallLogRanks <<- rep(NA_real_, n) .setParameterType("events", C_PARAM_USER_DEFINED) .setParameterType("allocationRatios", C_PARAM_USER_DEFINED) if (!inherits(.self, "DatasetEnrichmentSurvival")) { .setParameterType("logRanks", C_PARAM_USER_DEFINED) } .setParameterType("overallEvents", C_PARAM_GENERATED) .setParameterType("overallAllocationRatios", C_PARAM_GENERATED) if (!inherits(.self, "DatasetEnrichmentSurvival")) { .setParameterType("overallLogRanks", C_PARAM_GENERATED) } if (!inherits(.self, "DatasetEnrichmentSurvival")) { .recreateDataFrame() .createOverallData() } } else { n <- length(overallEvents) events <<- rep(NA_real_, n) allocationRatios <<- rep(NA_real_, n) logRanks <<- rep(NA_real_, n) .setParameterType("events", C_PARAM_GENERATED) .setParameterType("allocationRatios", C_PARAM_GENERATED) if (!inherits(.self, "DatasetEnrichmentSurvival")) { .setParameterType("logRanks", C_PARAM_GENERATED) } .setParameterType("overallEvents", C_PARAM_USER_DEFINED) .setParameterType("overallAllocationRatios", C_PARAM_USER_DEFINED) if (!inherits(.self, "DatasetEnrichmentSurvival")) { .setParameterType("overallLogRanks", C_PARAM_USER_DEFINED) } if (!inherits(.self, "DatasetEnrichmentSurvival")) { .recreateDataFrame() .createStageWiseData() } } }, .recreateDataFrame = function() { callSuper() if (inherits(.self, "DatasetEnrichmentSurvival")) { .data <<- cbind(.data, data.frame( overallEvent = overallEvents, overallExpectedEvent = overallExpectedEvents, overallVarianceEvent = 
overallVarianceEvents, overallAllocationRatio = overallAllocationRatios, event = events, expectedEvent = expectedEvents, # varianceEvent = varianceEvents, # maybe implemented later allocationRatio = allocationRatios )) } else { .data <<- cbind(.data, data.frame( overallEvent = overallEvents, overallAllocationRatio = overallAllocationRatios, overallLogRank = overallLogRanks, event = events, allocationRatio = allocationRatios, logRank = logRanks )) } .orderDataByStageAndGroup() .setDataToVariables() }, .setDataToVariables = function() { callSuper() overallEvents <<- .data$overallEvent overallAllocationRatios <<- .data$overallAllocationRatio events <<- .data$event allocationRatios <<- .data$allocationRatio if (!inherits(.self, "DatasetEnrichmentSurvival")) { overallLogRanks <<- .data$overallLogRank logRanks <<- .data$logRank } }, .fillWithNAs = function(kMax) { callSuper(kMax) n <- .getNumberOfNAsToAdd(kMax) overallEvents <<- c(overallEvents, rep(NA_real_, n)) overallAllocationRatios <<- c(overallAllocationRatios, rep(NA_real_, n)) overallLogRanks <<- c(overallLogRanks, rep(NA_real_, n)) events <<- c(events, rep(NA_real_, n)) allocationRatios <<- c(allocationRatios, rep(NA_real_, n)) logRanks <<- c(logRanks, rep(NA_real_, n)) .recreateDataFrame() }, .trim = function(kMax = NA_integer_) { indices <- callSuper(kMax) if (length(indices) == 0) { return(invisible(FALSE)) } events <<- events[indices] allocationRatios <<- allocationRatios[indices] logRanks <<- logRanks[indices] overallEvents <<- overallEvents[indices] overallAllocationRatios <<- overallAllocationRatios[indices] overallLogRanks <<- overallLogRanks[indices] .recreateDataFrame() return(invisible(TRUE)) }, getRandomData = function() { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "the function 'DatasetSurvival.getRandomData()' is not implemented yet" ) }, .getOverallLogRanks = function(logRanks, events, overallEvents, kMax = length(logRanks), stage = length(logRanks)) { result <- c(logRanks[1:stage], rep(NA_real_, kMax 
- stage)) if (stage == 1) { return(result) } for (k in 2:stage) { result[k] <- (sqrt(events[k]) * logRanks[k] + sqrt(overallEvents[k - 1]) * result[k - 1]) / sqrt(overallEvents[k]) } return(result) }, .getOverallAllocationRatios = function(allocationRatios, events, overallEvents, kMax = length(allocationRatios), stage = length(allocationRatios)) { result <- c( allocationRatios[1:stage], rep(NA_real_, kMax - stage) ) if (stage == 1) { return(result) } for (k in 2:stage) { result[k] <- (events[k] * allocationRatios[k] + overallEvents[k - 1] * result[k - 1]) / overallEvents[k] } return(result) }, .createOverallData = function() { .data$overallEvent <<- rep(NA_real_, nrow(.data)) if (inherits(.self, "DatasetEnrichmentSurvival")) { .data$overallExpectedEvent <<- rep(NA_real_, nrow(.data)) .data$overallVarianceEvent <<- rep(NA_real_, nrow(.data)) } else { .data$overallLogRank <<- rep(NA_real_, nrow(.data)) } .data$overallAllocationRatio <<- rep(NA_real_, nrow(.data)) subsetLevels <- NA_character_ if (.enrichmentEnabled) { subsetLevels <- levels(.data$subset) } for (s in subsetLevels) { for (g in levels(.data$group)) { if (!is.na(s)) { indices <- which(.data$subset == s & .data$group == g) } else { indices <- which(.data$group == g) } .data$overallEvent[indices] <<- cumsum(.data$event[indices]) .data$overallExpectedEvent[indices] <<- cumsum(.data$expectedEvent[indices]) # .data$overallVarianceEvent[indices] <<- # maybe implemented later .data$overallLogRank[indices] <<- .getOverallLogRanks( .data$logRank[indices], .data$event[indices], .data$overallEvent[indices] ) .data$overallAllocationRatio[indices] <<- .getOverallAllocationRatios( .data$allocationRatio[indices], .data$event[indices], .data$overallEvent[indices] ) } } .setDataToVariables() }, .getStageWiseEvents = function(overallEvents) { result <- overallEvents if (length(result) == 1) { return(result) } kMax <- length(result) result[2:kMax] <- overallEvents[2:kMax] - overallEvents[1:(kMax - 1)] return(result) }, 
.getStageWiseLogRanks = function(overallLogRanks, overallEvents) { result <- overallLogRanks if (length(result) == 1) { return(result) } kMax <- length(result) result[2:kMax] <- (sqrt(overallEvents[2:kMax]) * overallLogRanks[2:kMax] - sqrt(overallEvents[1:(kMax - 1)]) * overallLogRanks[1:(kMax - 1)]) / sqrt(overallEvents[2:kMax] - overallEvents[1:(kMax - 1)]) return(result) }, .getStageWiseAllocationRatios = function(overallAllocationRatios, events, overallEvents) { result <- overallAllocationRatios if (length(result) == 1) { return(result) } kMax <- length(result) result[2:kMax] <- ( overallAllocationRatios[2:kMax] - overallAllocationRatios[1:(kMax - 1)] * overallEvents[1:(kMax - 1)] / overallEvents[2:kMax] ) / (events[2:kMax] / overallEvents[2:kMax]) if (any(stats::na.omit(result) <= 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "overall allocation ratios not correctly specified: ", "one or more calculated stage-wise allocation ratios <= 0" ) } return(result) }, .createStageWiseData = function() { "Calculates stage-wise logrank statistics, events, and allocation ratios if cumulative data is available" .data$event <<- rep(NA_real_, nrow(.data)) if (inherits(.self, "DatasetEnrichmentSurvival")) { .data$expectedEvent <<- rep(NA_real_, nrow(.data)) .data$varianceEvent <<- rep(NA_real_, nrow(.data)) } else { .data$logRank <<- rep(NA_real_, nrow(.data)) } .data$allocationRatio <<- rep(NA_real_, nrow(.data)) subsetLevels <- NA_character_ if (.enrichmentEnabled) { subsetLevels <- levels(.data$subset) } for (s in subsetLevels) { for (g in levels(.data$group)) { if (!is.na(s)) { indices <- which(.data$subset == s & .data$group == g) } else { indices <- which(.data$group == g) } groupNumber <- ifelse(levels(.data$group) > 1, g, "") if (.enrichmentEnabled) { .assertValuesAreStrictlyIncreasing(.data$overallEvent[indices], paste0("overallEvents", groupNumber, "[subset == \"", s, "\"]"), endingNasAllowed = TRUE ) } else { 
.assertValuesAreStrictlyIncreasing(.data$overallEvent[indices], paste0("overallEvents", groupNumber), endingNasAllowed = TRUE ) } .data$event[indices] <<- .getStageWiseEvents(.data$overallEvent[indices]) if (inherits(.self, "DatasetEnrichmentSurvival")) { .data$expectedEvent[indices] <<- .getStageWiseEvents(.data$overallExpectedEvent[indices]) # .data$varianceEvent[indices] <<- # maybe implemented later } else { .data$logRank[indices] <<- .getStageWiseLogRanks( .data$overallLogRank[indices], .data$overallEvent[indices] ) } .data$allocationRatio[indices] <<- .getStageWiseAllocationRatios( .data$overallAllocationRatio[indices], .data$event[indices], .data$overallEvent[indices] ) } } .setDataToVariables() } ) ) #' #' @rdname DatasetSurvival #' #' @keywords internal #' DatasetEnrichmentSurvival <- setRefClass("DatasetEnrichmentSurvival", contains = "DatasetSurvival", fields = list( expectedEvents = "numeric", varianceEvents = "numeric", overallExpectedEvents = "numeric", overallVarianceEvents = "numeric" ), methods = list( .initByDataFrame = function(dataFrame) { callSuper(dataFrame) if (.paramExists(dataFrame, C_KEY_WORDS_OVERALL_EXPECTED_EVENTS) || .paramExists(dataFrame, C_KEY_WORDS_OVERALL_VARIANCE_EVENTS)) { if (!.paramExists(dataFrame, C_KEY_WORDS_OVERALL_EXPECTED_EVENTS)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'overallExpectedEvents' is missing") } if (!.paramExists(dataFrame, C_KEY_WORDS_OVERALL_VARIANCE_EVENTS)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'overallVarianceEvents' is missing") } .inputType <<- "overall" overallEvents <<- .getValidatedFloatingPointNumbers( .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EVENTS), parameterName = "Cumulative events" ) .validateValues(overallEvents, "overallEvents") overallExpectedEvents <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_OVERALL_EXPECTED_EVENTS) .validateValues(overallExpectedEvents, "overallExpectedEvents") overallVarianceEvents <<- .getValuesByParameterName(dataFrame, 
C_KEY_WORDS_OVERALL_VARIANCE_EVENTS) .validateValues(overallVarianceEvents, "overallVarianceEvents") overallAllocationRatios <<- .getValuesByParameterName( dataFrame, parameterNameVariants = C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, defaultValues = .getAllocationRatioDefaultValues(stages, overallEvents, overallExpectedEvents) ) .validateValues(overallAllocationRatios, "overallAllocationRatios") } else if (.paramExists(dataFrame, C_KEY_WORDS_EXPECTED_EVENTS) || .paramExists(dataFrame, C_KEY_WORDS_VARIANCE_EVENTS)) { if (!.paramExists(dataFrame, C_KEY_WORDS_EXPECTED_EVENTS)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'expectedEvents' is missing") } if (!.paramExists(dataFrame, C_KEY_WORDS_VARIANCE_EVENTS)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'varianceEvents' is missing") } .inputType <<- "stagewise" events <<- .getValidatedFloatingPointNumbers( .getValuesByParameterName(dataFrame, C_KEY_WORDS_EVENTS), parameterName = "Events" ) .validateValues(events, "events") expectedEvents <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_EXPECTED_EVENTS) .validateValues(expectedEvents, "expectedEvents") varianceEvents <<- .getValuesByParameterName(dataFrame, C_KEY_WORDS_VARIANCE_EVENTS) .validateValues(varianceEvents, "varianceEvents") allocationRatios <<- .getValuesByParameterName( dataFrame, parameterNameVariants = C_KEY_WORDS_ALLOCATION_RATIOS, defaultValues = .getAllocationRatioDefaultValues(stages, events, expectedEvents) ) .validateValues(allocationRatios, "allocationRatios") } .setParameterType("groups", C_PARAM_NOT_APPLICABLE) if (.inputType == "stagewise") { n <- length(events) overallExpectedEvents <<- rep(NA_real_, n) overallVarianceEvents <<- rep(NA_real_, n) .setParameterType("events", C_PARAM_USER_DEFINED) .setParameterType("allocationRatios", C_PARAM_USER_DEFINED) .setParameterType("expectedEvents", C_PARAM_USER_DEFINED) .setParameterType("varianceEvents", C_PARAM_USER_DEFINED) .setParameterType("overallEvents", C_PARAM_GENERATED) 
.setParameterType("overallAllocationRatios", C_PARAM_GENERATED) .setParameterType("overallExpectedEvents", C_PARAM_GENERATED) .setParameterType("overallVarianceEvents", C_PARAM_GENERATED) .recreateDataFrame() .createOverallData() } else { n <- length(overallEvents) expectedEvents <<- rep(NA_real_, n) varianceEvents <<- rep(NA_real_, n) .setParameterType("events", C_PARAM_GENERATED) .setParameterType("allocationRatios", C_PARAM_GENERATED) .setParameterType("expectedEvents", C_PARAM_GENERATED) .setParameterType("varianceEvents", C_PARAM_GENERATED) .setParameterType("overallEvents", C_PARAM_USER_DEFINED) .setParameterType("overallAllocationRatios", C_PARAM_USER_DEFINED) .setParameterType("overallExpectedEvents", C_PARAM_USER_DEFINED) .setParameterType("overallVarianceEvents", C_PARAM_USER_DEFINED) .recreateDataFrame() .createStageWiseData() } }, .getVisibleFieldNames = function() { visibleFieldNames <- callSuper() visibleFieldNames <- visibleFieldNames[!(visibleFieldNames %in% c("logRanks", "overallLogRanks"))] return(visibleFieldNames) }, .setDataToVariables = function() { callSuper() overallExpectedEvents <<- .data$overallExpectedEvent overallVarianceEvents <<- .data$overallVarianceEvent expectedEvents <<- .data$expectedEvent }, getOverallExpectedEvent = function(stage, group = 1, subset = NA_character_) { return(.data$overallExpectedEvent[.getIndices(stage = stage, group = group, subset = subset)]) }, getOverallExpectedEvents = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) { return(.data$overallExpectedEvent[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)]) }, getOverallExpectedEventsUpTo = function(to, group = 1, subset = NA_character_) { return(.data$overallExpectedEvent[.getIndices(stage = c(1:to), group = group, subset = subset)]) }, getOverallVarianceEvent = function(stage, group = 1, subset = NA_character_) { return(.data$overallVarianceEvent[.getIndices(stage = stage, group = group, subset = 
subset)])
        },
        getOverallVarianceEvents = function(..., stage = NA_integer_, group = NA_integer_, subset = NA_character_) {
            # Vector accessor: cumulative variance of events for the selected stages/groups/subsets.
            return(.data$overallVarianceEvent[.getIndices(stage = .getValidatedStage(stage), group = group, subset = subset)])
        },
        getOverallVarianceEventsUpTo = function(to, group = 1, subset = NA_character_) {
            # Cumulative variance of events for stages 1..to of the given group (and optional subset).
            return(.data$overallVarianceEvent[.getIndices(stage = c(1:to), group = group, subset = subset)])
        }
    )
)

# Returns TRUE if the parameter 'param' of 'object' contains at least one
# non-integer (i.e., truly floating-point) value; NULL, empty, or all-NA
# vectors yield FALSE.
.isFloatingPointSampleSize <- function(object, param) {
    values <- object[[param]]
    if (is.null(values)) {
        return(FALSE)
    }
    values <- na.omit(values)
    if (length(values) == 0) {
        return(FALSE)
    }
    if (any(floor(values) != values)) {
        return(TRUE)
    }
    return(FALSE)
}

# Returns the maximum number of decimal digits used by 'values' (0 for
# empty/all-NA input). Works on the fixed-notation text representation:
# strip the integer part and the decimal point, then strip any remaining
# non-digit characters and count what is left.
# NOTE(review): format() pads a numeric vector to a common number of
# decimals, so values without a "." only occur for all-integer input —
# presumably guarded by .isFloatingPointSampleSize() at the call site; verify.
.getMaxDigits <- function(values) {
    values <- na.omit(values)
    if (length(values) == 0) {
        return(0)
    }
    values <- trimws(format(values, scientific = FALSE, digits = 15))
    values <- gsub("^\\d*\\.", "", values)
    values <- gsub("\\D", "", values)
    max(nchar(values))
}

#'
#' @title
#' Dataset Summary
#'
#' @description
#' Displays a summary of \code{\link{Dataset}} object.
#'
#' @param object A \code{\link{Dataset}} object.
#' @inheritParams param_digits
#' @inheritParams param_three_dots
#'
#' @details
#' Summarizes the parameters and results of a dataset.
#'
#' @template details_summary
#'
#' @template return_object_summary
#' @template how_to_get_help_for_generics
#'
#' @export
#'
#' @keywords internal
#'
summary.Dataset <- function(object, ..., type = 1, digits = NA_integer_) {
    .warnInCaseOfUnknownArguments(functionName = "summary", ...)
if (type == 1 && inherits(object, "SummaryFactory")) { return(object) } if (type != 1) { return(summary.ParameterSet(object, type = type, digits = digits, ...)) } intervalFormat <- getOption("rpact.summary.intervalFormat", "[%s; %s]") .assertIsValidSummaryIntervalFormat(intervalFormat) summaryFactory <- SummaryFactory(object = object, intervalFormat = intervalFormat) s <- object$.toString() kMax <- object$getNumberOfStages() summaryFactory$title <- .firstCharacterToUpperCase(s) numberOfGroups <- object$getNumberOfGroups() if (numberOfGroups == 1) { groups <- "one sample" } else if (numberOfGroups == 2) { groups <- c("one treatment", "one control group") if (object$isDatasetSurvival()) { groups <- paste0(groups, c(" (1)", " (2)")) } } else { groups <- c(paste0( .integerToWrittenNumber(numberOfGroups - 1), " treatment groups" ), "one control group") if (object$isDatasetSurvival()) { groups <- paste0(groups, c( paste0(" (", .arrayToString(1:(numberOfGroups - 1)), ")"), paste0(" (", numberOfGroups, ")") )) } } prefix <- "" if (object$isDatasetMeans()) { prefix <- "the sample sizes, means, and standard deviations of " } else if (object$isDatasetRates()) { prefix <- "the sample sizes and events of " } else if (object$isDatasetSurvival()) { prefix <- "the events and log rank statistics of the comparison of " } if (numberOfGroups > 1) { prefix <- paste0(prefix, "\n") } header <- paste0( "The dataset contains ", prefix, paste0(groups, collapse = ifelse(object$isDatasetSurvival(), " with ", " and ")) ) if (object$.enrichmentEnabled) { header <- paste0(header, ". 
The data will be analyzed ", ifelse(object$isStratified(), "", "non-"), "stratified") } if (kMax > 1) { header <- paste0( header, ".\nThe total number of looks is ", .integerToWrittenNumber(kMax), "; stage-wise and cumulative data are included" ) } header <- paste0(header, ".") summaryFactory$header <- header digitSettings <- .getSummaryDigits(digits) digits <- digitSettings$digits digitsSampleSize <- 0 digitsGeneral <- digitSettings$digitsGeneral digitsProbabilities <- digitSettings$digitsProbabilities paramsToCheck <- character(0) if (object$isDatasetMeans() || object$isDatasetRates()) { paramsToCheck <- c(paramsToCheck, "sampleSizes") if (kMax > 1) { paramsToCheck <- c(paramsToCheck, "overallSampleSizes") } } else if (object$isDatasetRates() || object$isDatasetSurvival()) { paramsToCheck <- c(paramsToCheck, "events") if (kMax > 1) { paramsToCheck <- c(paramsToCheck, "overallEvents") } } if (length(paramsToCheck) > 0) { for (param in paramsToCheck) { if (.isFloatingPointSampleSize(object, param)) { digitsSampleSize <- max(digitsSampleSize, .getMaxDigits(object[[param]])) } } digitsSampleSize <- min(digitsSampleSize, digits) } summaryFactory$addItem("Stage", object$stages) if (numberOfGroups > 1) { groupNumbers <- object$groups if (object$isDatasetSurvival()) { groupNumbers <- paste0(object$groups, " vs ", numberOfGroups) summaryFactory$addItem("Comparison", groupNumbers) } else { summaryFactory$addItem("Group", groupNumbers) } } if (object$.enrichmentEnabled) { summaryFactory$addItem("Subset", object$subsets) } parameterCaptionPrefix <- ifelse(kMax == 1, "", "Stage-wise ") if (object$isDatasetMeans() || object$isDatasetRates()) { summaryFactory$addParameter(object, parameterName = "sampleSizes", parameterCaption = .firstCharacterToUpperCase(parameterCaptionPrefix, "sample size"), roundDigits = digitsSampleSize ) if (kMax > 1) { summaryFactory$addParameter(object, parameterName = "overallSampleSizes", parameterCaption = "Cumulative sample size", roundDigits = 
digitsSampleSize ) } } if (object$isDatasetMeans()) { summaryFactory$addParameter(object, parameterName = "means", parameterCaption = .firstCharacterToUpperCase(parameterCaptionPrefix, "mean"), roundDigits = digitsGeneral ) if (kMax > 1) { summaryFactory$addParameter(object, parameterName = "overallMeans", parameterCaption = "Cumulative mean", roundDigits = digitsGeneral ) } summaryFactory$addParameter(object, parameterName = "stDevs", parameterCaption = .firstCharacterToUpperCase(parameterCaptionPrefix, "standard deviation"), roundDigits = digitsGeneral ) if (kMax > 1) { summaryFactory$addParameter(object, parameterName = "overallStDevs", parameterCaption = "Cumulative standard deviation", roundDigits = digitsGeneral ) } } else if (object$isDatasetRates()) { summaryFactory$addParameter(object, parameterName = "events", parameterCaption = .firstCharacterToUpperCase(parameterCaptionPrefix, "number of events"), roundDigits = digitsSampleSize ) if (kMax > 1) { summaryFactory$addParameter(object, parameterName = "overallEvents", parameterCaption = "Cumulative number of events", roundDigits = digitsSampleSize ) } } else if (object$isDatasetSurvival()) { summaryFactory$addParameter(object, parameterName = "events", parameterCaption = .firstCharacterToUpperCase(parameterCaptionPrefix, "number of events"), roundDigits = digitsSampleSize ) if (kMax > 1) { summaryFactory$addParameter(object, parameterName = "overallEvents", parameterCaption = "Cumulative number of events", roundDigits = digitsSampleSize ) } summaryFactory$addParameter(object, parameterName = "logRanks", parameterCaption = .firstCharacterToUpperCase(parameterCaptionPrefix, "log rank statistic"), roundDigits = digitsGeneral ) if (kMax > 1) { summaryFactory$addParameter(object, parameterName = "overallLogRanks", parameterCaption = "Cumulative log rank statistic", roundDigits = digitsGeneral ) } if (!any(is.na(object$allocationRatios)) && any(object$allocationRatios != 1)) { summaryFactory$addParameter(object, 
parameterName = "allocationRatios", parameterCaption = .firstCharacterToUpperCase(parameterCaptionPrefix, "allocation ratio"), roundDigits = digitsGeneral ) if (kMax > 1) { summaryFactory$addParameter(object, parameterName = "overallAllocationRatios", parameterCaption = "Cumulative allocation ratio", roundDigits = digitsGeneral ) } } } return(summaryFactory) } .getDatasetArgumentsRCodeLines <- function(x, complete = FALSE, digits = 4) { m <- getWideFormat(x) lines <- character(0) paramNames <- colnames(m) if (!complete) { if (x$.inputType == "stagewise") { paramNames <- paramNames[!grepl("^overall", paramNames)] } else { paramNames <- paramNames[grepl("^(stage|group|subset|overall)", paramNames)] } } for (paramName in paramNames) { encapsulate <- grepl("^subset", paramName) if (!encapsulate || isTRUE(x$.enrichmentEnabled)) { values <- m[[paramName]] if (!encapsulate && is.numeric(values) && !is.null(digits) && length(digits) == 1 && !is.na(digits)) { values <- round(values, digits = digits) } lines <- c(lines, paste0(paramName, " = ", .arrayToString(values, vectorLookAndFeelEnabled = TRUE, encapsulate = encapsulate, digits = NA_integer_ ))) } } return(lines) } #' #' @title #' Print Dataset Values #' #' @description #' \code{print} prints its \code{\link{Dataset}} argument and returns it invisibly (via \code{invisible(x)}). #' #' @param x A \code{\link{Dataset}} object. #' @param markdown If \code{TRUE}, the output will be created in Markdown. #' @param output A character defining the output type, default is "list". #' @inheritParams param_three_dots #' #' @details #' Prints the dataset. 
#'
#' @export
#'
#' @keywords internal
#'
print.Dataset <- function(x, ..., markdown = FALSE, output = c("list", "long", "wide", "r", "rComplete")) {
    # Capture the unevaluated call so the caller's variable name can be reused
    # when generating reconstruction code (output = "r"/"rComplete").
    fCall <- match.call(expand.dots = FALSE)
    datasetName <- deparse(fCall$x)
    output <- match.arg(output)
    if (markdown) {
        # Markdown rendering only supports the default list layout; any other
        # 'output' choice is ignored with a warning.
        if (output != "list") {
            warning("'output' (\"", output, "\") will be ignored ",
                "because only \"list\" is supported yet if markdown is enabled",
                call. = FALSE
            )
        }
        x$.catMarkdownText()
        return(invisible(x))
    }
    if (output == "long") {
        # Long format matrix, printed without row labels or quotes.
        m <- getLongFormat(x)
        m <- prmatrix(m, rowlab = rep("", nrow(m)))
        print(m, quote = FALSE, right = FALSE)
        return(invisible(x))
    } else if (output == "wide") {
        # Wide format matrix, printed without row labels or quotes.
        m <- getWideFormat(x)
        m <- prmatrix(m, rowlab = rep("", nrow(m)))
        print(m, quote = FALSE, right = FALSE)
        return(invisible(x))
    } else if (output %in% c("r", "rComplete")) {
        # Emit runnable R code that reconstructs the dataset via getDataset();
        # fall back to the name "dataInput" if the caller's name is unavailable.
        lines <- .getDatasetArgumentsRCodeLines(x, complete = (output == "rComplete"))
        lines <- paste0("\t", lines)
        if (is.null(datasetName) || length(datasetName) != 1 || is.na(datasetName)) {
            datasetName <- "dataInput"
        }
        cat(datasetName, " <- getDataset(\n", sep = "")
        cat(paste0(lines, collapse = ",\n"), "\n")
        cat(")\n")
        return(invisible(x))
    }
    # Default output = "list": delegate to the reference class show() method.
    x$show()
    return(invisible(x))
}
rpact/R/f_simulation_base_rates.R0000644000176200001440000005650614445307576016613 0ustar liggesusers## |
## | *Simulation of binary data with group sequential and combination test*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

# Default stage-wise sample size recalculation for getSimulationRates():
# returns the number of subjects to recruit at 'stage'. If no conditional
# power target is given, the pre-planned increment is used; otherwise the
# sample size is derived from the normal approximation so that the target
# conditional power is met at the conditional critical value.
# NOTE(review): farringtonManningValue1/2 are presumably the restricted
# ML rate estimates under H0 (Farrington-Manning) supplied by the caller —
# confirm against the calling simulation code.
.getSimulationRatesStageSubjects <- function(...,
        stage,
        riskRatio,
        thetaH0,
        groups,
        plannedSubjects,
        directionUpper,
        allocationRatioPlanned,
        minNumberOfSubjectsPerStage,
        maxNumberOfSubjectsPerStage,
        sampleSizesPerStage,
        conditionalPower,
        conditionalCriticalValue,
        overallRate,
        farringtonManningValue1,
        farringtonManningValue2) {
    # No conditional power specified: use the planned stage-wise increment.
    if (is.na(conditionalPower)) {
        return(plannedSubjects[stage] - plannedSubjects[stage - 1])
    }
    if (groups == 1) {
        # One-sample case. (2 * directionUpper - 1) maps TRUE/FALSE to +1/-1;
        # max(1e-12, ...) guards the division when the observed effect is ~0.
        stageSubjects <- (max(0, conditionalCriticalValue * sqrt(thetaH0 * (1 - thetaH0)) +
            .getQNorm(conditionalPower) * sqrt(overallRate[1] * (1 - overallRate[1]))))^2 /
            (max(1e-12, (2 * directionUpper - 1) * (overallRate[1] - thetaH0)))^2
    } else {
        # Two-sample case: 'mult' and 'corr' switch the formula between the
        # risk-ratio (H0: pi1 / pi2 = thetaH0) and the rate-difference
        # (H0: pi1 - pi2 = thetaH0) parameterization.
        mult <- 1
        corr <- thetaH0
        if (riskRatio) {
            mult <- thetaH0
            corr <- 0
        }
        stageSubjects <- (1 + 1 / allocationRatioPlanned[stage]) *
            (max(0, conditionalCriticalValue *
                sqrt(farringtonManningValue1 * (1 - farringtonManningValue1) +
                    farringtonManningValue2 * (1 - farringtonManningValue2) * allocationRatioPlanned[stage] * mult^2) +
                .getQNorm(conditionalPower) * sqrt(overallRate[1] * (1 - overallRate[1]) +
                    overallRate[2] * (1 - overallRate[2]) * allocationRatioPlanned[stage] * mult^2)))^2 /
            (max(1e-12, (2 * directionUpper - 1) * (overallRate[1] - mult * overallRate[2] - corr)))^2
    }
    # Clamp to the per-stage bounds and round up to a whole number of subjects.
    stageSubjects <- ceiling(min(max(minNumberOfSubjectsPerStage[stage], stageSubjects),
        maxNumberOfSubjectsPerStage[stage]))
    return(stageSubjects)
}

#' @title
#' Get Simulation Rates
#'
#' @description
#' Returns the simulated power, stopping probabilities, conditional power, and expected sample size for
#' testing rates in a one or two treatment groups testing situation.
#'
#' @inheritParams param_design_with_default
#' @inheritParams param_groups
#' @inheritParams param_normalApproximation
#' @param riskRatio If \code{TRUE}, the design characteristics for
#' one-sided testing of H0: \code{pi1 / pi2 = thetaH0} are simulated, default is \code{FALSE}.
#' @inheritParams param_thetaH0 #' @inheritParams param_pi1_rates #' @inheritParams param_pi2_rates #' @inheritParams param_directionUpper #' @inheritParams param_allocationRatioPlanned #' @inheritParams param_plannedSubjects #' @inheritParams param_minNumberOfSubjectsPerStage #' @inheritParams param_maxNumberOfSubjectsPerStage #' @inheritParams param_conditionalPowerSimulation #' @param pi1H1 If specified, the assumed probability in the active treatment group if two treatment groups #' are considered, or the assumed probability for a one treatment group design, for which the conditional #' power was calculated. #' @param pi2H1 If specified, the assumed probability in the reference group if two treatment groups #' are considered, for which the conditional power was calculated. #' @inheritParams param_maxNumberOfIterations #' @inheritParams param_calcSubjectsFunction #' @inheritParams param_seed #' @inheritParams param_three_dots #' @inheritParams param_showStatistics #' #' @details #' At given design the function simulates the power, stopping probabilities, conditional power, and expected #' sample size at given number of subjects and parameter configuration. #' Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number #' of subjects in the two treatment groups. #' #' The definition of \code{pi1H1} and/or \code{pi2H1} makes only sense if \code{kMax} > 1 #' and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and #' \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined. #' #' \code{calcSubjectsFunction}\cr #' This function returns the number of subjects at given conditional power and conditional critical value for specified #' testing situation. 
The function might depend on variables #' \code{stage}, #' \code{riskRatio}, #' \code{thetaH0}, #' \code{groups}, #' \code{plannedSubjects}, #' \code{sampleSizesPerStage}, #' \code{directionUpper}, #' \code{allocationRatioPlanned}, #' \code{minNumberOfSubjectsPerStage}, #' \code{maxNumberOfSubjectsPerStage}, #' \code{conditionalPower}, #' \code{conditionalCriticalValue}, #' \code{overallRate}, #' \code{farringtonManningValue1}, and \code{farringtonManningValue2}. #' The function has to contain the three-dots argument '...' (see examples). #' #' @section Simulation Data: #' The summary statistics "Simulated data" contains the following parameters: median [range]; mean +/-sd\cr #' #' \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable #' the output of the aggregated simulated data.\cr #' #' Example 1: \cr #' \code{simulationResults <- getSimulationRates(plannedSubjects = 40)} \cr #' \code{simulationResults$show(showStatistics = FALSE)}\cr #' #' Example 2: \cr #' \code{simulationResults <- getSimulationRates(plannedSubjects = 40)} \cr #' \code{simulationResults$setShowStatistics(FALSE)}\cr #' \code{simulationResults}\cr #' #' \code{\link[=getData]{getData()}} can be used to get the aggregated simulated data from the #' object as \code{\link[base]{data.frame}}. The data frame contains the following columns: #' \enumerate{ #' \item \code{iterationNumber}: The number of the simulation iteration. #' \item \code{stageNumber}: The stage. #' \item \code{pi1}: The assumed or derived event rate in the treatment group (if available). #' \item \code{pi2}: The assumed or derived event rate in the control group (if available). #' \item \code{numberOfSubjects}: The number of subjects under consideration when the #' (interim) analysis takes place. #' \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise. #' \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise. 
#' \item \code{testStatistic}: The test statistic that is used for the test decision, #' depends on which design was chosen (group sequential, inverse normal, #' or Fisher combination test)' #' \item \code{testStatisticsPerStage}: The test statistic for each stage if only data from #' the considered stage is taken into account. #' \item \code{overallRate1}: The cumulative rate in treatment group 1. #' \item \code{overallRate2}: The cumulative rate in treatment group 2. #' \item \code{stagewiseRates1}: The stage-wise rate in treatment group 1. #' \item \code{stagewiseRates2}: The stage-wise rate in treatment group 2. #' \item \code{sampleSizesPerStage1}: The stage-wise sample size in treatment group 1. #' \item \code{sampleSizesPerStage2}: The stage-wise sample size in treatment group 2. #' \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise. #' \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for #' selected sample size and effect. The effect is either estimated from the data or can be #' user defined with \code{pi1H1} and \code{pi2H1}. 
#' } #' #' @template return_object_simulation_results #' @template how_to_get_help_for_generics #' #' @template examples_get_simulation_rates #' #' @export #' getSimulationRates <- function(design = NULL, ..., groups = 2L, normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = seq(0.2, 0.5, 0.1), # C_PI_1_DEFAULT pi2 = NA_real_, plannedSubjects = NA_real_, directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, pi1H1 = NA_real_, pi2H1 = NA_real_, maxNumberOfIterations = 1000L, # C_MAX_SIMULATION_ITERATIONS_DEFAULT seed = NA_real_, calcSubjectsFunction = NULL, showStatistics = FALSE) { if (is.null(design)) { design <- .getDefaultDesign(..., type = "simulation") .warnInCaseOfUnknownArguments( functionName = "getSimulationRates", ignore = c( .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "showStatistics" ), ... ) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments( functionName = "getSimulationRates", ignore = c("showStatistics"), ... ) .warnInCaseOfTwoSidedPowerArgument(...) 
} .assertIsSingleLogical(directionUpper, "directionUpper") .assertIsSingleNumber(thetaH0, "thetaH0") .assertIsValidGroupsParameter(groups) .assertIsSingleLogical(normalApproximation, "normalApproximation") .assertIsSingleLogical(riskRatio, "riskRatio") if (groups == 1L) { .assertIsInOpenInterval(thetaH0, "thetaH0", 0, 1, naAllowed = FALSE) } else { if (riskRatio) { .assertIsInOpenInterval(thetaH0, "thetaH0", 0, NULL, naAllowed = TRUE) } else { .assertIsInOpenInterval(thetaH0, "thetaH0", -1, 1, naAllowed = TRUE) } } .assertIsNumericVector(pi1, "pi1", naAllowed = FALSE) .assertIsInOpenInterval(pi1, "pi1", 0, 1, naAllowed = FALSE) .assertIsNumericVector(pi2, "pi2", naAllowed = TRUE) .assertIsInOpenInterval(pi2, "pi2", 0, 1, naAllowed = TRUE) .assertIsNumericVector(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", naAllowed = TRUE) .assertIsNumericVector(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", naAllowed = TRUE) .assertIsSingleNumber(conditionalPower, "conditionalPower", naAllowed = TRUE) .assertIsInOpenInterval(conditionalPower, "conditionalPower", 0, 1, naAllowed = TRUE) .assertIsSingleNumber(pi1H1, "pi1H1", naAllowed = TRUE) .assertIsInOpenInterval(pi1H1, "pi1H1", 0, 1, naAllowed = TRUE) .assertIsSingleNumber(pi2H1, "pi2H1", naAllowed = TRUE) .assertIsInOpenInterval(pi2H1, "pi2H1", 0, 1, naAllowed = TRUE) .assertIsNumericVector(allocationRatioPlanned, "allocationRatioPlanned", naAllowed = TRUE) .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM, naAllowed = TRUE) .assertIsSinglePositiveInteger(maxNumberOfIterations, "maxNumberOfIterations", validateType = FALSE) .assertIsSingleNumber(seed, "seed", naAllowed = TRUE) .assertIsSingleLogical(showStatistics, "showStatistics", naAllowed = FALSE) .assertIsValidPlannedSubjectsOrEvents(design, plannedSubjects, parameterName = "plannedSubjects") if (design$sided == 2) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "only one-sided case is implemented 
for the simulation design" ) } if (!normalApproximation && (groups == 2) && (riskRatio || (thetaH0 != 0))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "in the two-sample case, exact test is implemented only for testing H0: pi1 - pi2 = 0" ) } simulationResults <- SimulationResultsRates(design, showStatistics = showStatistics) conditionalPower <- .ignoreParameterIfNotUsed( "conditionalPower", conditionalPower, design$kMax > 1, "design is fixed ('kMax' = 1)" ) minNumberOfSubjectsPerStage <- .ignoreParameterIfNotUsed( "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, design$kMax > 1, "design is fixed ('kMax' = 1)" ) maxNumberOfSubjectsPerStage <- .ignoreParameterIfNotUsed( "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, design$kMax > 1, "design is fixed ('kMax' = 1)" ) minNumberOfSubjectsPerStage <- .assertIsValidNumberOfSubjectsPerStage(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, calcSubjectsFunction, design$kMax, endpoint = "rates" ) maxNumberOfSubjectsPerStage <- .assertIsValidNumberOfSubjectsPerStage(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, calcSubjectsFunction, design$kMax, endpoint = "rates" ) if (design$kMax > 1) { if (any(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage < 0) && !all(is.na(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") must be not smaller than minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ")" ) } .setValueAndParameterType( simulationResults, "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_ ) .setValueAndParameterType( simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_ ) } if (design$kMax == 1 && !is.na(conditionalPower)) { warning("'conditionalPower' will be ignored for fixed sample 
design", call. = FALSE) } if (design$kMax > 1 && is.na(conditionalPower) && is.null(calcSubjectsFunction)) { if (length(minNumberOfSubjectsPerStage) != 1 || !is.na(minNumberOfSubjectsPerStage)) { warning("'minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ") ", "will be ignored because neither 'conditionalPower' nor ", "'calcSubjectsFunction' is defined", call. = FALSE ) simulationResults$minNumberOfSubjectsPerStage <- NA_real_ } if (length(maxNumberOfSubjectsPerStage) != 1 || !is.na(maxNumberOfSubjectsPerStage)) { warning("'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") ", "will be ignored because neither 'conditionalPower' nor ", "'calcSubjectsFunction' is defined", call. = FALSE ) simulationResults$maxNumberOfSubjectsPerStage <- NA_real_ } } pi1H1 <- .ignoreParameterIfNotUsed( "pi1H1", pi1H1, design$kMax > 1, "design is fixed ('kMax' = 1)" ) pi2H1 <- .ignoreParameterIfNotUsed( "pi2H1", pi2H1, design$kMax > 1, "design is fixed ('kMax' = 1)" ) pi1H1 <- .ignoreParameterIfNotUsed("pi1H1", pi1H1, groups == 2, "'groups' = 1") pi2H1 <- .ignoreParameterIfNotUsed("pi2H1", pi2H1, groups == 2, "'groups' = 1") .setValueAndParameterType(simulationResults, "pi2", pi2, NA_real_) if (groups == 1L) { if (isTRUE(riskRatio)) { warning("'riskRatio' (", riskRatio, ") will be ignored ", "because it is not applicable for 'groups' = 1", call. = FALSE ) } if (!is.na(allocationRatioPlanned)) { warning("'allocationRatioPlanned' (", allocationRatioPlanned, ") will be ignored because it is not applicable for 'groups' = 1", call. = FALSE ) simulationResults$allocationRatioPlanned <- NA_real_ } simulationResults$.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE) if (!is.na(pi2)) { warning("'pi2' (", pi2, ") will be ignored because it is not applicable for 'groups' = 1", call. 
= FALSE ) simulationResults$pi2 <- NA_real_ } simulationResults$.setParameterType("pi2", C_PARAM_NOT_APPLICABLE) } else { if (any(is.na(allocationRatioPlanned))) { allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT } if (is.na(pi2)) { pi2 <- C_PI_2_DEFAULT simulationResults$pi2 <- pi2 simulationResults$.setParameterType("pi2", C_PARAM_DEFAULT_VALUE) } if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, design$kMax) } else if (length(allocationRatioPlanned) != design$kMax) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'allocationRatioPlanned' (", .arrayToString(allocationRatioPlanned), ") ", "must have length 1 or ", design$kMax, " (kMax)" ) } if (length(unique(allocationRatioPlanned)) == 1) { .setValueAndParameterType( simulationResults, "allocationRatioPlanned", allocationRatioPlanned[1], defaultValue = 1 ) } else { .setValueAndParameterType( simulationResults, "allocationRatioPlanned", allocationRatioPlanned, defaultValue = rep(1, design$kMax) ) } } if (groups == 1) { effect <- pi1 - thetaH0 } else { if (riskRatio) { effect <- pi1 / pi2 - thetaH0 } else { effect <- pi1 - pi2 - thetaH0 } } simulationResults$effect <- effect simulationResults$.setParameterType( "effect", ifelse(groups == 1 && thetaH0 == 0, C_PARAM_NOT_APPLICABLE, C_PARAM_GENERATED) ) .setValueAndParameterType(simulationResults, "normalApproximation", normalApproximation, TRUE) .setValueAndParameterType(simulationResults, "riskRatio", riskRatio, FALSE) .setValueAndParameterType(simulationResults, "thetaH0", thetaH0, ifelse(riskRatio, 1, 0)) .setValueAndParameterType(simulationResults, "pi1", pi1, C_PI_1_DEFAULT) .setValueAndParameterType(simulationResults, "groups", as.integer(groups), 2L) .setValueAndParameterType( simulationResults, "plannedSubjects", plannedSubjects, NA_real_ ) .setValueAndParameterType( simulationResults, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT ) .setValueAndParameterType(simulationResults, 
"minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE ) .setValueAndParameterType(simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE ) .setValueAndParameterType(simulationResults, "conditionalPower", conditionalPower, NA_real_, notApplicableIfNA = TRUE ) .setValueAndParameterType(simulationResults, "pi1H1", pi1H1, NA_real_, notApplicableIfNA = TRUE ) .setValueAndParameterType(simulationResults, "pi2H1", pi2H1, 0.2, notApplicableIfNA = TRUE) .setValueAndParameterType( simulationResults, "maxNumberOfIterations", as.integer(maxNumberOfIterations), C_MAX_SIMULATION_ITERATIONS_DEFAULT ) simulationResults$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) simulationResults$seed <- .setSeed(seed) if (.isTrialDesignGroupSequential(design)) { designNumber <- 1L } else if (.isTrialDesignInverseNormal(design)) { designNumber <- 2L } else if (.isTrialDesignFisher(design)) { designNumber <- 3L } if (.isTrialDesignFisher(design)) { alpha0Vec <- design$alpha0Vec futilityBounds <- rep(NA_real_, design$kMax - 1) } else { alpha0Vec <- rep(NA_real_, design$kMax - 1) futilityBounds <- design$futilityBounds } calcSubjectsFunctionList <- .getCalcSubjectsFunction( design = design, simulationResults = simulationResults, calcFunction = calcSubjectsFunction, expectedFunction = .getSimulationRatesStageSubjects ) calcSubjectsFunctionType <- calcSubjectsFunctionList$calcSubjectsFunctionType calcSubjectsFunctionR <- calcSubjectsFunctionList$calcSubjectsFunctionR calcSubjectsFunctionCpp <- calcSubjectsFunctionList$calcSubjectsFunctionCpp cppResult <- getSimulationRatesCpp( kMax = design$kMax, informationRates = design$informationRates, criticalValues = design$criticalValues, pi1 = pi1, pi2 = pi2, maxNumberOfIterations = maxNumberOfIterations, designNumber = designNumber, groups = groups, futilityBounds = futilityBounds, alpha0Vec = alpha0Vec, 
minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, conditionalPower = conditionalPower, pi1H1 = pi1H1, pi2H1 = pi2H1, normalApproximation = normalApproximation, plannedSubjects = plannedSubjects, directionUpper = directionUpper, allocationRatioPlanned = allocationRatioPlanned, riskRatio = riskRatio, thetaH0 = thetaH0, calcSubjectsFunctionType = calcSubjectsFunctionType, calcSubjectsFunctionR = calcSubjectsFunctionR, calcSubjectsFunctionCpp = calcSubjectsFunctionCpp ) data <- cppResult$data data <- data[!is.na(data$pi1), ] simulationResults$.data <- data simulationResults$iterations <- cppResult$iterations simulationResults$sampleSizes <- cppResult$sampleSizes simulationResults$rejectPerStage <- cppResult$rejectPerStage simulationResults$overallReject <- cppResult$overallReject simulationResults$futilityPerStage <- cppResult$futilityPerStage simulationResults$futilityStop <- cppResult$futilityStop simulationResults$earlyStop <- cppResult$earlyStop simulationResults$expectedNumberOfSubjects <- cppResult$expectedNumberOfSubjects simulationResults$conditionalPowerAchieved <- cppResult$conditionalPowerAchieved if (!all(is.na(simulationResults$conditionalPowerAchieved))) { simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED) } return(simulationResults) } rpact/R/f_analysis_enrichment.R0000644000176200001440000004302314445307575016273 0ustar liggesusers## | ## | *Analysis of enrichment designs with adaptive test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about 
our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_core_utilities.R
NULL

#'
#' @title
#' Get Enrichment Analysis Results
#'
#' @description
#' Calculates and returns the analysis results for the specified design and data.
#' Validates the design/data combination, resolves a default 'thetaH0' per endpoint
#' type, and dispatches to the means, rates, or survival enrichment implementation.
#'
#' @noRd
#'
.getAnalysisResultsEnrichment <- function(design, dataInput, ...,
        intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        thetaH0 = NA_real_,
        nPlanned = NA_real_) {
    .assertIsTrialDesignInverseNormalOrFisher(design)
    .assertIsValidIntersectionTestEnrichment(design, intersectionTest)
    .assertIsOneSidedDesign(design, designType = "enrichment", engineType = "analysis")
    # stage may be passed via '...'; otherwise derived from the data input
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design, showWarnings = TRUE)
    .assertIsSingleLogical(directionUpper, "directionUpper")
    .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage)
    # ensure temporary data-input state is cleaned up on every exit path
    on.exit(dataInput$.trim())
    .assertIsValidThetaH0DataInput(thetaH0, dataInput)
    .assertIsValidNPlanned(nPlanned, design$kMax, stage, required = FALSE)

    # dispatch on the dataset's endpoint type; each branch fills in the
    # endpoint-specific default for 'thetaH0' when the user left it NA
    if (dataInput$isDatasetMeans()) {
        if (is.na(thetaH0)) {
            thetaH0 <- C_THETA_H0_MEANS_DEFAULT
        }
        return(.getAnalysisResultsMeansEnrichment(
            design = design, dataInput = dataInput,
            intersectionTest = intersectionTest,
            directionUpper = directionUpper,
            thetaH0 = thetaH0, nPlanned = nPlanned,
            stage = stage, ...
        ))
    }

    if (dataInput$isDatasetRates()) {
        if (is.na(thetaH0)) {
            thetaH0 <- C_THETA_H0_RATES_DEFAULT
        }
        return(.getAnalysisResultsRatesEnrichment(
            design = design, dataInput = dataInput,
            intersectionTest = intersectionTest,
            directionUpper = directionUpper,
            thetaH0 = thetaH0, nPlanned = nPlanned,
            stage = stage, ...
        ))
    }

    if (dataInput$isDatasetSurvival()) {
        if (is.na(thetaH0)) {
            thetaH0 <- C_THETA_H0_SURVIVAL_DEFAULT
        }
        return(.getAnalysisResultsSurvivalEnrichment(
            design = design, dataInput = dataInput,
            intersectionTest = intersectionTest,
            directionUpper = directionUpper,
            thetaH0 = thetaH0, nPlanned = nPlanned,
            stage = stage, ...
        ))
    }

    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not implemented yet")
}

#'
#' Get Stage Results
#'
#' Returns summary statistics and p-values for a given data set and a given enrichment design.
#' Pure dispatcher: validates inputs, then forwards to the endpoint-specific stage-results
#' implementation with 'userFunctionCallEnabled = TRUE'.
#'
#' @noRd
#'
.getStageResultsEnrichment <- function(design, dataInput, ...) {
    .assertIsTrialDesignInverseNormalOrFisher(design)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage)
    on.exit(dataInput$.trim())
    if (dataInput$isDatasetMeans()) {
        return(.getStageResultsMeansEnrichment(design = design, dataInput = dataInput, userFunctionCallEnabled = TRUE, ...))
    }
    if (dataInput$isDatasetRates()) {
        return(.getStageResultsRatesEnrichment(design = design, dataInput = dataInput, userFunctionCallEnabled = TRUE, ...))
    }
    if (dataInput$isDatasetSurvival()) {
        return(.getStageResultsSurvivalEnrichment(design = design, dataInput = dataInput, userFunctionCallEnabled = TRUE, ...))
    }
    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not supported")
}

#'
#' Get Repeated Confidence Intervals for enrichment case
#'
#' Calculates and returns the lower and upper limit of the repeated confidence intervals
#' of the trial for enrichment designs. Dispatches on the dataset's endpoint type.
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsEnrichment <- function(design, dataInput, ...) {
    .assertIsTrialDesignInverseNormalOrFisher(design)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage)
    on.exit(dataInput$.trim())
    if (dataInput$isDatasetMeans()) {
        return(.getRepeatedConfidenceIntervalsMeansEnrichment(
            design = design, dataInput = dataInput, ...
        ))
    }
    if (dataInput$isDatasetRates()) {
        return(.getRepeatedConfidenceIntervalsRatesEnrichment(
            design = design, dataInput = dataInput, ...
        ))
    }
    if (dataInput$isDatasetSurvival()) {
        return(.getRepeatedConfidenceIntervalsSurvivalEnrichment(
            design = design, dataInput = dataInput, ...
        ))
    }
    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not implemented yet")
}

#'
#' Get Conditional Power for enrichment case
#'
#' Calculates and returns the conditional power for enrichment case.
#' For means data, warns when the single-arm argument name 'assumedStDev'
#' is passed instead of the enrichment name 'assumedStDevs'.
#'
#' @noRd
#'
.getConditionalPowerEnrichment <- function(..., stageResults, nPlanned,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT) {
    .assertIsStageResults(stageResults)
    if (stageResults$isDatasetMeans()) {
        # common user mistake: the enrichment API uses the plural argument name
        if ("assumedStDev" %in% names(list(...))) {
            warning("For enrichment analysis the argument for assumed standard deviation ",
                "is named 'assumedStDevs' and not 'assumedStDev'",
                call. = FALSE
            )
        }
        return(.getConditionalPowerMeansEnrichment(
            stageResults = stageResults,
            nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        ))
    }
    if (stageResults$isDatasetRates()) {
        return(.getConditionalPowerRatesEnrichment(
            stageResults = stageResults,
            nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        ))
    }
    if (stageResults$isDatasetSurvival()) {
        return(.getConditionalPowerSurvivalEnrichment(
            stageResults = stageResults,
            nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        ))
    }
    stop(
        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
        "'dataInput' type '", .getClassName(stageResults$.dataInput), "' is not implemented yet"
    )
}

#'
#' Repeated p-values for enrichment designs
#'
#' Thin wrapper: repeated p-values for enrichment designs are computed by the
#' multi-arm implementation.
#'
#' @noRd
#'
.getRepeatedPValuesEnrichment <- function(stageResults, ..., tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(functionName = "getRepeatedPValuesEnrichment", ...)
    return(.getRepeatedPValuesMultiArm(stageResults = stageResults, tolerance = tolerance, ...))
}

#'
#' Calculation of conditional rejection probability (CRP)
#'
#' Dispatches to the inverse normal or Fisher combination implementation
#' depending on the design class of the stage results.
#' NOTE(review): 'gMax', 'iterations' and 'seed' are bound here but not used or
#' forwarded by this dispatcher — confirm whether that is intentional.
#'
#' @noRd
#'
.getConditionalRejectionProbabilitiesEnrichment <- function(stageResults, ...,
        stage = stageResults$stage,
        iterations = C_ITERATIONS_DEFAULT,
        seed = NA_real_) {
    .assertIsValidStage(stage, stageResults$.design$kMax)
    gMax <- stageResults$getGMax()
    if (.isTrialDesignInverseNormal(stageResults$.design)) {
        return(.getConditionalRejectionProbabilitiesEnrichmentInverseNormal(
            stageResults = stageResults, stage = stage, ...
        ))
    } else if (.isTrialDesignFisher(stageResults$.design)) {
        return(.getConditionalRejectionProbabilitiesEnrichmentFisher(
            stageResults = stageResults, stage = stage, ...
        ))
    }
    stop(
        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
        "'design' must be an instance of TrialDesignInverseNormal or TrialDesignFisher"
    )
}

#'
#' Calculation of CRP based on inverse normal method
#'
#' Returns a gMax x kMax matrix of conditional rejection probabilities; entries
#' stay NA where no separate p-value is available. For a fixed design (kMax == 1)
#' a 1x1 NA matrix is returned.
#'
#' @noRd
#'
.getConditionalRejectionProbabilitiesEnrichmentInverseNormal <- function(..., stageResults, stage) {
    design <- stageResults$.design
    .assertIsTrialDesignInverseNormal(design)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalRejectionProbabilitiesEnrichmentInverseNormal",
        ignore = c("stage", "design"), ...
    )
    kMax <- design$kMax
    if (kMax == 1) {
        # no interim stages -> CRP undefined
        return(as.matrix(NA_real_))
    }
    gMax <- stageResults$getGMax()
    conditionalRejectionProbabilities <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    weights <- .getWeightsInverseNormal(design)
    informationRates <- design$informationRates
    ctr <- .performClosedCombinationTest(stageResults = stageResults)
    criticalValues <- design$criticalValues
    for (stageIndex in (1:min(stage, kMax - 1))) {
        for (g in 1:gMax) {
            if (!is.na(ctr$separatePValues[g, stageIndex])) {
                # shifted decision region for use in getGroupSeqProbs
                # Inverse Normal Method
                shiftedDecisionRegionUpper <- criticalValues[(stageIndex + 1):kMax] *
                    sqrt(sum(weights[1:stageIndex]^2) + cumsum(weights[(stageIndex + 1):kMax]^2)) /
                    sqrt(cumsum(weights[(stageIndex + 1):kMax]^2)) -
                    min(ctr$overallAdjustedTestStatistics[ctr$indices[, g] == 1, stageIndex], na.rm = TRUE) *
                        sqrt(sum(weights[1:stageIndex]^2)) /
                        sqrt(cumsum(weights[(stageIndex + 1):kMax]^2))
                if (stageIndex == kMax - 1) {
                    # no remaining interim stages -> no futility bounds to shift
                    shiftedFutilityBounds <- c()
                } else {
                    shiftedFutilityBounds <- design$futilityBounds[(stageIndex + 1):(kMax - 1)] *
                        sqrt(sum(weights[1:stageIndex]^2) + cumsum(weights[(stageIndex + 1):(kMax - 1)]^2)) /
                        sqrt(cumsum(weights[(stageIndex + 1):(kMax - 1)]^2)) -
                        min(ctr$overallAdjustedTestStatistics[ctr$indices[, g] == 1, stageIndex], na.rm = TRUE) *
                            sqrt(sum(weights[1:stageIndex]^2)) /
                            sqrt(cumsum(weights[(stageIndex + 1):(kMax - 1)]^2))
                }
                # scaled information for use in getGroupSeqProbs
                scaledInformation <- (informationRates[(stageIndex + 1):kMax] - informationRates[stageIndex]) /
                    (1 - informationRates[stageIndex])
                decisionMatrix <- matrix(c(
                    shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                    shiftedDecisionRegionUpper
                ), nrow = 2, byrow = TRUE)
                probs <- .getGroupSequentialProbabilities(
                    decisionMatrix = decisionMatrix,
                    informationRates = scaledInformation
                )
                # rejection probability = mass above the upper bounds
                conditionalRejectionProbabilities[g, stageIndex] <- sum(probs[3, ] - probs[2, ])
            }
        }
    }
    return(conditionalRejectionProbabilities)
}

#'
#' Calculation of conditional rejection probability based on Fisher's combination test
#'
#' Returns a gMax x kMax matrix of conditional rejection probabilities. With binding
#' futility, once a stage-wise p-value exceeds the futility bound the remaining
#' stages for that comparison are set to 0. Values are clamped to [0, 1]; negative
#' results are reported as NA.
#'
#' @noRd
#'
.getConditionalRejectionProbabilitiesEnrichmentFisher <- function(..., stageResults, stage) {
    design <- stageResults$.design
    .assertIsTrialDesignFisher(design)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalRejectionProbabilitiesEnrichmentFisher",
        ignore = c("stage", "design"), ...
    )
    kMax <- design$kMax
    if (kMax == 1) {
        # no interim stages -> CRP undefined
        return(as.matrix(NA_real_))
    }
    gMax <- stageResults$getGMax()
    criticalValues <- design$criticalValues
    weights <- .getWeightsFisher(design)
    intersectionTest <- stageResults$intersectionTest
    conditionalRejectionProbabilities <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    if (design$bindingFutility) {
        alpha0Vec <- design$alpha0Vec
    } else {
        # non-binding futility: bounds effectively disabled (alpha0 = 1)
        alpha0Vec <- rep(1, kMax - 1)
    }
    for (g in 1:gMax) {
        for (stageIndex in (1:min(stage, kMax - 1))) {
            if (!is.na(stageResults$separatePValues[g, stageIndex])) {
                if (gMax == 1) {
                    pValues <- stageResults$separatePValues[1, 1:stageIndex]
                } else {
                    # take the p-values of the intersection hypothesis with the
                    # largest overall adjusted test statistic involving g
                    ctr <- .performClosedCombinationTest(
                        stageResults = stageResults,
                        design = design, intersectionTest = intersectionTest
                    )
                    pValues <- ctr$adjustedStageWisePValues[ctr$indices[, g] == 1, ][which.max(
                        ctr$overallAdjustedTestStatistics[ctr$indices[, g] == 1, stageIndex]
                    ), 1:stageIndex]
                }
                if (prod(pValues^weights[1:stageIndex]) <= criticalValues[stageIndex]) {
                    # already rejected at this stage
                    conditionalRejectionProbabilities[g, stageIndex] <- 1
                } else {
                    if (stageIndex < kMax - 1) {
                        conditionalRejectionProbabilities[g, stageIndex] <- .getFisherCombinationSize(
                            kMax - stageIndex,
                            alpha0Vec[(stageIndex + 1):(kMax - 1)],
                            (criticalValues[(stageIndex + 1):kMax] / prod(pValues^weights[1:stageIndex]))^(1 / weights[stageIndex + 1]),
                            weights[(stageIndex + 2):kMax] / weights[stageIndex + 1]
                        )
                    } else {
                        conditionalRejectionProbabilities[g, stageIndex] <-
                            (criticalValues[kMax] / prod(pValues^weights[1:stageIndex]))^(1 / weights[kMax])
                    }
                }
                if (design$bindingFutility) {
                    if (pValues[stageIndex] > alpha0Vec[stageIndex]) {
                        # binding futility stop: zero CRP from here on for this g
                        conditionalRejectionProbabilities[g, stageIndex:stage] <- 0
                        break
                    }
                }
            }
        }
    }
    conditionalRejectionProbabilities[conditionalRejectionProbabilities >= 1] <- 1
    conditionalRejectionProbabilities[conditionalRejectionProbabilities < 0] <- NA_real_
    return(conditionalRejectionProbabilities)
}

#'
#' Plotting conditional power and likelihood
#'
#' Validates the plotting stage and forwards to the endpoint-specific conditional
#' power / likelihood implementation, warning about arguments that do not apply
#' to the detected endpoint type.
#'
#' @noRd
#'
.getConditionalPowerPlotEnrichment <- function(stageResults, ...,
        nPlanned,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        thetaRange = NA_real_, assumedStDevs = NA_real_,
        piTreatmentRange = NA_real_, piControls = NA_real_,
        iterations = C_ITERATIONS_DEFAULT, seed = NA_real_, showArms = NA_real_) {
    .stopInCaseOfIllegalStageDefinition2(...)

    kMax <- stageResults$.design$kMax
    stage <- stageResults$stage
    # when the trial is complete but future sample sizes are given,
    # plot as if standing at the last interim
    if (stage == kMax && length(nPlanned) > 0) {
        stage <- kMax - 1
    }
    if (stage < 1 || kMax == 1) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "cannot plot conditional power of a fixed design")
    }
    if (stage >= kMax) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "the conditional power plot is only available for subsequent stages. ",
            "Please specify a 'stage' (", stage, ") < 'kMax' (", kMax, ")"
        )
    }
    .assertIsValidNPlanned(nPlanned = nPlanned, kMax = kMax, stage = stage)
    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)

    if (stageResults$isDatasetMeans()) {
        # rates-only arguments are ignored for means data
        .warnInCaseOfUnusedArgument(piTreatmentRange, "piTreatmentRange", NA_real_, "plot")
        .warnInCaseOfUnusedArgument(piControls, "piControls", NA_real_, "plot")
        return(.getConditionalPowerLikelihoodMeansEnrichment(
            stageResults = stageResults, stage = stage,
            nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
            thetaRange = thetaRange, assumedStDevs = assumedStDevs,
            iterations = iterations, seed = seed
        ))
    } else if (stageResults$isDatasetRates()) {
        # means-only arguments are ignored for rates data
        .warnInCaseOfUnusedArgument(thetaRange, "thetaRange", NA_real_, "plot")
        .warnInCaseOfUnusedArgument(assumedStDevs, "assumedStDevs", NA_real_, "plot")
        return(.getConditionalPowerLikelihoodRatesEnrichment(
            stageResults = stageResults, stage = stage,
            nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
            piTreatmentRange = piTreatmentRange, piControls = piControls,
            iterations = iterations, seed = seed
        ))
    } else if (stageResults$isDatasetSurvival()) {
        # neither rates nor means arguments apply to survival data
        .warnInCaseOfUnusedArgument(piTreatmentRange, "piTreatmentRange", NA_real_, "plot")
        .warnInCaseOfUnusedArgument(piControls, "piControls", NA_real_, "plot")
        .warnInCaseOfUnusedArgument(assumedStDevs, "assumedStDevs", NA_real_, "plot")
        return(.getConditionalPowerLikelihoodSurvivalEnrichment(
            stageResults = stageResults, stage = stage,
            nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
            thetaRange = thetaRange,
            iterations = iterations, seed = seed
        ))
    }
    stop(
        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
        "'dataInput' type '", .getClassName(stageResults$.dataInput), "' is not implemented yet"
    )
}
rpact/R/class_time.R0000644000176200001440000032755314445307575014057 0ustar liggesusers## |
## | *Time
classes* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | C_REGEXP_GREATER_OR_EQUAL <- ">= ?" C_REGEXP_SMALLER <- "< ?" C_REGEXP_SMALLER_OR_EQUAL <- "<= ?" C_REGEXP_DECIMAL_NUMBER <- "\\d*(\\.{1}\\d*)?" TimeDefinition <- setRefClass("TimeDefinition", contains = "ParameterSet", methods = list( initialize = function(...) { callSuper(...) .parameterNames <<- C_PARAMETER_NAMES .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS }, .getRegexpFromTo = function(..., from, to, fromPrefix = "", toPrefix = "") { return(paste0("(^ *", fromPrefix, from, " *- *", toPrefix, to, " *$)")) }, .getRegexpSmallerThan = function() { return(paste0("(^ *", C_REGEXP_SMALLER, C_REGEXP_DECIMAL_NUMBER, " *$)")) }, .getRegexpDecimalNumber = function() { return(paste0("(^ *", C_REGEXP_DECIMAL_NUMBER, " *$)")) }, .getRegexpGreaterOrEqualThan = function() { return(paste0("(^ *", C_REGEXP_GREATER_OR_EQUAL, C_REGEXP_DECIMAL_NUMBER, " *$)")) }, .getRegexpDecimalRangeStart = function() { return(.getRegexpFromTo(from = "0", to = C_REGEXP_DECIMAL_NUMBER, toPrefix = C_REGEXP_SMALLER)) }, .getRegexpDecimalRange = function() { return(.getRegexpFromTo( from = C_REGEXP_DECIMAL_NUMBER, to = C_REGEXP_DECIMAL_NUMBER, toPrefix = C_REGEXP_SMALLER )) }, .getRegexpDecimalRangeEnd = function() { return(.getRegexpFromTo( from = C_REGEXP_DECIMAL_NUMBER, to = "(Inf|x|\\?)", toPrefix = paste0("(", 
C_REGEXP_SMALLER, " *)?") )) }, .getRegexpDecimalRangeFiniteEnd = function() { return(.getRegexpFromTo( from = C_REGEXP_DECIMAL_NUMBER, to = C_REGEXP_DECIMAL_NUMBER, toPrefix = "<=? ?" )) }, .getRegexpOr = function(...) { args <- list(...) if (length(args) == 0) { return("") } if (length(args) == 1) { return(args[[1]]) } return(paste(unlist(args, recursive = FALSE, use.names = FALSE), collapse = "|")) }, .validateTimePeriod = function(timePeriod, i, n, accrualTimeMode = FALSE) { endOfAccrualIsUndefined <- FALSE if (i == 1 && (n > 1 || !accrualTimeMode)) { if (!grepl(.getRegexpOr(.getRegexpSmallerThan(), .getRegexpDecimalRangeStart()), timePeriod, perl = TRUE )) { if (!accrualTimeMode && n == 1 && !grepl("(0 *- ?)?=time\", \"time - Inf\" or \"time1 - <=time2\", ", "e.g., \"20\", \">=20\" or \"20 - Inf\" or \"20 - <=30\"" ) } if (grepl(.getRegexpOr(.getRegexpGreaterOrEqualThan(), .getRegexpDecimalRangeEnd()), timePeriod, perl = TRUE )) { endOfAccrualIsUndefined <- TRUE } timePeriod <- gsub("([Inf >=\\?x]*)|-", "", timePeriod) } else { if (!grepl(.getRegexpOr(.getRegexpGreaterOrEqualThan(), .getRegexpDecimalRangeEnd()), timePeriod, perl = TRUE )) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the name of the last region must have the format ", "\">=time\" or \"time - Inf\", e.g., \">=20\" or \"20 - Inf\"" ) } } } else { if (!grepl(.getRegexpDecimalRange(), timePeriod, perl = TRUE)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the name of the inner regions must have the format \"time_1 - = 1" ) } } else if (accrualIntensityType == "relative") { absoluteAccrualIntensityEnabled <- FALSE } args <- list(...) 
showWarnings <- args[["showWarnings"]] if (is.null(showWarnings) || !is.logical(showWarnings)) { showWarnings <- TRUE } return(AccrualTime( accrualTime = accrualTime, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects, showWarnings = showWarnings, absoluteAccrualIntensityEnabled = absoluteAccrualIntensityEnabled )) } #' #' @name PiecewiseSurvivalTime #' #' @title #' Piecewise Exponential Survival Time #' #' @description #' Class for the definition of piecewise survival times. #' #' @template field_piecewiseSurvivalTime #' @template field_lambda1 #' @template field_lambda2 #' @template field_hazardRatio #' @template field_pi1_survival #' @template field_pi2_survival #' @template field_median1 #' @template field_median2 #' @template field_eventTime #' @template field_kappa #' @template field_piecewiseSurvivalEnabled #' @template field_delayedResponseAllowed #' @template field_delayedResponseEnabled #' #' @details #' \code{PiecewiseSurvivalTime} is a class for the definition of piecewise survival times. 
#' #' @include f_core_constants.R #' @include class_core_parameter_set.R #' @include f_core_utilities.R #' @include f_logger.R #' #' @keywords internal #' #' @importFrom methods new #' PiecewiseSurvivalTime <- setRefClass("PiecewiseSurvivalTime", contains = "TimeDefinition", fields = list( .pi1Default = "numeric", .lambdaBased = "logical", .silent = "logical", piecewiseSurvivalTime = "numeric", lambda1 = "numeric", lambda2 = "numeric", hazardRatio = "numeric", pi1 = "numeric", pi2 = "numeric", median1 = "numeric", median2 = "numeric", eventTime = "numeric", kappa = "numeric", piecewiseSurvivalEnabled = "logical", delayedResponseAllowed = "logical", delayedResponseEnabled = "logical" ), methods = list( initialize = function(piecewiseSurvivalTime = NA_real_, ..., lambda1 = NA_real_, lambda2 = NA_real_, hazardRatio = NA_real_, pi1 = NA_real_, pi2 = NA_real_, median1 = NA_real_, median2 = NA_real_, eventTime = C_EVENT_TIME_DEFAULT, kappa = 1, delayedResponseAllowed = FALSE) { callSuper( piecewiseSurvivalTime = NA_real_, lambda1 = lambda1, lambda2 = lambda2, hazardRatio = hazardRatio, pi1 = pi1, pi2 = pi2, median1 = median1, median2 = median2, eventTime = eventTime, kappa = kappa, delayedResponseAllowed = delayedResponseAllowed, ... 
) if (length(piecewiseSurvivalTime) == 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must be defined (set to NA_real_ if not applicable)" ) } .stopInCaseOfConflictingArguments(lambda1, "lambda1", median1, "median1") .stopInCaseOfConflictingArguments(lambda2, "lambda2", median2, "median2") .stopInCaseOfConflictingArguments(pi1, "pi1", median1, "median1") .stopInCaseOfConflictingArguments(pi1, "pi1", median2, "median2") .stopInCaseOfConflictingArguments(pi1, "pi1", lambda1, "lambda1") .stopInCaseOfConflictingArguments(pi1, "pi1", lambda2, "lambda2") .stopInCaseOfConflictingArguments(pi2, "pi2", median1, "median1") .stopInCaseOfConflictingArguments(pi2, "pi2", median2, "median2") .stopInCaseOfConflictingArguments(pi2, "pi2", lambda1, "lambda1") .stopInCaseOfConflictingArguments(pi2, "pi2", lambda2, "lambda2") if (length(median1) > 0 && !all(is.na(median1))) { .self$lambda1 <<- getLambdaByMedian(median1, kappa = kappa) .setParameterType("median1", C_PARAM_USER_DEFINED) .setParameterType("lambda1", C_PARAM_GENERATED) } else { .setParameterType("median1", C_PARAM_NOT_APPLICABLE) .setParameterType("lambda1", ifelse(length(lambda1) == 1 && is.na(lambda1), C_PARAM_NOT_APPLICABLE, C_PARAM_USER_DEFINED )) } if (length(median2) > 0 && !all(is.na(median2))) { .self$lambda2 <<- getLambdaByMedian(median2, kappa = kappa) .setParameterType("median2", C_PARAM_USER_DEFINED) .setParameterType("lambda2", C_PARAM_GENERATED) } else { .setParameterType("median2", C_PARAM_NOT_APPLICABLE) .setParameterType("lambda2", C_PARAM_NOT_APPLICABLE) } args <- list(...) 
if (!is.null(args[[".pi1Default"]])) { .pi1Default <<- args[[".pi1Default"]] } if (!is.null(args[[".lambdaBased"]])) { .lambdaBased <<- args[[".lambdaBased"]] } if (!is.null(args[[".silent"]])) { .silent <<- args[[".silent"]] } else { .silent <<- FALSE } piecewiseSurvivalEnabled <<- FALSE delayedResponseEnabled <<- FALSE .setParameterType("piecewiseSurvivalTime", C_PARAM_NOT_APPLICABLE) .setParameterType("piecewiseSurvivalEnabled", C_PARAM_GENERATED) .setParameterType("delayedResponseEnabled", ifelse(isTRUE(delayedResponseAllowed), C_PARAM_GENERATED, C_PARAM_NOT_APPLICABLE )) .setParameterType("delayedResponseAllowed", ifelse(isTRUE(delayedResponseAllowed), C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE )) .setParameterType("pi1", C_PARAM_NOT_APPLICABLE) .setParameterType("pi2", C_PARAM_NOT_APPLICABLE) .setParameterType("eventTime", ifelse(length(eventTime) == 1 && is.na(eventTime), C_PARAM_NOT_APPLICABLE, ifelse(eventTime == C_EVENT_TIME_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED) )) .setParameterType("kappa", ifelse(length(kappa) == 1 && !is.na(kappa) && kappa == 1, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) .init(piecewiseSurvivalTime) if (.getParameterType("median1") == C_PARAM_USER_DEFINED && .getParameterType("lambda1") == C_PARAM_USER_DEFINED) { .setParameterType("lambda1", C_PARAM_GENERATED) } if (.getParameterType("median2") == C_PARAM_USER_DEFINED && .getParameterType("lambda2") == C_PARAM_USER_DEFINED) { .setParameterType("lambda2", C_PARAM_GENERATED) } if (!is.na(eventTime) && .getParameterType("pi1") != C_PARAM_USER_DEFINED && .getParameterType("pi1") != C_PARAM_DEFAULT_VALUE && .getParameterType("pi2") != C_PARAM_USER_DEFINED && .getParameterType("pi2") != C_PARAM_DEFAULT_VALUE) { if (.getParameterType("eventTime") == C_PARAM_USER_DEFINED) { warning("'eventTime' (", round(eventTime, 3), ") will be ignored", call. 
= FALSE)
                }
                .setParameterType("eventTime", C_PARAM_NOT_APPLICABLE)
                eventTime <<- NA_real_
            }
            .validateCalculatedArguments()
        },
        # Post-construction consistency checks.
        # Verifies that parameters derived during initialization agree with the
        # user-defined inputs:
        #  - lambda1/lambda2 (and pi1/pi2) must match values recomputed from a
        #    user-defined median1/median2,
        #  - in lambda/median-based mode pi1, pi2, and eventTime must have been
        #    cleared (NA_real_) and flagged as 'not applicable',
        #  - the parameter type of 'hazardRatio' must have been resolved.
        # Stops with a runtime-issue message on any violation.
        .validateCalculatedArguments = function() {
            if (.getParameterType("median1") == C_PARAM_USER_DEFINED) {
                if (!isTRUE(all.equal(getLambdaByMedian(median1, kappa = kappa), lambda1, tolerance = 1e-05))) {
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'lambda1' must be ",
                        round(getLambdaByMedian(median1, kappa = kappa), 5),
                        ", but is ", round(lambda1, 5)
                    )
                }
                if (!any(is.na(pi1)) && !isTRUE(all.equal(
                        getPiByMedian(median1, eventTime = eventTime, kappa = kappa),
                        pi1,
                        tolerance = 1e-05
                    ))) {
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'pi1' must be ",
                        round(getPiByMedian(median1, eventTime = eventTime, kappa = kappa), 5),
                        ", but is ", round(pi1, 5)
                    )
                }
            }
            if (.getParameterType("median2") == C_PARAM_USER_DEFINED) {
                if (!isTRUE(all.equal(getLambdaByMedian(median2, kappa = kappa), lambda2, tolerance = 1e-05))) {
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'lambda2' must be ",
                        round(getLambdaByMedian(median2, kappa = kappa), 5),
                        ", but is ", round(lambda2, 5)
                    )
                }
                if (!is.na(pi2) && !isTRUE(all.equal(
                        getPiByMedian(median2, eventTime = eventTime, kappa = kappa),
                        pi2,
                        tolerance = 1e-05
                    ))) {
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'pi2' must be ",
                        round(getPiByMedian(median2, eventTime = eventTime, kappa = kappa), 5),
                        ", but is ", round(pi2, 5)
                    )
                }
            }
            if (.getParameterType("lambda1") == C_PARAM_USER_DEFINED ||
                    .getParameterType("median1") == C_PARAM_USER_DEFINED ||
                    .getParameterType("lambda2") == C_PARAM_USER_DEFINED ||
                    .getParameterType("median2") == C_PARAM_USER_DEFINED) {
                if (!any(is.na(pi1))) {
                    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'pi1' (", pi1, ") must be NA_real_")
                }
                if (.getParameterType("pi1") != C_PARAM_NOT_APPLICABLE) {
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                        "parameter type of 'pi1' (", .getParameterType("pi1"),
                        ") must be C_PARAM_NOT_APPLICABLE"
                    )
                }
                # Bug fix: this guard previously re-tested 'pi1' although the
                # error message (and the intent of the check) refer to 'pi2',
                # so a non-NA 'pi2' was never detected here.
                if (!any(is.na(pi2))) {
                    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'pi2' (", pi2, ") must be NA_real_")
                }
                if (.getParameterType("pi2") != C_PARAM_NOT_APPLICABLE) {
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                        "parameter type of 'pi2' (", .getParameterType("pi2"),
                        ") must be C_PARAM_NOT_APPLICABLE"
                    )
                }
                if (!any(is.na(eventTime))) {
                    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'eventTime' (", eventTime, ") must be NA_real_")
                }
                if (.getParameterType("eventTime") != C_PARAM_NOT_APPLICABLE) {
                    stop(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                        "parameter type of 'eventTime' (", .getParameterType("eventTime"),
                        ") must be C_PARAM_NOT_APPLICABLE"
                    )
                }
            }
            if (.getParameterType("hazardRatio") == C_PARAM_TYPE_UNKNOWN) {
                stop(
                    C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                    "parameter type of 'hazardRatio' (", hazardRatio,
                    ") must be != C_PARAM_TYPE_UNKNOWN"
                )
            }
        },
        # Stops with a 'conflicting arguments' exception if both arguments
        # contain at least one non-NA value, i.e., if the user specified two
        # mutually exclusive parameters (e.g., 'lambda1' and 'median1').
        .stopInCaseOfConflictingArguments = function(arg1, argName1, arg2, argName2) {
            if (length(arg1) > 0 && !all(is.na(arg1)) &&
                    length(arg2) > 0 && !all(is.na(arg2))) {
                stop(
                    C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                    "it is not allowed to specify '", argName1, "' (", .arrayToString(arg1), ")",
                    " and '", argName2, "' (", .arrayToString(arg2), ") concurrently"
                )
            }
        },
        # Returns the piecewise definition (segment start times plus both
        # hazard rates) as a data.frame with display column names.
        .asDataFrame = function() {
            data <- data.frame(
                piecewiseSurvivalTime = piecewiseSurvivalTime,
                lambda1 = lambda1,
                lambda2 = lambda2
            )
            rownames(data) <- as.character(1:nrow(data))
            colnames(data) <- c(
                "Start time",
                C_PARAMETER_NAMES["lambda1"], # Hazard rate (1)
                C_PARAMETER_NAMES["lambda2"] # Hazard rate (2)
            )
            return(data)
        },
        # TRUE if the object is parameterized via pi1/pi2; complement of
        # .isLambdaBased().
        .isPiBased = function() {
            return(!.isLambdaBased())
        },
        # TRUE if the object is parameterized via hazard rates (lambda) or
        # medians rather than event probabilities (pi).
        .isLambdaBased = function(minNumberOfLambdas = 2) {
            if (.getParameterType("lambda2") == C_PARAM_USER_DEFINED ||
                    .getParameterType("median2") == C_PARAM_USER_DEFINED) {
                if (length(lambda2) >= minNumberOfLambdas && !any(is.na(lambda2))) {
                    return(TRUE)
                }
            }
            return((length(pi1) == 0 || any(is.na(pi1))) &&
                (length(pi2) == 0 || any(is.na(pi2))))
        },
        # Public print entry point; delegates to .show() with console output.
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        .show = function(showType = 1, digits = NA_integer_,
consoleOutputEnabled = TRUE) {
            "Method for automatically printing piecewise survival time objects"
            .resetCat()
            if (showType == 2) {
                # Technical view: delegate to the parameter-set implementation.
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                .cat("Piecewise exponential survival times:\n",
                    sep = "", heading = 1,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                if (!piecewiseSurvivalEnabled) {
                    .cat(" Piecewise exponential survival is disabled.\n\n", consoleOutputEnabled = consoleOutputEnabled)
                } else if (length(piecewiseSurvivalTime) == 1) {
                    # Single segment: one constant hazard rate for all times.
                    .cat(" At all times:", lambda2[1], "\n\n", consoleOutputEnabled = consoleOutputEnabled)
                } else {
                    # Multiple segments: print one "start - <end: rate" row per
                    # segment; the last segment is open-ended (">= start").
                    piecewiseSurvivalTimeStr <- format(piecewiseSurvivalTime)
                    lambda2Str <- format(lambda2)
                    for (i in 1:length(piecewiseSurvivalTime)) {
                        if (i < length(piecewiseSurvivalTime)) {
                            .cat(" ", piecewiseSurvivalTimeStr[i], " - <",
                                piecewiseSurvivalTimeStr[i + 1], ": ", lambda2Str[i], "\n",
                                sep = "",
                                consoleOutputEnabled = consoleOutputEnabled
                            )
                        } else {
                            # Pad so the ">=" row lines up with the ranges above.
                            .cat(" ", rep(" ", 2 + max(nchar(piecewiseSurvivalTimeStr))),
                                ">=", piecewiseSurvivalTimeStr[i], ": ", lambda2Str[i], "\n",
                                sep = "",
                                consoleOutputEnabled = consoleOutputEnabled
                            )
                        }
                    }
                    if (delayedResponseEnabled) {
                        .cat("Delayed response is enabled.\n", consoleOutputEnabled = consoleOutputEnabled)
                    }
                    .cat("\n", consoleOutputEnabled = consoleOutputEnabled)
                }
                # Parameter details grouped by origin (user / default / generated).
                .cat("Details:\n\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled)
                .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getGeneratedParameters(), "Generated parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
            }
        },
        .toString = function(startWithUpperCase =
FALSE) { s <- "piecewise survival time" return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) }, isDelayedResponseEnabled = function() { return(delayedResponseEnabled) }, isPiecewiseSurvivalEnabled = function() { if (length(piecewiseSurvivalTime) == 0) { return(FALSE) } if (length(piecewiseSurvivalTime) == 1 && is.na(piecewiseSurvivalTime)) { return(FALSE) } return(TRUE) }, .initFromList = function(pwSurvTimeList) { if (!is.list(pwSurvTimeList)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must be a list") } if (length(pwSurvTimeList) == 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must contain at least one entry" ) } if (is.null(names(pwSurvTimeList))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' must be a named list") } if (!all(is.na(lambda2))) { warning("'lambda2' (", .arrayToString(lambda2), ") will be ignored because 'piecewiseSurvivalTime' is a list", call. = FALSE ) } pwSurvStartTimes <- c(0) pwSurvLambda2 <- c() pwSurvTimeNames <- names(pwSurvTimeList) for (i in 1:length(pwSurvTimeNames)) { timePeriod <- pwSurvTimeNames[i] lambdaValue <- pwSurvTimeList[[timePeriod]] .assertIsSingleNumber(lambdaValue, paste0("pwSurvLambda[", i, "]")) timePeriod <- .validateTimePeriod(timePeriod, i = i, n = length(pwSurvTimeNames)) if (i < length(pwSurvTimeNames)) { parts <- strsplit(timePeriod, "- *(< *)?", perl = TRUE)[[1]] if (length(parts) != 2) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all regions (", timePeriod, ") must have the format ", "\"time_1 - 1 && delayedResponseAllowed) { if (length(hazardRatio) != length(pwSurvLambda2)) { warning("Only the first 'hazardRatio' (", round(hazardRatio[1], 4), ") was used for piecewise survival time definition ", "(use a loop over the function to simulate different hazard ratios)", call. 
= FALSE ) hazardRatio <<- hazardRatio[1] } else { delayedResponseEnabled <<- TRUE } lambda1 <<- pwSurvLambda2 * hazardRatio^(1 / kappa) .setParameterType("lambda1", C_PARAM_GENERATED) } else { lambda1 <<- NA_real_ .setParameterType("lambda1", C_PARAM_NOT_APPLICABLE) } lambda2 <<- pwSurvLambda2 .setParameterType("lambda2", C_PARAM_USER_DEFINED) piecewiseSurvivalEnabled <<- !identical(piecewiseSurvivalTime, 0) }, .init = function(pwSurvTime) { .logDebug("pwSurvTime %s, %s", ifelse(is.numeric(pwSurvTime), .arrayToString(pwSurvTime), pwSurvTime ), .getClassName(pwSurvTime[1])) .logDebug("lambda1 %s, %s", lambda1, .getParameterType("lambda1")) .logDebug("lambda2 %s, %s", lambda2, .getParameterType("lambda2")) # case 1: lambda1 and lambda2 = NA or generated if (length(pwSurvTime) == 1 && (is.na(pwSurvTime) || is.numeric(pwSurvTime)) && (all(is.na(lambda1)) || .getParameterType("lambda1") == C_PARAM_GENERATED) && length(lambda2) == 1 && (is.na(lambda2) || .getParameterType("lambda2") == C_PARAM_GENERATED) ) { .logDebug(".init, case 1: lambda1 and lambda2 = NA") if (!is.null(.lambdaBased) && isTRUE(.lambdaBased)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'lambda1' and 'lambda2' must be specified") } if (!any(is.na(hazardRatio))) { .setParameterType("hazardRatio", C_PARAM_USER_DEFINED) } if (!is.na(pwSurvTime)) { warning("'piecewiseSurvivalTime' (", pwSurvTime, ") will be ignored") } if (is.na(pi2)) { if (!is.na(median2) || !any(is.na(median1))) { .logDebug(".init: set pi2 to 'not applicable'") .setParameterType("pi2", C_PARAM_NOT_APPLICABLE) } else { .logDebug(".init: set pi2 to default") pi2 <<- C_PI_2_DEFAULT .setParameterType("pi2", C_PARAM_DEFAULT_VALUE) } } else { .assertIsSingleNumber(pi2, "pi2") .setParameterType("pi2", ifelse(pi2 == C_PI_2_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) if (!any(is.na(median2))) { warning("'median2' (", .arrayToString(median2), ") will be ignored") median2 <<- NA_real_ } } hazardRatioCalculationEnabled <- TRUE if 
(all(is.na(pi1))) { if (length(hazardRatio) > 0 && !all(is.na(hazardRatio))) { .setParameterType("hazardRatio", C_PARAM_USER_DEFINED) hazardRatioCalculationEnabled <- FALSE } if (!any(is.na(median1))) { .logDebug(".init: set pi1 to 'not applicable'") .setParameterType("pi1", C_PARAM_NOT_APPLICABLE) if (is.na(median2)) { if (any(is.na(hazardRatio))) { stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'hazardRatio', 'lambda2', or 'median2' must be specified" ) } if (length(hazardRatio) != length(median1)) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'hazardRatio' (", .arrayToString(hazardRatio), ") must be ", "equal to length of 'median1' (", .arrayToString(median1), ")" ) } .logDebug(".init: calculate lambda2 and median2 by median1") lambda2 <<- getLambdaByMedian(median1, kappa) / hazardRatio^(1 / kappa) if (!delayedResponseAllowed && length(unique(round(lambda2, 8))) > 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'lambda2' can only be calculated if 'unique(lambda1 / hazardRatio^(1 / kappa))' ", "result in a single value; current result = ", .arrayToString(round(lambda2, 4), vectorLookAndFeelEnabled = TRUE), " (e.g., delayed response is not allowed)" ) } median2 <<- getMedianByLambda(lambda2, kappa) .setParameterType("lambda2", C_PARAM_GENERATED) .setParameterType("median2", C_PARAM_GENERATED) } } else if (length(hazardRatio) > 0 && !all(is.na(hazardRatio))) { .setParameterType("pi1", C_PARAM_NOT_APPLICABLE) if (!any(is.na(lambda1))) { .logDebug(".init: calculate median1 by lambda1") median1 <<- getMedianByLambda(lambda1, kappa) .setParameterType("median1", C_PARAM_GENERATED) } else if (!is.na(median2)) { .logDebug(".init: calculate lambda1 and median1 by median2") lambda1 <<- getLambdaByMedian(median2, kappa) * hazardRatio^(1 / kappa) median1 <<- getMedianByLambda(lambda1, kappa) .setParameterType("lambda1", C_PARAM_GENERATED) .setParameterType("median1", C_PARAM_GENERATED) } } else { .logDebug(".init: set pi1 to default") if (!is.null(.pi1Default) && 
is.numeric(.pi1Default) && length(.pi1Default) > 0) { pi1 <<- .pi1Default } else { pi1 <<- C_PI_1_SAMPLE_SIZE_DEFAULT } .setParameterType("pi1", C_PARAM_DEFAULT_VALUE) } } else { .assertIsNumericVector(pi1, "pi1") if (!any(is.na(median1))) { .logDebug(".init: set median1 to NA") warning("'median1' (", .arrayToString(median1), ") will be ignored") median1 <<- NA_real_ } } if (hazardRatioCalculationEnabled) { if (length(hazardRatio) > 0 && !all(is.na(hazardRatio))) { warning("'hazardRatio' (", .arrayToString(hazardRatio), ") will be ignored because it will be calculated", call. = FALSE ) } if (!any(is.na(lambda1)) && !is.na(lambda2)) { .logDebug(".init: calculate hazardRatio by lambda1 and lambda2") hazardRatio <<- (lambda1 / lambda2)^kappa .setParameterType("hazardRatio", C_PARAM_GENERATED) } else if (!any(is.na(pi1)) && !is.na(pi2)) { .logDebug(".init: calculate hazardRatio by pi1 and pi2") hazardRatio <<- getHazardRatioByPi(pi1, pi2, eventTime, kappa = kappa) .setParameterType("hazardRatio", C_PARAM_GENERATED) } } if (length(pi1) > 0 && !any(is.na(pi1))) { pi1Default <- C_PI_1_SAMPLE_SIZE_DEFAULT if (!is.null(.pi1Default) && is.numeric(.pi1Default) && length(.pi1Default) > 0) { pi1Default <- .pi1Default } if (identical(pi1, pi1Default)) { .setParameterType("pi1", C_PARAM_DEFAULT_VALUE) } else if (hazardRatioCalculationEnabled && .getParameterType("pi1") != C_PARAM_GENERATED) { .setParameterType("pi1", C_PARAM_USER_DEFINED) } } if (length(pi2) == 1 && !is.na(pi2)) { if (length(eventTime) == 1 && !is.na(eventTime)) { lambda2 <<- getLambdaByPi(pi2, eventTime, kappa = kappa) .setParameterType("lambda2", C_PARAM_GENERATED) } if (length(pi1) == 1 && is.na(pi1) && !any(is.na(hazardRatio))) { pi1 <<- getPiByLambda( getLambdaByPi( pi2, eventTime, kappa = kappa ) * hazardRatio^(1 / kappa), eventTime, kappa = kappa ) .setParameterType("pi1", C_PARAM_GENERATED) } if (length(pi1) > 0 && !any(is.na(pi1)) && length(eventTime) == 1 && !is.na(eventTime)) { lambda1 <<- 
getLambdaByPi(pi1, eventTime, kappa = kappa)
                    .setParameterType("lambda1", C_PARAM_GENERATED)
                }
            }
            .initMedian()
            return(invisible())
        }

        # Normalize a single NA of any type to NA_real_ for the checks below.
        if (length(pwSurvTime) == 1 && is.na(pwSurvTime)) {
            pwSurvTime <- NA_real_
        }

        if (is.list(pwSurvTime)) {
            # Named-list definition of the piecewise survival time.
            .assertIsValidHazardRatioVector(hazardRatio)
            .initFromList(pwSurvTime)
            .initHazardRatio()
            if (!piecewiseSurvivalEnabled) {
                .initPi()
                .initMedian()
            }
        } else if (delayedResponseAllowed && length(lambda2) == 1 && !is.na(lambda2) &&
                length(hazardRatio) > 0 &&
                (all(is.na(pwSurvTime)) || identical(pwSurvTime, 0))) {
            # Case 2: single lambda2 with hazard ratio(s); no piecewise segments.
            .logDebug(".init, case 2: delayedResponseAllowed")
            piecewiseSurvivalEnabled <<- FALSE
            if (!all(is.na(pwSurvTime)) && !identical(pwSurvTime, 0)) {
                warning("'piecewiseSurvivalTime' (", .arrayToString(pwSurvTime), ") will be ignored")
            }
            piecewiseSurvivalTime <<- 0
            .initPi()
            .initHazardRatio()
            .initMedian()
        } else if (!is.numeric(pwSurvTime)) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'piecewiseSurvivalTime' must be a list, a numeric value, or vector"
            )
        } else {
            piecewiseSurvivalTime <<- pwSurvTime
            if ((all(is.na(piecewiseSurvivalTime)) || identical(piecewiseSurvivalTime, 0)) &&
                    length(lambda2) == 1 && !is.na(lambda2)) {
                # Case 3a: numeric input but effectively a single segment.
                .logDebug(".init, case 3: piecewise survival is disabled")
                if (!all(is.na(piecewiseSurvivalTime)) && !identical(piecewiseSurvivalTime, 0)) {
                    warning("'piecewiseSurvivalTime' (", .arrayToString(piecewiseSurvivalTime), ") will be ignored")
                }
                piecewiseSurvivalTime <<- 0
                .setParameterType("piecewiseSurvivalTime", C_PARAM_DEFAULT_VALUE)
                piecewiseSurvivalEnabled <<- FALSE
                .initHazardRatio()
                .initPi()
                .initMedian()
            } else {
                # Case 3b: genuine piecewise definition via a numeric vector.
                .logDebug(".init, case 3: piecewise survival is enabled")
                if (all(is.na(piecewiseSurvivalTime))) {
                    if (.getParameterType("median1") == C_PARAM_USER_DEFINED) {
                        # Message fix: was "can only defined" (missing "be").
                        stop(
                            C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                            "'median1' (", .arrayToString(median1), ") with length > 1 can only be ",
                            "defined together with a single 'median2', 'lambda2' or 'pi2'"
                        )
                    }
                    # Idiom fix: was 'length(lambda1 > 0)', which is just the
                    # length of a logical vector; the intended test is a plain
                    # non-empty check on 'lambda1' (same truth value, clearer).
                    if (delayedResponseAllowed && length(lambda1) > 0 &&
!all(is.na(lambda1)) && length(lambda1) != length(lambda2) && delayedResponseAllowed) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'lambda1' (", length(lambda1), "), 'lambda2' (", length(lambda2), "), and ", "'piecewiseSurvivalTime' (", length(piecewiseSurvivalTime), ") must be equal" ) } stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'piecewiseSurvivalTime' must be specified" ) } .setParameterType("piecewiseSurvivalTime", C_PARAM_USER_DEFINED) piecewiseSurvivalEnabled <<- TRUE .initHazardRatio() .initPi() } } if (piecewiseSurvivalEnabled) { for (param in c("pi", "median")) { for (group in 1:2) { paramName <- paste0(param, group) if (.getParameterType(paramName) == C_PARAM_USER_DEFINED) { warning( "'", paramName, "' (", .arrayToString(.self[[paramName]]), ") ", "was converted to 'lambda", group, "' ", "and is not available in output because piecewise ", "exponential survival time is enabled" ) } } } pi1 <<- NA_real_ pi2 <<- NA_real_ median1 <<- NA_real_ median2 <<- NA_real_ .setParameterType("pi1", C_PARAM_NOT_APPLICABLE) .setParameterType("pi2", C_PARAM_NOT_APPLICABLE) .setParameterType("median1", C_PARAM_NOT_APPLICABLE) .setParameterType("median2", C_PARAM_NOT_APPLICABLE) .setParameterType("eventTime", C_PARAM_NOT_APPLICABLE) if (!is.na(eventTime) && eventTime != C_EVENT_TIME_DEFAULT) { warning("Event time (", eventTime, ") will be ignored because it is not ", "applicable for piecewise exponential survival time", call. 
= FALSE ) eventTime <<- C_EVENT_TIME_DEFAULT } } .validateInitialization() }, .initMedian = function() { if (length(eventTime) == 1 && !is.na(eventTime)) { if (length(pi1) > 0 && !all(is.na(pi1)) && .getParameterType("median1") != C_PARAM_USER_DEFINED) { median1 <<- getMedianByPi(pi1, eventTime, kappa = kappa) .setParameterType("median1", C_PARAM_GENERATED) } if (length(pi2) == 1 && !is.na(pi2) && .getParameterType("median2") != C_PARAM_USER_DEFINED) { median2 <<- getMedianByPi(pi2, eventTime, kappa = kappa) .setParameterType("median2", C_PARAM_GENERATED) } } else { if (length(lambda1) > 0 && !all(is.na(lambda1)) && .getParameterType("median1") != C_PARAM_USER_DEFINED) { median1 <<- getMedianByLambda(lambda1, kappa = kappa) .setParameterType("median1", C_PARAM_GENERATED) } if (length(lambda2) == 1 && !is.na(lambda2) && .getParameterType("median2") != C_PARAM_USER_DEFINED) { median2 <<- getMedianByLambda(lambda2, kappa = kappa) .setParameterType("median2", C_PARAM_GENERATED) } } }, .initPi = function() { .logDebug(".initPi: set pi1, pi2, and eventTime to NA") if (!is.na(eventTime) && .getParameterType("eventTime") == C_PARAM_USER_DEFINED) { warning("'eventTime' (", round(eventTime, 3), ") will be ignored", call. = FALSE) } if (!is.na(pi1) && !identical(pi2, C_PI_1_DEFAULT) && !identical(pi2, C_PI_1_SAMPLE_SIZE_DEFAULT)) { warning("'pi1' (", .arrayToString(pi1), ") will be ignored", call. = FALSE) } if (!is.na(pi2) && pi2 != C_PI_2_DEFAULT) { warning("'pi2' (", pi2, ") will be ignored", call. 
= FALSE) } .setParameterType("eventTime", C_PARAM_NOT_APPLICABLE) .setParameterType("pi1", C_PARAM_NOT_APPLICABLE) .setParameterType("pi2", C_PARAM_NOT_APPLICABLE) eventTime <<- NA_real_ pi1 <<- NA_real_ pi2 <<- NA_real_ if (length(lambda2) == 0 || any(is.na(lambda2))) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'lambda2' must be defined before .initPi() can be called" ) } .setParameterType("lambda2", C_PARAM_USER_DEFINED) if (piecewiseSurvivalEnabled && length(hazardRatio) > 1) { return(invisible()) } if (length(lambda1) == 0 || any(is.na(lambda1))) { if (length(hazardRatio) > 0 && !any(is.na(hazardRatio))) { .logDebug(".initPi: calculate lambda1 by hazardRatio") lambda1 <<- lambda2 * hazardRatio^(1 / kappa) .setParameterType("lambda1", C_PARAM_GENERATED) } else if (length(lambda1) == 0) { lambda1 <<- NA_real_ } else if (delayedResponseAllowed) { .setParameterType("lambda1", C_PARAM_USER_DEFINED) } } }, .initHazardRatio = function() { .logDebug(".initHazardRatio") if (!is.null(hazardRatio) && length(hazardRatio) > 0 && !all(is.na(hazardRatio))) { if ((length(lambda1) == 1 && is.na(lambda1)) || .getParameterType("lambda1") == C_PARAM_GENERATED) { .setParameterType("hazardRatio", C_PARAM_USER_DEFINED) return(invisible()) } if (!.silent) { warning("'hazardRatio' (", .arrayToString(hazardRatio), ") will be ignored because it will be calculated", call. 
= FALSE
                    )
                }
            }
            if (any(is.na(lambda2))) {
                stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'lambda2' must be specified")
            }
            if (any(is.na(lambda1))) {
                # Bug fix: the parenthesis was misplaced as
                # 'any(is.na(hazardRatio) && ...)', which evaluates '&&' inside
                # 'any()' and fails for vector-valued 'hazardRatio' (R >= 4.3
                # rejects '&&' on operands of length > 1). The intended scalar
                # condition is 'any(is.na(hazardRatio))' combined with the
                # remaining tests.
                if (delayedResponseAllowed && any(is.na(hazardRatio)) &&
                        !any(is.na(piecewiseSurvivalTime)) &&
                        length(lambda2) == length(piecewiseSurvivalTime)) {
                    stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'hazardRatio' must be specified")
                }
                if (any(is.na(hazardRatio))) {
                    stop(
                        C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                        "'hazardRatio', 'lambda1' or 'median1' must be specified"
                    )
                }
                stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'lambda1' must be specified")
            }
            .setParameterType("lambda1", C_PARAM_USER_DEFINED)

            # A single hazard ratio exists only if lambda1/lambda2 is constant
            # across all segments (compared after rounding to 8 digits).
            hr <- unique(round(lambda1 / lambda2, 8)^kappa)
            if (length(hr) != 1) {
                if (length(lambda2) == 1 && length(lambda1) > 1) {
                    # One control rate, several treatment rates: keep the full
                    # vector of segment-wise hazard ratios.
                    hazardRatio <<- (lambda1 / lambda2)^kappa
                    .setParameterType("hazardRatio", C_PARAM_GENERATED)
                    return(invisible())
                } else if (delayedResponseAllowed) {
                    # Non-constant ratio is allowed: delayed-response mode.
                    hazardRatio <<- (lambda1 / lambda2)^kappa
                    .setParameterType("hazardRatio", C_PARAM_GENERATED)
                    delayedResponseEnabled <<- TRUE
                    return(invisible())
                } else {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "'hazardRatio' can only be calculated if 'unique(lambda1 / lambda2)' ",
                        "result in a single value; current result = ",
                        .arrayToString(round(hr, 4), vectorLookAndFeelEnabled = TRUE),
                        " (e.g., delayed response is not allowed)"
                    )
                }
            }
            hazardRatio <<- ((lambda1 / lambda2)^kappa)[1]
            .setParameterType("hazardRatio", C_PARAM_GENERATED)
        },
        # Validates the final piecewise definition: start times must exist,
        # contain no NA, begin at 0, be strictly increasing, and match the
        # length of 'lambda2'.
        .validateInitialization = function() {
            if (length(piecewiseSurvivalTime) == 0) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'piecewiseSurvivalTime' must contain at least one survival start time"
                )
            }
            if (any(is.na(piecewiseSurvivalTime))) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'piecewiseSurvivalTime' must contain valid survival start times"
                )
            }
            if (piecewiseSurvivalTime[1] != 0) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "the first value of 'piecewiseSurvivalTime' must be 0"
                )
            }
            if (length(piecewiseSurvivalTime) != length(lambda2)) {
                stop(
C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'piecewiseSurvivalTime' (", length(piecewiseSurvivalTime), ") and length of 'lambda2' (", length(lambda2), ") must be equal" ) } .assertValuesAreStrictlyIncreasing(piecewiseSurvivalTime, "piecewiseSurvivalTime") if ((length(lambda1) != 1 || is.na(lambda1)) && !(.getParameterType("lambda1") %in% c(C_PARAM_GENERATED, C_PARAM_USER_DEFINED))) { if (length(hazardRatio) == 1 && !is.na(hazardRatio)) { lambda1 <<- lambda2 * hazardRatio^(1 / kappa) .setParameterType("lambda1", C_PARAM_GENERATED) } else if (length(hazardRatio) > 1 && delayedResponseAllowed && !is.na(hazardRatio[1])) { if (!delayedResponseEnabled && .isLambdaBased()) { if (delayedResponseAllowed) { if (length(hazardRatio) != length(lambda2)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'hazardRatio' (", length(hazardRatio), ") and length of 'lambda2' (", length(lambda2), ") must be equal" ) } delayedResponseEnabled <<- TRUE } else { warning("Only the first 'hazardRatio' (", round(hazardRatio[1], 4), ") was used for piecewise survival time definition", call. = FALSE ) hazardRatio <<- hazardRatio[1] } lambda1 <<- lambda2 * hazardRatio^(1 / kappa) .setParameterType("lambda1", C_PARAM_GENERATED) } } else if (!delayedResponseEnabled && !(length(lambda2) == 1 && length(lambda1) > 1)) { if (length(lambda1) > 1) { warning("'lambda1' (", .arrayToString(lambda1), ") will be ignored", call. 
= FALSE ) } lambda1 <<- NA_real_ .setParameterType("lambda1", C_PARAM_NOT_APPLICABLE) } } else if (length(hazardRatio) == 1 && !is.na(hazardRatio) && length(lambda1) > 0 && !any(is.na(lambda1)) && length(lambda2) > 0 && !any(is.na(lambda2))) { target <- lambda2 * hazardRatio^(1 / kappa) if (length(lambda1) > 0 && !all(is.na(lambda1)) && !isTRUE(all.equal(target, lambda1))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'lambda1' (", .arrayToString(lambda1), ") ", "is not as expected (", .arrayToString(target), ") for given hazard ratio ", hazardRatio ) } } if (piecewiseSurvivalEnabled && !(length(lambda1) == 1 && is.na(lambda1)) && length(piecewiseSurvivalTime) != length(lambda1)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'piecewiseSurvivalTime' (", length(piecewiseSurvivalTime), ") and length of 'lambda1' (", length(lambda1), ") must be equal" ) } } ) ) #' #' @name AccrualTime #' #' @title #' Accrual Time #' #' @description #' Class for the definition of accrual time and accrual intensity. #' #' @template field_endOfAccrualIsUserDefined #' @template field_followUpTimeMustBeUserDefined #' @template field_maxNumberOfSubjectsIsUserDefined #' @template field_maxNumberOfSubjectsCanBeCalculatedDirectly #' @template field_absoluteAccrualIntensityEnabled #' @template field_accrualTime #' @template field_accrualIntensity #' @template field_accrualIntensityRelative #' @template field_maxNumberOfSubjects #' @template field_remainingTime #' @template field_piecewiseAccrualEnabled #' #' @details #' \code{AccrualTime} is a class for the definition of accrual time and accrual intensity. 
#' #' @include f_core_constants.R #' @include f_core_utilities.R #' @include class_core_parameter_set.R #' #' @keywords internal #' #' @importFrom methods new #' AccrualTime <- setRefClass("AccrualTime", contains = "TimeDefinition", fields = list( .showWarnings = "logical", endOfAccrualIsUserDefined = "logical", followUpTimeMustBeUserDefined = "logical", maxNumberOfSubjectsIsUserDefined = "logical", maxNumberOfSubjectsCanBeCalculatedDirectly = "logical", absoluteAccrualIntensityEnabled = "logical", accrualTime = "numeric", accrualIntensity = "numeric", accrualIntensityRelative = "numeric", maxNumberOfSubjects = "numeric", remainingTime = "numeric", piecewiseAccrualEnabled = "logical" ), methods = list( initialize = function(accrualTime = NA_real_, ..., accrualIntensity = NA_real_, maxNumberOfSubjects = NA_real_, showWarnings = TRUE, absoluteAccrualIntensityEnabled = NA) { callSuper( accrualTime = NA_real_, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects, .showWarnings = showWarnings, absoluteAccrualIntensityEnabled = absoluteAccrualIntensityEnabled, ... 
) endOfAccrualIsUserDefined <<- NA followUpTimeMustBeUserDefined <<- NA maxNumberOfSubjectsIsUserDefined <<- NA maxNumberOfSubjectsCanBeCalculatedDirectly <<- TRUE # absoluteAccrualIntensityEnabled <<- NA .setParameterType("endOfAccrualIsUserDefined", C_PARAM_GENERATED) .setParameterType("followUpTimeMustBeUserDefined", C_PARAM_GENERATED) .setParameterType("maxNumberOfSubjectsIsUserDefined", C_PARAM_GENERATED) .setParameterType("maxNumberOfSubjectsCanBeCalculatedDirectly", C_PARAM_GENERATED) .setParameterType( "absoluteAccrualIntensityEnabled", ifelse(is.na(absoluteAccrualIntensityEnabled), C_PARAM_GENERATED, C_PARAM_USER_DEFINED) ) accrualIntensityRelative <<- NA_real_ .setParameterType("accrualIntensityRelative", C_PARAM_NOT_APPLICABLE) remainingTime <<- NA_real_ .init(accrualTime) # case 6 correction if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !.self$absoluteAccrualIntensityEnabled) { remainingTime <<- NA_real_ .setParameterType("remainingTime", C_PARAM_NOT_APPLICABLE) .self$accrualTime <<- .self$accrualTime[1:length(.self$accrualIntensity)] } .initAccrualIntensityAbsolute() .validateFormula() .showWarningIfCaseIsNotAllowd() }, .asDataFrame = function() { accrualIntensityTemp <- accrualIntensity if (!all(is.na(accrualIntensityRelative))) { accrualIntensityTemp <- accrualIntensityRelative } if (length(accrualIntensityTemp) + 1 == length(accrualTime)) { accrualIntensityTemp <- c(accrualIntensityTemp, NA_real_) } data <- data.frame( accrualTime = accrualTime, accrualIntensity = accrualIntensityTemp ) rownames(data) <- as.character(1:nrow(data)) colnames(data) <- c( "Start time", C_PARAMETER_NAMES["accrualIntensity"] ) return(data) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .isAbsoluteAccrualIntensity = function(x) { return(!.isRelativeAccrualIntensity(x)) }, .isRelativeAccrualIntensity = function(x) { return(all(x < 1)) }, .show = function(showType = 
1, digits = NA_integer_, consoleOutputEnabled = TRUE) { "Method for automatically printing accrual time objects" .resetCat() if (showType == 2) { callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled) } else { .cat("Accrual time and intensity:\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled ) if (!isAccrualTimeEnabled()) { .cat(" Accrual time is disabled.\n", consoleOutputEnabled = consoleOutputEnabled) } else if (length(accrualTime) == 1) { .cat(" At all times:", accrualIntensity[1], "\n", consoleOutputEnabled = consoleOutputEnabled) } else { accrualTimeStr <- format(accrualTime) accrualIntensityStr <- format(accrualIntensity) for (i in 1:length(accrualTime)) { prefix <- ifelse(i == length(accrualTime) - 1, "<=", " <") suffix <- "" if (!maxNumberOfSubjectsIsUserDefined) { suffix <- " " } if (i < length(accrualTime)) { .cat(" ", accrualTimeStr[i], " - ", prefix, accrualTimeStr[i + 1], suffix, ": ", accrualIntensityStr[i], "\n", consoleOutputEnabled = consoleOutputEnabled ) } else if (!maxNumberOfSubjectsIsUserDefined && !is.na(accrualIntensityStr[i]) && accrualIntensityStr[i] != "NA") { .cat(" ", accrualTimeStr[i], " - <=[?]: ", accrualIntensityStr[i], "\n", consoleOutputEnabled = consoleOutputEnabled ) } } .cat("", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) if (isAccrualTimeEnabled()) { .showFormula(consoleOutputEnabled = consoleOutputEnabled) .cat("\n", consoleOutputEnabled = consoleOutputEnabled) .showCase(consoleOutputEnabled = consoleOutputEnabled) .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } .cat("Details:\n\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = 
FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getGeneratedParameters(), "Generated parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) } }, .getFormula = function() { s <- "" for (i in 1:length(accrualTime)) { if (i < length(accrualTime)) { s <- paste0( s, (round(accrualTime[i + 1], 4) - round(accrualTime[i], 4)), " * ", round(accrualIntensity[i], 4) ) if (!absoluteAccrualIntensityEnabled && (!maxNumberOfSubjectsIsUserDefined || !endOfAccrualIsUserDefined)) { s <- paste0(s, " * c ") } if (i < length(accrualIntensity)) { s <- paste0(s, " + ") } } } return(s) }, .validateFormula = function() { if (is.na(maxNumberOfSubjects) || length(accrualTime) != length(accrualIntensity) + 1) { return(invisible()) } numberOfSubjects <- 0 for (i in 1:length(accrualTime)) { if (i < length(accrualTime)) { numberOfSubjects <- numberOfSubjects + (accrualTime[i + 1] - accrualTime[i]) * accrualIntensity[i] } } if (!isTRUE(all.equal(numberOfSubjects, maxNumberOfSubjects, tolerance = 1e-03)) && absoluteAccrualIntensityEnabled) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") disagrees with ", "the defined accrual time and intensity: ", .getFormula(), " = ", numberOfSubjects ) } }, .showWarningIfCaseIsNotAllowd = function() { caseIsAllowed <- TRUE if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { caseIsAllowed <- FALSE } else if (!endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && followUpTimeMustBeUserDefined && !absoluteAccrualIntensityEnabled) { caseIsAllowed <- FALSE } if (!caseIsAllowed) { warning("The specified accrual time and intensity cannot be ", "supplemented automatically with the missing information; ", "therefore further calculations are not possible", call. 
= FALSE ) } }, .showFormula = function(consoleOutputEnabled) { .cat("Formula:\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(" ", consoleOutputEnabled = consoleOutputEnabled) .cat("maxNumberOfSubjects = ", consoleOutputEnabled = consoleOutputEnabled) if (!is.na(maxNumberOfSubjects)) { .cat(maxNumberOfSubjects, " = ", consoleOutputEnabled = consoleOutputEnabled) } .cat(.getFormula(), consoleOutputEnabled = consoleOutputEnabled) if (length(accrualTime) == length(accrualIntensity)) { .cat("(x - ", accrualTime[length(accrualTime)], ") * ", accrualIntensity[length(accrualIntensity)], consoleOutputEnabled = consoleOutputEnabled ) if (!absoluteAccrualIntensityEnabled && (!maxNumberOfSubjectsIsUserDefined || !endOfAccrualIsUserDefined)) { .cat(" * c ", consoleOutputEnabled = consoleOutputEnabled) } .cat(", where 'x' is the unknown last accrual time", consoleOutputEnabled = consoleOutputEnabled ) if (!absoluteAccrualIntensityEnabled && (!maxNumberOfSubjectsIsUserDefined || !endOfAccrualIsUserDefined)) { .cat(" and 'c' a constant factor", consoleOutputEnabled = consoleOutputEnabled) } } else if (!absoluteAccrualIntensityEnabled && (!maxNumberOfSubjectsIsUserDefined || !endOfAccrualIsUserDefined)) { .cat(", where 'c' is a constant factor", consoleOutputEnabled = consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) }, .showCase = function(consoleOutputEnabled = TRUE) { caseIsAllowed <- TRUE prefix <- " " # Case 1 # example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33), # maxNumberOfSubjects = 1000) if (endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && absoluteAccrualIntensityEnabled) { .cat("Case (#1):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "End of accrual, absolute accrual intensity and 'maxNumberOfSubjects' are given, ", " 'followUpTime'** shall be calculated.\n", consoleOutputEnabled = consoleOutputEnabled ) .cat(prefix, "Example: 
getAccrualTime(accrualTime = c(0, 6, 30), ", "accrualIntensity = c(22, 33), maxNumberOfSubjects = 924)\n", consoleOutputEnabled = consoleOutputEnabled ) } # Case 2 # example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33), # maxNumberOfSubjects = 1000) else if (endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { .cat("Case (#2):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "End of accrual, relative accrual intensity and 'maxNumberOfSubjects' are given, ", "absolute accrual intensity* and 'followUpTime'** shall be calculated.\n", consoleOutputEnabled = consoleOutputEnabled ) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6, 30), ", "accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000)\n", consoleOutputEnabled = consoleOutputEnabled ) } # Case 3 # example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33)) else if (endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && absoluteAccrualIntensityEnabled) { .cat("Case (#3):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "End of accrual and absolute accrual intensity are given, ", "'maxNumberOfSubjects'* and 'followUpTime'** shall be calculated.\n", consoleOutputEnabled = consoleOutputEnabled ) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(22, 33))\n", consoleOutputEnabled = consoleOutputEnabled ) } # Case 4 # example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33)) else if (endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { .cat("Case (#4):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "End of accrual, relative accrual intensity and 'followUpTime' are given, ", "absolute accrual intensity** and 'maxNumberOfSubjects'** shall be calculated.\n", consoleOutputEnabled 
= consoleOutputEnabled ) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6, 30), accrualIntensity = c(0.22, 0.33))\n", consoleOutputEnabled = consoleOutputEnabled ) } # Case 5 # example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33), # maxNumberOfSubjects = 1000) else if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && absoluteAccrualIntensityEnabled) { .cat("Case (#5):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "'maxNumberOfSubjects' and absolute accrual intensity are given, ", "end of accrual* and 'followUpTime'** shall be calculated\n", consoleOutputEnabled = consoleOutputEnabled ) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6), ", "accrualIntensity = c(22, 33), maxNumberOfSubjects = 1000)\n", consoleOutputEnabled = consoleOutputEnabled ) } # Case 6 # example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33), # maxNumberOfSubjects = 1000) else if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { caseIsAllowed <- FALSE .cat("Case (#6):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "'maxNumberOfSubjects' and relative accrual intensity are given, ", "absolute accrual intensity@, end of accrual* and 'followUpTime'** shall be calculated\n", consoleOutputEnabled = consoleOutputEnabled ) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6), ", "accrualIntensity = c(0.22, 0.33), maxNumberOfSubjects = 1000)\n", consoleOutputEnabled = consoleOutputEnabled ) } # Case 7 # example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33)) else if (!endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && followUpTimeMustBeUserDefined && absoluteAccrualIntensityEnabled) { .cat("Case (#7):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "'followUpTime' and absolute accrual intensity are given, ", "end 
of accrual** and 'maxNumberOfSubjects'** shall be calculated\n", consoleOutputEnabled = consoleOutputEnabled ) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(22, 33))\n", consoleOutputEnabled = consoleOutputEnabled ) } # Case 8 # example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33)) else if (!endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && followUpTimeMustBeUserDefined && !absoluteAccrualIntensityEnabled) { caseIsAllowed <- FALSE .cat("Case (#8):\n", sep = "", heading = 1, consoleOutputEnabled = consoleOutputEnabled) .cat(prefix, "'followUpTime' and relative accrual intensity are given, ", "absolute accrual intensity@, end of accrual and 'maxNumberOfSubjects' shall be calculated\n", consoleOutputEnabled = consoleOutputEnabled ) .cat(prefix, "Example: getAccrualTime(accrualTime = c(0, 6), accrualIntensity = c(0.22, 0.33))\n", consoleOutputEnabled = consoleOutputEnabled ) } # .cat("\n", consoleOutputEnabled = consoleOutputEnabled) if (!caseIsAllowed) { .cat(prefix, "(@) Cannot be calculated.\n", consoleOutputEnabled = consoleOutputEnabled ) } .cat(prefix, "(*) Can be calculated directly.\n", consoleOutputEnabled = consoleOutputEnabled ) .cat(prefix, "(**) Cannot be calculated directly but with ", "'getSampleSizeSurvival()' or 'getPowerSurvival()'.\n", consoleOutputEnabled = consoleOutputEnabled ) }, .followUpTimeShallBeCalculated = function() { # Case 1: 'followUpTime'** shall be calculated if (endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && absoluteAccrualIntensityEnabled) { return(TRUE) } # Case 2: 'followUpTime'** shall be calculated else if (endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { return(TRUE) } # Case 3: 'followUpTime'** shall be calculated else if (endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && absoluteAccrualIntensityEnabled) { return(TRUE) } # Case 5: 'followUpTime'** shall be 
calculated else if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && absoluteAccrualIntensityEnabled) { return(TRUE) } # Case 6: 'followUpTime'** shall be calculated else if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { return(TRUE) } # (**) Cannot be calculated directly but with 'getSampleSizeSurvival()' or 'getPowerSurvival()' return(FALSE) }, .validate = function() { # Case 6 if (!endOfAccrualIsUserDefined && maxNumberOfSubjectsIsUserDefined && !absoluteAccrualIntensityEnabled) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the calculation of 'followUpTime' for given 'maxNumberOfSubjects' ", "and relative accrual intensities (< 1) ", "can only be done if end of accrual is defined" ) } # Case 8 else if (!endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined && followUpTimeMustBeUserDefined && !absoluteAccrualIntensityEnabled) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the calculation of 'maxNumberOfSubjects' for given 'followUpTime' ", "and relative accrual intensities (< 1) ", "can only be done if end of accrual is defined" ) } }, .toString = function(startWithUpperCase = FALSE) { s <- "accrual time" return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) }, .getAccrualTimeWithoutLeadingZero = function() { if (length(accrualTime) <= 1) { return(NA_real_) } return(accrualTime[2:length(accrualTime)]) }, isAccrualTimeEnabled = function() { if (length(accrualTime) == 0) { return(FALSE) } if (length(accrualTime) == 1 && is.na(accrualTime)) { return(FALSE) } return(TRUE) }, .initFromList = function(accrualTimeList) { if (!is.list(accrualTimeList)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' must be a list") } if (length(accrualTimeList) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' must contain at least one entry") } if (is.null(names(accrualTimeList))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' must be a named list where the names 
specify ", "the time regions and the values the accrual time" ) } if (.showWarnings && !all(is.na(accrualIntensity)) && (length(accrualIntensity) != 1 || accrualIntensity != C_ACCRUAL_INTENSITY_DEFAULT)) { warning("'accrualIntensity' (", .arrayToString(accrualIntensity), ") will be ignored because 'accrualTime' is a list", call. = FALSE ) } accrualTime <<- numeric(0) accrualIntensity <<- numeric(0) timeRegions <- names(accrualTimeList) endOfAccrualIsUndefined <- FALSE accrualTime <<- c(accrualTime, 0) for (i in 1:length(timeRegions)) { timePeriod <- timeRegions[i] accrualTimeValue <- accrualTimeList[[timePeriod]] .assertIsSingleNumber(accrualTimeValue, paste0("accrualTime[", i, "]")) settings <- .validateTimePeriod(timePeriod, i = i, n = length(timeRegions), accrualTimeMode = TRUE) timePeriod <- settings$timePeriod endOfAccrualIsUndefined <- settings$endOfAccrualIsUndefined if (i < length(timeRegions)) { parts <- strsplit(timePeriod, "- *(< *)?", perl = TRUE)[[1]] if (length(parts) != 2) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all regions (", timePeriod, ") must have the format ", "\"time_1 - = 2 && length(accrualTime) == length(accrualIntensity) + 1 && !any(is.na(accrualTime)) && !any(is.na(accrualIntensity))) { len <- length(accrualIntensity) accrualIntensityAbsolute <- maxNumberOfSubjects / sum((accrualTime[2:(len + 1)] - accrualTime[1:len]) * accrualIntensity) * accrualIntensity if (!isTRUE(all.equal(accrualIntensityAbsolute, accrualIntensity, tolerance = 1e-06)) && !isTRUE(all.equal(accrualIntensityAbsolute, 0, tolerance = 1e-06))) { .validateAccrualTimeAndIntensity() if (absoluteAccrualIntensityEnabled && .getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED) { if (.getParameterType("accrualTime") == C_PARAM_DEFAULT_VALUE) { accrualTime <<- maxNumberOfSubjects / accrualIntensity .setParameterType("accrualTime", C_PARAM_GENERATED) remainingTime <<- accrualTime accrualTime <<- c(0, accrualTime) } else { stop( 
C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") disagrees with ", "the defined accrual time (", .arrayToString(accrualTime), ") and intensity: ", .getFormula(), " = ", .getSampleSize() ) } } else { if (!absoluteAccrualIntensityEnabled && # .isRelativeAccrualIntensity(accrualIntensity) .getParameterType("accrualIntensity") == C_PARAM_USER_DEFINED && .getParameterType("accrualTime") == C_PARAM_DEFAULT_VALUE && .getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED) { if (.showWarnings) { warning("'accrualIntensity' (", accrualIntensity, ") will be ignored", call. = FALSE) } accrualIntensityRelative <<- C_ACCRUAL_INTENSITY_DEFAULT accrualIntensity <<- accrualIntensityAbsolute .setParameterType("accrualIntensity", C_PARAM_GENERATED) .setParameterType("remainingTime", C_PARAM_NOT_APPLICABLE) } else { accrualIntensityRelative <<- accrualIntensity accrualIntensity <<- accrualIntensityAbsolute .setParameterType("accrualIntensity", C_PARAM_GENERATED) .setParameterType("accrualIntensityRelative", C_PARAM_USER_DEFINED) } } } } }, .isNoPiecewiseAccrualTime = function(accrualTimeArg) { if (length(accrualTimeArg) == 0 || any(is.na(accrualTimeArg)) || !all(is.numeric(accrualTimeArg))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'accrualTimeArg' must a be valid numeric vector") } if (length(accrualTimeArg) == 1) { return(TRUE) } if (length(accrualTimeArg) == 2 && accrualTimeArg[1] == 0) { return(TRUE) } return(FALSE) }, .init = function(accrualTimeArg) { if (length(accrualTimeArg) == 0) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'accrualTime' must be defined") } if (length(accrualTimeArg) == 1 && is.numeric(accrualTimeArg) && is.na(accrualTimeArg)) { accrualTimeArg <- C_ACCRUAL_TIME_DEFAULT } calculateLastAccrualTimeEnabled <- FALSE if (is.list(accrualTimeArg)) { endOfAccrualIsUndefined <- .initFromList(accrualTimeArg) calculateLastAccrualTimeEnabled <- endOfAccrualIsUndefined && !is.null(maxNumberOfSubjects) && 
length(maxNumberOfSubjects) == 1 && !is.na(maxNumberOfSubjects) } else if (is.numeric(accrualTimeArg)) { .assertIsNumericVector(accrualTimeArg, "accrualTime") if (length(accrualIntensity) > 1) { .assertIsNumericVector(accrualIntensity, "accrualIntensity") } if (.isNoPiecewiseAccrualTime(accrualTimeArg) && (length(accrualIntensity) == 0 || is.null(accrualIntensity) || all(is.na(accrualIntensity)) || all(accrualIntensity == C_ACCRUAL_INTENSITY_DEFAULT))) { accrualTimeArg <- accrualTimeArg[length(accrualTimeArg)] accrualTime <<- c(0L, accrualTimeArg) .setParameterType("accrualTime", ifelse( identical(as.integer(accrualTime), C_ACCRUAL_TIME_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) accrualIntensity <<- C_ACCRUAL_INTENSITY_DEFAULT .setParameterType("accrualIntensity", C_PARAM_DEFAULT_VALUE) .setParameterType( "maxNumberOfSubjects", ifelse(length(maxNumberOfSubjects) == 1 && is.na(maxNumberOfSubjects), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED ) ) endOfAccrualIsUserDefined <<- length(accrualTime) == length(accrualIntensity) + 1 maxNumberOfSubjectsIsUserDefined <<- .getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED followUpTimeMustBeUserDefined <<- !endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined absoluteAccrualIntensityEnabled <<- FALSE if (maxNumberOfSubjectsIsUserDefined) { accrualIntensity <<- maxNumberOfSubjects / accrualTime[length(accrualTime)] .setParameterType("accrualIntensity", C_PARAM_GENERATED) } return(invisible()) } accrualTime <<- accrualTimeArg if (length(accrualTime) == 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' must contain at least one time value" ) } if (accrualTime[1] != 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the first value of 'accrualTime' (", .arrayToString(accrualTime), ") must be 0" ) } .setParameterType("accrualTime", ifelse( identical(as.integer(accrualTime), C_ACCRUAL_TIME_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) .setParameterType("accrualIntensity", 
C_PARAM_USER_DEFINED) } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' must be a list or a numeric vector") } if (is.na(absoluteAccrualIntensityEnabled)) { absoluteAccrualIntensityEnabled <<- .isAbsoluteAccrualIntensity(accrualIntensity) } if (is.null(maxNumberOfSubjects) || length(maxNumberOfSubjects) == 0 || any(is.na(maxNumberOfSubjects))) { if (length(accrualTime) != length(accrualIntensity) + 1 || !absoluteAccrualIntensityEnabled) { maxNumberOfSubjectsCanBeCalculatedDirectly <<- FALSE } .setParameterType("maxNumberOfSubjects", C_PARAM_NOT_APPLICABLE) } else { if (!(length(accrualTime) %in% c( length(accrualIntensity), length(accrualIntensity) + 1 ))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'accrualTime' (", length(accrualTime), ") must be equal to length of 'accrualIntensity' if the last 'accrualTime' ", "shall be calculated ", "based on 'maxNumberOfSubjects' or length of 'accrualIntensity' (", length(accrualIntensity), ") + 1 otherwise" ) } if (length(accrualTime) == length(accrualIntensity)) { calculateLastAccrualTimeEnabled <- TRUE } .setParameterType("maxNumberOfSubjects", C_PARAM_USER_DEFINED) } endOfAccrualIsUserDefined <<- length(accrualTime) == length(accrualIntensity) + 1 if (calculateLastAccrualTimeEnabled) { .calculateRemainingTime() } else if (maxNumberOfSubjectsCanBeCalculatedDirectly) { if (length(accrualTime) == 1) { if (length(maxNumberOfSubjects) > 0 && !is.na(maxNumberOfSubjects) && maxNumberOfSubjects > 0 && maxNumberOfSubjects < accrualIntensity[1]) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") ", "must be >= ", accrualIntensity[1], " ('accrualIntensity')" ) } remainingTime <<- accrualTime .setParameterType("remainingTime", C_PARAM_USER_DEFINED) } else if (length(accrualTime) > 1) { sampleSize <- .getSampleSize() if (!isTRUE(all.equal(sampleSize, maxNumberOfSubjects, tolerance = 1e-04))) { if (length(maxNumberOfSubjects) == 1 && !is.na(maxNumberOfSubjects) && 
maxNumberOfSubjects > 0 && maxNumberOfSubjects < sampleSize) { if (length(accrualIntensity) == 1 && length(accrualTime) == 1) { .setParameterType("maxNumberOfSubjects", C_PARAM_USER_DEFINED) accrualTime <<- 0 .calculateRemainingTime() } else { if (length(accrualTime) == length(accrualIntensity) + 1 && absoluteAccrualIntensityEnabled) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") disagrees with ", "the defined accrual time and intensity: ", .getFormula(), " = ", sampleSize ) } else { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") ", "must be >= ", sampleSize ) } } } else { if ((length(maxNumberOfSubjects) != 1 || is.na(maxNumberOfSubjects)) && absoluteAccrualIntensityEnabled) { maxNumberOfSubjects <<- sampleSize .setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED) } remainingTime <<- accrualTime[length(accrualTime)] - accrualTime[length(accrualTime) - 1] .setParameterType( "remainingTime", ifelse(!isTRUE(all.equal(0, remainingTime, tolerance = 1e-06)), C_PARAM_GENERATED, C_PARAM_NOT_APPLICABLE ) ) } } } } .validateInitialization() maxNumberOfSubjectsIsUserDefined <<- .getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED followUpTimeMustBeUserDefined <<- !endOfAccrualIsUserDefined && !maxNumberOfSubjectsIsUserDefined }, .getSampleSize = function() { if (length(accrualTime) < 2) { return(0) } sampleSize <- 0 for (i in 2:length(accrualTime)) { time <- accrualTime[i] - accrualTime[i - 1] sampleSize <- sampleSize + time * accrualIntensity[i - 1] } return(sampleSize) }, .getValuesAfterDecimalPoint = function(x) { values <- c() for (value in x) { baseLevel <- value - floor(value) if (baseLevel == 0) { baseLevel <- 1 } values <- c(values, baseLevel) } return(values) }, .getBaseLevel = function(x) { return(min(.getValuesAfterDecimalPoint(x[x > 0]))) }, .calcSampleSize = function() { if (length(accrualTime) <= 1) { return(0) } accrualTimeTemp <- accrualTime 
accrualIntensityTemp <- accrualIntensity sampleSize <- 0 for (i in 2:length(accrualTime)) { time <- accrualTime[i] - accrualTime[i - 1] sampleSize <- sampleSize + time * accrualIntensity[i - 1] if (sampleSize >= maxNumberOfSubjects && length(accrualTime) == length(accrualIntensity)) { if (sampleSize > maxNumberOfSubjects) { accrualTime <<- accrualTime[1:(i - 1)] } i2 <- i if (length(accrualTime) == length(accrualIntensity) + 1) { i2 <- i - 1 } accrualIntensity <<- accrualIntensity[1:(i2 - 1)] while (length(accrualTime) > length(accrualIntensity) + 1) { accrualTime <<- accrualTime[1:(length(accrualTime) - 1)] } sampleSize <- 0 if (length(accrualTime) > 1) { sampleSize <- .getSampleSize() } if (.showWarnings) { n1 <- length(accrualTimeTemp) - length(accrualTime) n2 <- length(accrualIntensityTemp) - length(accrualIntensity) if (n1 == 1) { warning("Last accrual time value (", accrualTimeTemp[length(accrualTimeTemp)], ") ignored", call. = FALSE ) } else if (n1 > 1) { warning("Last ", n1, " accrual time values (", .arrayToString(accrualTimeTemp[(length(accrualTimeTemp) - n1 + 1):length(accrualTimeTemp)]), ") ignored", call. = FALSE ) } if (n2 == 1) { warning("Last accrual intensity value (", accrualIntensityTemp[length(accrualIntensityTemp)], ") ignored", call. = FALSE ) } else if (n2 > 1) { warning("Last ", n2, " accrual intensity values (", .arrayToString(accrualIntensityTemp[i2:length(accrualIntensityTemp)]), ") ignored", call. 
= FALSE ) } } return(sampleSize) } } return(sampleSize) }, .calculateRemainingTime = function(stopInCaseOfError = TRUE) { .assertIsValidMaxNumberOfSubjects(maxNumberOfSubjects) sampleSize <- .calcSampleSize() remainingSubjects <- maxNumberOfSubjects - sampleSize if (remainingSubjects < 0) { if (!stopInCaseOfError) { return(invisible()) } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") ", "is too small for the defined accrual time (minimum = ", sampleSize, ")" ) } lastAccrualIntensity <- accrualIntensity[length(accrualIntensity)] remainingTime <<- remainingSubjects / lastAccrualIntensity .setParameterType( "remainingTime", ifelse(!isTRUE(all.equal(0, remainingTime, tolerance = 1e-06)), C_PARAM_GENERATED, C_PARAM_NOT_APPLICABLE ) ) if (length(accrualTime) == length(accrualIntensity)) { accrualTime <<- c(accrualTime, accrualTime[length(accrualTime)] + remainingTime) } # .setParameterType("accrualTime", C_PARAM_GENERATED) if (any(accrualTime < 0)) { if (!stopInCaseOfError) { return(invisible()) } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' (", maxNumberOfSubjects, ") ", "is too small for the defined accrual time" ) } }, .validateAccrualTimeAndIntensity = function() { if ((length(accrualTime) >= 2 && any(accrualTime[2:length(accrualTime)] < 0))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' (", .arrayToString(accrualTime), ") must be > 0" ) } .assertValuesAreStrictlyIncreasing(accrualTime, "accrualTime") if ((length(accrualTime) > 1) && any(accrualIntensity < 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualIntensity' (", .arrayToString(accrualIntensity), ") must be >= 0" ) } if (length(accrualIntensity) == 1 && !is.na(accrualIntensity) && accrualIntensity == 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "at least one 'accrualIntensity' value must be > 0" ) } if (length(accrualIntensity) > 0 && accrualIntensity[1] == 0) { warning( "It makes no sense to start 'accrualIntensity' (", 
.arrayToString(accrualIntensity), ") with 0" ) } }, .validateInitialization = function() { .validateAccrualTimeAndIntensity() piecewiseAccrualEnabled <<- !.isNoPiecewiseAccrualTime(accrualTime) } ) ) rpact/R/class_design.R0000644000176200001440000013422414445307575014371 0ustar liggesusers## | ## | *Trial design classes* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_constants.R #' @include f_core_plot.R #' @include f_core_utilities.R NULL #' #' @name TrialDesign #' #' @title #' Basic Trial Design #' #' @description #' Basic class for trial designs. #' #' @template field_kMax #' @template field_alpha #' @template field_stages #' @template field_informationRates #' @template field_userAlphaSpending #' @template field_criticalValues #' @template field_stageLevels #' @template field_alphaSpent #' @template field_bindingFutility #' @template field_tolerance #' #' @details #' \code{TrialDesign} is the basic class for #' \itemize{ #' \item \code{\link{TrialDesignFisher}}, #' \item \code{\link{TrialDesignGroupSequential}}, #' \item \code{\link{TrialDesignInverseNormal}}, and #' \item \code{\link{TrialDesignConditionalDunnett}}. 
#' }
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include f_core_constants.R
#' @include f_core_plot.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
TrialDesign <- setRefClass("TrialDesign",
    contains = "ParameterSet",
    fields = list(
        .plotSettings = "PlotSettings",
        kMax = "integer",
        alpha = "numeric",
        stages = "integer",
        informationRates = "numeric",
        userAlphaSpending = "numeric",
        criticalValues = "numeric",
        stageLevels = "numeric",
        alphaSpent = "numeric",
        bindingFutility = "logical",
        tolerance = "numeric"
    ),
    methods = list(
        # Forwards the design parameters to the ParameterSet initializer,
        # creates the plot settings, and registers the parameter names and
        # format functions used by the generic show/print machinery.
        # Note: 'kMax' is not a formal argument here; it is passed through '...'.
        initialize = function(...,
                alpha = NA_real_,
                informationRates = NA_real_,
                userAlphaSpending = NA_real_,
                criticalValues = NA_real_,
                stageLevels = NA_real_,
                alphaSpent = NA_real_,
                bindingFutility = NA,
                tolerance = 1e-06 # C_ANALYSIS_TOLERANCE_DEFAULT
                ) {
            callSuper(...,
                alpha = alpha,
                informationRates = informationRates,
                userAlphaSpending = userAlphaSpending,
                criticalValues = criticalValues,
                stageLevels = stageLevels,
                alphaSpent = alphaSpent,
                bindingFutility = bindingFutility,
                tolerance = tolerance
            )
            .plotSettings <<- PlotSettings()
            # The conditional Dunnett design uses the full parameter name table;
            # all other designs restrict it to the fields of this base class.
            if (inherits(.self, "TrialDesignConditionalDunnett")) {
                .parameterNames <<- C_PARAMETER_NAMES
            } else {
                .parameterNames <<- .getSubListByNames(.getParameterNames(design = .self), c(
                    "stages",
                    "kMax",
                    "alpha",
                    "informationRates",
                    "userAlphaSpending",
                    "criticalValues",
                    "stageLevels",
                    "alphaSpent",
                    "bindingFutility",
                    "tolerance"
                ))
            }
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS
            .initStages()
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        # showType 3 prints a generated summary, showType 2 delegates to the
        # raw ParameterSet output, any other value prints the grouped
        # user-defined / derived / default / generated parameter sections.
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing trial design objects"
            .resetCat()
            if (showType == 3) {
                .createSummary(.self, digits = digits)$.show(
                    showType = 1,
                    digits = digits, consoleOutputEnabled = consoleOutputEnabled
                )
            } else if (showType == 2) {
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                .cat("Design parameters and output of ", .toString(), ":\n\n",
                    heading = 1,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getDerivedParameters(), "Derived from user defined parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getGeneratedParameters(), "Output",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
            }
        },
        # Human-readable design label derived from the concrete subclass.
        .toString = function(startWithUpperCase = FALSE) {
            s <- "unknown trial design"
            if (.isTrialDesignGroupSequential(.self)) {
                s <- "group sequential design"
            } else if (.isTrialDesignInverseNormal(.self)) {
                s <- "inverse normal combination test design"
            } else if (.isTrialDesignFisher(.self)) {
                s <- "Fisher's combination test design"
            } else if (.isTrialDesignConditionalDunnett(.self)) {
                s <- "conditional Dunnett test design"
            }
            return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s))
        },
        # Populates 'stages' as 1..kMax and sets its parameter type to mirror
        # whether 'kMax' was user defined, defaulted, or unknown.
        .initStages = function() {
            if (length(kMax) == 1 && !is.na(kMax) && kMax > 0) {
                stages <<- c(1L:kMax)
                if (kMax == C_KMAX_DEFAULT) {
                    .setParameterType("stages", C_PARAM_DEFAULT_VALUE)
                } else {
                    type <- .getParameterType("kMax")
                    .setParameterType("stages", ifelse(type != C_PARAM_TYPE_UNKNOWN, type, C_PARAM_USER_DEFINED))
                }
            } else {
                .setParameterType("stages", C_PARAM_NOT_APPLICABLE)
            }
        },
        # TRUE only for multi-stage group sequential / inverse normal designs
        # that carry a positive 'delayedInformation' vector.
        .isDelayedResponseDesign = function() {
            return((inherits(.self, "TrialDesignGroupSequential") ||
                inherits(.self, "TrialDesignInverseNormal")) &&
                .self$kMax > 1 &&
                !is.null(.self[["delayedInformation"]]) &&
                !any(is.na(.self$delayedInformation)) &&
                any(.self$delayedInformation > 0))
        }
    )
)

#'
#' @name TrialDesignCharacteristics
#'
#' @title
#' Trial Design Characteristics
#'
#' @description
#' Class for trial design characteristics.
#'
#' @template field_nFixed
#' @template field_shift
#' @template field_inflationFactor
#' @template field_stages
#' @template field_information
#' @template field_power
#' @template field_rejectionProbabilities
#' @template field_futilityProbabilities
#' @template field_averageSampleNumber1
#' @template field_averageSampleNumber01
#' @template field_averageSampleNumber0
#'
#' @details
#' \code{TrialDesignCharacteristics} contains all fields required to collect the characteristics of a design.
#' This object should not be created directly; use \code{getDesignCharacteristics}
#' with suitable arguments to create it.
#'
#' @seealso \code{\link{getDesignCharacteristics}} for getting the design characteristics.
#'
#' @include class_core_parameter_set.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
TrialDesignCharacteristics <- setRefClass("TrialDesignCharacteristics",
    contains = "ParameterSet",
    fields = list(
        .design = "TrialDesign",
        .probs = "matrix",
        nFixed = "numeric",
        shift = "numeric",
        inflationFactor = "numeric",
        stages = "integer",
        information = "numeric",
        power = "numeric",
        rejectionProbabilities = "numeric", # efficacy probabilities
        futilityProbabilities = "numeric",
        averageSampleNumber1 = "numeric",
        averageSampleNumber01 = "numeric",
        averageSampleNumber0 = "numeric"
    ),
    methods = list(
        # Stores the underlying design and registers parameter names and
        # format functions; 'nFixed' gets a dedicated probability formatter.
        initialize = function(design, ...) {
            callSuper(.design = design, ...)
            .parameterNames <<- .getParameterNames(design = design)
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS
            .parameterFormatFunctions[["nFixed"]] <<- ".formatProbabilities"
            .initStages()
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        # showType 2 delegates to the raw ParameterSet output; any other value
        # prints only the generated parameters under a descriptive title.
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing trial design characteristics objects"
            .resetCat()
            if (showType == 2) {
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                .showParametersOfOneGroup(.getGeneratedParameters(),
                    title = .toString(startWithUpperCase = TRUE),
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
            }
        },
        # Mirrors TrialDesign$.initStages() but reads kMax from the wrapped design.
        .initStages = function() {
            if (!is.na(.design$kMax) && .design$kMax > 0) {
                stages <<- c(1L:.design$kMax)
                if (.design$kMax == C_KMAX_DEFAULT) {
                    .setParameterType("stages", C_PARAM_DEFAULT_VALUE)
                } else {
                    .setParameterType("stages", C_PARAM_USER_DEFINED)
                }
            } else {
                .setParameterType("stages", C_PARAM_NOT_APPLICABLE)
            }
        },
        # Label of the wrapped design plus "characteristics", with a
        # "delayed response" prefix where applicable.
        .toString = function(startWithUpperCase = FALSE) {
            if (.design$.isDelayedResponseDesign()) {
                prefix <- "delayed response"
                if (startWithUpperCase) {
                    prefix <- .firstCharacterToUpperCase(prefix)
                }
                return(paste(prefix, .design$.toString(startWithUpperCase = FALSE), "characteristics"))
            }
            return(paste(.design$.toString(startWithUpperCase = startWithUpperCase), "characteristics"))
        }
    )
)

#'
#' @title
#' Trial Design Characteristics Printing
#'
#' @param x The trial design characteristics object.
#' @param markdown If \code{TRUE}, the object \code{x} will be printed using markdown syntax;
#' normal representation will be used otherwise (default is \code{FALSE})
#' @param showDesign Show the design print output above the design characteristics, default is \code{TRUE}.
#' @inheritParams param_three_dots_plot
#'
#' @description
#' Prints the design characteristics object.
#'
#' @details
#' Generic function to print all kinds of design characteristics.
#'
#' @export
#'
print.TrialDesignCharacteristics <- function(x, ..., markdown = FALSE, showDesign = TRUE) {
    # Print the underlying trial design first (unless suppressed via showDesign = FALSE),
    # then the characteristics object itself; both use the ParameterSet print method.
    if (showDesign) {
        print.ParameterSet(x$.design, ..., markdown = markdown)
    }
    print.ParameterSet(x, ..., markdown = markdown)
}

#'
#' @title
#' Coerce TrialDesignCharacteristics to a Data Frame
#'
#' @description
#' Returns the \code{TrialDesignCharacteristics} as data frame.
#'
#' @param x A \code{\link{TrialDesignCharacteristics}} object.
#' @inheritParams param_niceColumnNamesEnabled
#' @inheritParams param_includeAllParameters
#' @inheritParams param_three_dots
#'
#' @details
#' Each element of the \code{\link{TrialDesignCharacteristics}} is converted to a column in the data frame.
#'
#' @template return_dataframe
#'
#' @examples
#' as.data.frame(getDesignCharacteristics(getDesignGroupSequential()))
#'
#' @export
#'
#' @keywords internal
#'
as.data.frame.TrialDesignCharacteristics <- function(x, row.names = NULL,
        optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) {
    # For a group sequential design (kMax > 1) the fixed-sample quantities
    # 'nFixed' and 'shift' are dropped from the output; for a fixed design
    # (kMax == 1) the 'inflationFactor' is dropped instead.
    if (x$.design$kMax > 1) {
        parameterNamesToBeExcluded <- c("nFixed", "shift")
    } else {
        parameterNamesToBeExcluded <- c("inflationFactor")
    }
    # 'handleParameterNamesAsToBeExcluded = TRUE' makes .getAsDataFrame treat
    # 'parameterNames' as an exclusion list rather than a selection list.
    return(.getAsDataFrame(
        parameterSet = x,
        parameterNames = parameterNamesToBeExcluded,
        niceColumnNamesEnabled = niceColumnNamesEnabled,
        includeAllParameters = includeAllParameters,
        handleParameterNamesAsToBeExcluded = TRUE,
        tableColumnNames = .getTableColumnNames(design = x$.design)
    ))
}

#'
#' @name TrialDesignFisher
#'
#' @title
#' Fisher Design
#'
#' @description
#' Trial design for Fisher's combination test.
#' #' @template field_kMax #' @template field_alpha #' @template field_stages #' @template field_informationRates #' @template field_userAlphaSpending #' @template field_criticalValues #' @template field_stageLevels #' @template field_alphaSpent #' @template field_bindingFutility #' @template field_tolerance #' @template field_method #' @template field_alpha0Vec #' @template field_scale #' @template field_nonStochasticCurtailment #' @template field_sided #' @template field_simAlpha #' @template field_iterations #' @template field_seed #' #' @details #' This object should not be created directly; use \code{\link{getDesignFisher}} #' with suitable arguments to create a Fisher combination test design. #' #' @seealso \code{\link{getDesignFisher}} for creating a Fisher combination test design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignFisher <- setRefClass(C_CLASS_NAME_TRIAL_DESIGN_FISHER, contains = "TrialDesign", fields = list( method = "character", alpha0Vec = "numeric", scale = "numeric", nonStochasticCurtailment = "logical", sided = "integer", simAlpha = "numeric", iterations = "integer", seed = "numeric" ), methods = list( initialize = function(..., method = NA_character_, alpha0Vec = NA_real_, scale = NA_real_, nonStochasticCurtailment = FALSE, sided = as.integer(C_SIDED_DEFAULT), simAlpha = NA_real_, iterations = 0L, seed = NA_real_, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT) { callSuper(..., method = method, alpha0Vec = alpha0Vec, scale = scale, nonStochasticCurtailment = nonStochasticCurtailment, sided = sided, simAlpha = simAlpha, iterations = iterations, seed = seed, tolerance = tolerance ) .parameterNames <<- c(.parameterNames, .getSubListByNames( .getParameterNames(design = .self), c( "method", "alpha0Vec", "scale", "nonStochasticCurtailment", "sided", "simAlpha", "iterations", "seed" ) )) 
.parameterFormatFunctions$criticalValues <<- ".formatCriticalValuesFisher" .initParameterTypes() .setParameterType("iterations", C_PARAM_NOT_APPLICABLE) .setParameterType("seed", C_PARAM_NOT_APPLICABLE) .initStages() }, hasChanged = function(kMax, alpha, sided, method, informationRates, alpha0Vec, userAlphaSpending, bindingFutility) { informationRatesTemp <- informationRates if (any(is.na(informationRatesTemp))) { informationRatesTemp <- .getInformationRatesDefault(kMax) } alpha0VecTemp <- alpha0Vec[1:(kMax - 1)] if (any(is.na(alpha0VecTemp))) { alpha0VecTemp <- rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1) } if (!identical(kMax, .self$kMax)) { return(TRUE) } if (!identical(alpha, .self$alpha)) { return(TRUE) } if (!identical(sided, .self$sided)) { return(TRUE) } if (!identical(method, .self$method)) { return(TRUE) } if (!identical(informationRatesTemp, .self$informationRates)) { return(TRUE) } if (!identical(alpha0VecTemp, .self$alpha0Vec)) { return(TRUE) } if (!identical(userAlphaSpending, .self$userAlphaSpending)) { return(TRUE) } if (!identical(bindingFutility, .self$bindingFutility)) { return(TRUE) } return(FALSE) }, # Defines the order of the parameter output .getParametersToShow = function() { return(c( "method", "kMax", "stages", "informationRates", "alpha", "alpha0Vec", "bindingFutility", "sided", "tolerance", "iterations", "seed", "alphaSpent", "userAlphaSpending", "criticalValues", "stageLevels", "scale", "simAlpha", "nonStochasticCurtailment" )) } ) ) #' #' @name TrialDesignInverseNormal #' #' @title #' Inverse Normal Design #' #' @description #' Trial design for inverse normal method. 
#'
#' @template field_kMax
#' @template field_alpha
#' @template field_stages
#' @template field_informationRates
#' @template field_userAlphaSpending
#' @template field_criticalValues
#' @template field_stageLevels
#' @template field_alphaSpent
#' @template field_bindingFutility
#' @template field_tolerance
#' @template field_typeOfDesign
#' @template field_beta
#' @template field_deltaWT
#' @template field_deltaPT1
#' @template field_deltaPT0
#' @template field_futilityBounds
#' @template field_gammaA
#' @template field_gammaB
#' @template field_optimizationCriterion
#' @template field_sided
#' @template field_betaSpent
#' @template field_typeBetaSpending
#' @template field_userBetaSpending
#' @template field_power
#' @template field_twoSidedPower
#' @template field_constantBoundsHP
#' @template field_betaAdjustment
#' @template field_delayedInformation
#' @template field_decisionCriticalValues
#' @template field_reversalProbabilities
#'
#' @details
#' This object should not be created directly; use \code{\link[=getDesignInverseNormal]{getDesignInverseNormal()}}
#' with suitable arguments to create an inverse normal design.
#'
#' @seealso \code{\link[=getDesignInverseNormal]{getDesignInverseNormal()}} for creating an inverse normal design.
#' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignInverseNormal <- setRefClass(C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, contains = "TrialDesign", fields = list( typeOfDesign = "character", beta = "numeric", deltaWT = "numeric", deltaPT1 = "numeric", deltaPT0 = "numeric", futilityBounds = "numeric", gammaA = "numeric", gammaB = "numeric", optimizationCriterion = "character", sided = "integer", betaSpent = "numeric", typeBetaSpending = "character", userBetaSpending = "numeric", power = "numeric", twoSidedPower = "logical", constantBoundsHP = "numeric", betaAdjustment = "logical", delayedInformation = "numeric", decisionCriticalValues = "numeric", reversalProbabilities = "numeric" ), methods = list( initialize = function(..., beta = C_BETA_DEFAULT, betaSpent = NA_real_, sided = C_SIDED_DEFAULT, futilityBounds = NA_real_, typeOfDesign = C_DEFAULT_TYPE_OF_DESIGN, deltaWT = NA_real_, deltaPT1 = NA_real_, deltaPT0 = NA_real_, optimizationCriterion = C_OPTIMIZATION_CRITERION_DEFAULT, gammaA = NA_real_, gammaB = NA_real_, typeBetaSpending = C_TYPE_OF_DESIGN_BS_NONE, userBetaSpending = NA_real_, power = NA_real_, twoSidedPower = C_TWO_SIDED_POWER_DEFAULT, constantBoundsHP = NA_real_, betaAdjustment = TRUE, # impl as constant delayedInformation = NA_real_) { callSuper(..., beta = beta, betaSpent = betaSpent, sided = sided, futilityBounds = futilityBounds, typeOfDesign = typeOfDesign, deltaWT = deltaWT, deltaPT1 = deltaPT1, deltaPT0 = deltaPT0, optimizationCriterion = optimizationCriterion, gammaA = gammaA, gammaB = gammaB, typeBetaSpending = typeBetaSpending, userBetaSpending = userBetaSpending, power = power, twoSidedPower = twoSidedPower, constantBoundsHP = constantBoundsHP, betaAdjustment = betaAdjustment, delayedInformation = delayedInformation ) .initParameterNames() .parameterFormatFunctions$criticalValues <<- ".formatCriticalValues" 
.initParameterTypes() .initStages() .setParameterType("betaAdjustment", C_PARAM_NOT_APPLICABLE) .setParameterType("delayedInformation", C_PARAM_NOT_APPLICABLE) .setParameterType("decisionCriticalValues", C_PARAM_NOT_APPLICABLE) .setParameterType("reversalProbabilities", C_PARAM_NOT_APPLICABLE) }, .initParameterNames = function() { .parameterNames <<- c(.parameterNames, .getSubListByNames( .getParameterNames(design = .self), c( "beta", "betaSpent", "sided", "futilityBounds", "typeOfDesign", "deltaWT", "deltaPT1", "deltaPT0", "optimizationCriterion", "gammaA", "gammaB", "typeBetaSpending", "userBetaSpending", "power", "twoSidedPower", "constantBoundsHP", "betaAdjustment", "delayedInformation", "decisionCriticalValues", "reversalProbabilities" ) )) }, .formatComparisonResult = function(x) { if (is.null(x) || length(x) == 0 || !is.numeric(x)) { return(x) } s <- sprintf("%.9f", x) s <- sub("\\.0+", "", s) return(s) }, .pasteComparisonResult = function(name, newValue, oldValue) { return(paste0( name, "_new = ", .arrayToString(.formatComparisonResult(newValue)), " (", .getClassName(newValue), "), ", name, "_old = ", .arrayToString(.formatComparisonResult(oldValue)), " (", .getClassName(oldValue), ")" )) }, hasChanged = function(..., kMax, alpha, beta, sided, typeOfDesign, deltaWT, deltaPT1, deltaPT0, informationRates, futilityBounds, optimizationCriterion, typeBetaSpending, gammaA, gammaB, bindingFutility, userAlphaSpending, userBetaSpending, twoSidedPower, constantBoundsHP, betaAdjustment = TRUE, delayedInformation = NA_real_) { informationRatesTemp <- informationRates if (any(is.na(informationRatesTemp))) { informationRatesTemp <- .getInformationRatesDefault(kMax) } futilityBoundsTemp <- futilityBounds[1:(kMax - 1)] if (any(is.na(futilityBoundsTemp))) { futilityBoundsTemp <- rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - 1) } if (!identical(kMax, .self$kMax)) { return(.pasteComparisonResult("kMax", kMax, .self$kMax)) } if (!identical(alpha, .self$alpha)) { 
return(.pasteComparisonResult("alpha", alpha, .self$alpha)) } if (!identical(beta, .self$beta)) { return(.pasteComparisonResult("beta", beta, .self$beta)) } if (!identical(sided, .self$sided)) { return(.pasteComparisonResult("sided", sided, .self$sided)) } if (!identical(twoSidedPower, .self$twoSidedPower)) { return(.pasteComparisonResult("twoSidedPower", twoSidedPower, .self$twoSidedPower)) } if (kMax == 1) { return(FALSE) } if (!identical(betaAdjustment, .self$betaAdjustment)) { return(.pasteComparisonResult("betaAdjustment", betaAdjustment, .self$betaAdjustment)) } if (!identical(delayedInformation, .self$delayedInformation)) { return(.pasteComparisonResult("delayedInformation", delayedInformation, .self$delayedInformation)) } if (!identical(typeOfDesign, .self$typeOfDesign)) { return(.pasteComparisonResult("typeOfDesign", typeOfDesign, .self$typeOfDesign)) } if (typeOfDesign == C_TYPE_OF_DESIGN_WT) { if (!identical(deltaWT, .self$deltaWT)) { return(.pasteComparisonResult("deltaWT", deltaWT, .self$deltaWT)) } } if (typeOfDesign == C_TYPE_OF_DESIGN_PT) { if (!identical(deltaPT1, .self$deltaPT1)) { return(.pasteComparisonResult("deltaPT1", deltaPT1, .self$deltaPT1)) } if (!identical(deltaPT0, .self$deltaPT0)) { return(.pasteComparisonResult("deltaPT0", deltaPT0, .self$deltaPT0)) } } if (!identical(informationRatesTemp, .self$informationRates)) { return(.pasteComparisonResult("informationRates", informationRatesTemp, .self$informationRates)) } if (.getParameterType("futilityBounds") != C_PARAM_GENERATED && (!grepl("^as.*", typeOfDesign) || typeBetaSpending == C_TYPE_OF_DESIGN_BS_NONE) && !identical(futilityBoundsTemp, .self$futilityBounds)) { return(.pasteComparisonResult("futilityBounds", futilityBoundsTemp, .self$futilityBounds)) } if (!identical(optimizationCriterion, .self$optimizationCriterion)) { return(.pasteComparisonResult("optimizationCriterion", optimizationCriterion, .self$optimizationCriterion)) } if (!identical(typeBetaSpending, 
.self$typeBetaSpending)) { return(.pasteComparisonResult("typeBetaSpending", typeBetaSpending, .self$typeBetaSpending)) } if (!identical(gammaA, .self$gammaA)) { return(.pasteComparisonResult("gammaA", gammaA, .self$gammaA)) } if (!identical(gammaB, .self$gammaB)) { return(.pasteComparisonResult("gammaB", gammaB, .self$gammaB)) } if ((typeOfDesign == C_TYPE_OF_DESIGN_PT && !identical(bindingFutility, .self$bindingFutility)) || (!identical(bindingFutility, .self$bindingFutility) && .getParameterType("futilityBounds") != C_PARAM_NOT_APPLICABLE && (sided == 1 || !grepl("^as.*", typeOfDesign) || typeBetaSpending == C_TYPE_OF_DESIGN_BS_NONE) && (any(na.omit(futilityBounds) > -6) || any(na.omit(.self$futilityBounds) > -6)) )) { return(.pasteComparisonResult("bindingFutility", bindingFutility, .self$bindingFutility)) } if (!identical(userAlphaSpending, .self$userAlphaSpending)) { return(.pasteComparisonResult("userAlphaSpending", userAlphaSpending, .self$userAlphaSpending)) } if (!identical(userBetaSpending, .self$userBetaSpending)) { return(.pasteComparisonResult("userBetaSpending", userBetaSpending, .self$userBetaSpending)) } if (!identical(twoSidedPower, .self$twoSidedPower)) { return(.pasteComparisonResult("twoSidedPower", twoSidedPower, .self$twoSidedPower)) } if (typeOfDesign == C_TYPE_OF_DESIGN_HP) { if (!identical(constantBoundsHP, .self$constantBoundsHP)) { return(.pasteComparisonResult("constantBoundsHP", constantBoundsHP, .self$constantBoundsHP)) } } return(FALSE) }, # Defines the order of the parameter output .getParametersToShow = function() { return(c( "typeOfDesign", "kMax", "stages", "informationRates", "alpha", "beta", "power", "twoSidedPower", "deltaWT", "deltaPT1", "deltaPT0", "futilityBounds", "bindingFutility", "constantBoundsHP", "gammaA", "gammaB", "optimizationCriterion", "sided", "betaAdjustment", "delayedInformation", "tolerance", "alphaSpent", "userAlphaSpending", "betaSpent", "typeBetaSpending", "userBetaSpending", "criticalValues", 
"stageLevels", "decisionCriticalValues", "reversalProbabilities" )) } ) ) #' #' @name TrialDesignGroupSequential #' #' @title #' Group Sequential Design #' #' @description #' Trial design for group sequential design. #' #' @template field_kMax #' @template field_alpha #' @template field_stages #' @template field_informationRates #' @template field_userAlphaSpending #' @template field_criticalValues #' @template field_stageLevels #' @template field_alphaSpent #' @template field_bindingFutility #' @template field_tolerance #' @template field_typeOfDesign #' @template field_beta #' @template field_deltaWT #' @template field_deltaPT1 #' @template field_deltaPT0 #' @template field_futilityBounds #' @template field_gammaA #' @template field_gammaB #' @template field_optimizationCriterion #' @template field_sided #' @template field_betaSpent #' @template field_typeBetaSpending #' @template field_userBetaSpending #' @template field_power #' @template field_twoSidedPower #' @template field_constantBoundsHP #' @template field_betaAdjustment #' @template field_delayedInformation #' @template field_decisionCriticalValues #' @template field_reversalProbabilities #' #' @details #' This object should not be created directly; use \code{\link[=getDesignGroupSequential]{getDesignGroupSequential()}} #' with suitable arguments to create a group sequential design. #' #' @seealso \code{\link[=getDesignGroupSequential]{getDesignGroupSequential()}} for creating a group sequential design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' TrialDesignGroupSequential <- setRefClass( C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL, contains = C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, methods = list( initialize = function(...) { callSuper(...) 
.parameterFormatFunctions$criticalValues <<- ".formatCriticalValues"
            .initStages()
        },
        show = function(showType = 1, digits = NA_integer_) {
            "Method for automatically printing trial design objects"
            callSuper(showType = showType, digits = digits)
        }
    )
)

#'
#' @name TrialDesignConditionalDunnett
#'
#' @title
#' Conditional Dunnett Design
#'
#' @description
#' Trial design for conditional Dunnett tests.
#'
#' @template field_kMax
#' @template field_alpha
#' @template field_stages
#' @template field_informationRates
#' @template field_userAlphaSpending
#' @template field_criticalValues
#' @template field_stageLevels
#' @template field_alphaSpent
#' @template field_bindingFutility
#' @template field_tolerance
#' @template field_informationAtInterim
#' @template field_secondStageConditioning
#' @template field_sided
#'
#' @details
#' This object should not be created directly; use \code{\link{getDesignConditionalDunnett}}
#' with suitable arguments to create a conditional Dunnett test design.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
#' @seealso \code{\link{getDesignConditionalDunnett}} for creating a conditional Dunnett test design.
#'
TrialDesignConditionalDunnett <- setRefClass(
    C_CLASS_NAME_TRIAL_DESIGN_CONDITIONAL_DUNNETT,
    contains = "TrialDesign",
    fields = list(
        informationAtInterim = "numeric", # interim information fraction in (0, 1)
        secondStageConditioning = "logical", # whether second-stage p-values are conditionally adjusted
        sided = "integer" # test direction; fixed to one-sided (1L) in initialize
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
notApplicableParameters <- c( "kMax", "stages", "informationRates", "userAlphaSpending", "criticalValues", "stageLevels", "alphaSpent", "bindingFutility", "tolerance" ) for (notApplicableParameter in notApplicableParameters) { .setParameterType(notApplicableParameter, C_PARAM_NOT_APPLICABLE) } .setParameterType("alpha", ifelse( identical(alpha, C_ALPHA_DEFAULT), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) .setParameterType("informationAtInterim", ifelse( identical(informationAtInterim, 0.5), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) .setParameterType("secondStageConditioning", ifelse( identical(secondStageConditioning, TRUE), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) kMax <<- 2L sided <<- 1L .initStages() }, show = function(showType = 1, digits = NA_integer_) { "Method for automatically printing trial design objects" callSuper(showType = showType, digits = digits) } ) ) #' #' @title #' Get Design Conditional Dunnett Test #' #' @description #' Defines the design to perform an analysis with the conditional Dunnett test. #' #' @inheritParams param_alpha #' @param informationAtInterim The information to be expected at interim, default is \code{informationAtInterim = 0.5}. #' @param secondStageConditioning The way the second stage p-values are calculated within the closed system of hypotheses. #' If \code{secondStageConditioning = FALSE} is specified, the unconditional adjusted p-values are used, otherwise #' conditional adjusted p-values are calculated, default is \code{secondStageConditioning = TRUE} #' (for details, see Koenig et al., 2008). #' #' @details #' For performing the conditional Dunnett test the design must be defined through this function. #' You can define the information fraction and the way of how to compute the second stage #' p-values only in the design definition, and not in the analysis call.\cr #' See \code{\link[=getClosedConditionalDunnettTestResults]{getClosedConditionalDunnettTestResults()}} #' for an example and Koenig et al. 
#' (2008) and
#' Wassmer & Brannath (2016), chapter 11 for details of the test procedure.
#'
#' @template return_object_trial_design
#' @template how_to_get_help_for_generics
#'
#' @family design functions
#'
#' @export
#'
getDesignConditionalDunnett <- function(alpha = 0.025, # C_ALPHA_DEFAULT
        informationAtInterim = 0.5, secondStageConditioning = TRUE) {
    # Validate user input: alpha must be a valid significance level and the
    # interim information fraction must be a single number strictly in (0, 1).
    .assertIsValidAlpha(alpha)
    .assertIsSingleNumber(informationAtInterim, "informationAtInterim")
    .assertIsInOpenInterval(informationAtInterim, "informationAtInterim", 0, 1)
    # NOTE(review): 'secondStageConditioning' is passed through unchecked --
    # presumably expected to be a single logical; confirm whether an assertion is needed.
    return(TrialDesignConditionalDunnett(
        alpha = alpha,
        informationAtInterim = informationAtInterim,
        secondStageConditioning = secondStageConditioning
    ))
}

#'
#' @title
#' Trial Design Plotting
#'
#' @description
#' Plots a trial design.
#'
#' @details
#' Generic function to plot a trial design.
#'
#' @param x The trial design, obtained from \cr
#' \code{\link[=getDesignGroupSequential]{getDesignGroupSequential()}}, \cr
#' \code{\link[=getDesignInverseNormal]{getDesignInverseNormal()}} or \cr
#' \code{\link[=getDesignFisher]{getDesignFisher()}}.
#' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function).
#' @param main The main title.
#' @param xlab The x-axis label.
#' @param ylab The y-axis label.
#' @inheritParams param_palette
#' @inheritParams param_theta
#' @inheritParams param_nMax
#' @inheritParams param_plotPointsEnabled
#' @inheritParams param_showSource
#' @inheritParams param_plotSettings
#' @inheritParams param_legendPosition
#' @inheritParams param_grid
#' @param type The plot type (default = \code{1}).
#' The following plot types are available:
#' \itemize{
#'   \item \code{1}: creates a 'Boundaries' plot
#'   \item \code{3}: creates a 'Stage Levels' plot
#'   \item \code{4}: creates an 'Error Spending' plot
#'   \item \code{5}: creates a 'Power and Early Stopping' plot
#'   \item \code{6}: creates an 'Average Sample Size and Power / Early Stop' plot
#'   \item \code{7}: creates a 'Power' plot
#'   \item \code{8}: creates an 'Early Stopping' plot
#'   \item \code{9}: creates an 'Average Sample Size' plot
#'   \item \code{"all"}: creates all available plots and returns them as a grid plot or list
#' }
#' @inheritParams param_three_dots_plot
#'
#' @details
#' Generic function to plot a trial design.
#'
#' Note that \code{\link[=param_nMax]{nMax}} is not an argument that is passed to \code{ggplot2}.
#' Rather, the underlying calculations (e.g. power for different theta's or average sample size) are based
#' on calls to function \code{\link[=getPowerAndAverageSampleNumber]{getPowerAndAverageSampleNumber()}}
#' which has argument \code{\link[=param_nMax]{nMax}}.
#' I.e., \code{\link[=param_nMax]{nMax}} is not an argument to ggplot2 but to
#' \code{\link[=getPowerAndAverageSampleNumber]{getPowerAndAverageSampleNumber()}}
#' which is called prior to plotting.
#'
#' @seealso \code{\link[=plot.TrialDesignSet]{plot()}} to compare different designs or design parameters visually.
#' #' @template return_object_ggplot #' #' @examples #' \dontrun{ #' design <- getDesignInverseNormal( #' kMax = 3, alpha = 0.025, #' typeOfDesign = "asKD", gammaA = 2, #' informationRates = c(0.2, 0.7, 1), #' typeBetaSpending = "bsOF" #' ) #' if (require(ggplot2)) { #' plot(design) # default: type = 1 #' } #' } #' #' @export #' plot.TrialDesign <- function(x, y, ..., main = NA_character_, xlab = NA_character_, ylab = NA_character_, type = 1L, palette = "Set1", theta = seq(-1, 1, 0.01), nMax = NA_integer_, plotPointsEnabled = NA, legendPosition = NA_integer_, showSource = FALSE, grid = 1, plotSettings = NULL) { fCall <- match.call(expand.dots = FALSE) designName <- deparse(fCall$x) .assertGgplotIsInstalled() .assertIsSingleInteger(grid, "grid", validateType = FALSE) typeNumbers <- .getPlotTypeNumber(type, x) if (is.null(plotSettings)) { plotSettings <- .getGridPlotSettings(x, typeNumbers, grid) } p <- NULL plotList <- list() for (typeNumber in typeNumbers) { p <- .plotTrialDesign( x = x, y = y, main = main, xlab = xlab, ylab = ylab, type = typeNumber, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = .getGridLegendPosition(legendPosition, typeNumbers, grid), showSource = showSource, designName = designName, plotSettings = plotSettings, ... ) .printPlotShowSourceSeparator(showSource, typeNumber, typeNumbers) if (length(typeNumbers) > 1) { caption <- .getPlotCaption(x, typeNumber, stopIfNotFound = TRUE) plotList[[caption]] <- p } } if (length(typeNumbers) == 1) { if (.isSpecialPlotShowSourceArgument(showSource)) { return(invisible(p)) } return(p) } if (.isSpecialPlotShowSourceArgument(showSource)) { return(invisible(plotList)) } return(.createPlotResultObject(plotList, grid)) } #' @rdname plot.TrialDesign #' @export plot.TrialDesignCharacteristics <- function(x, y, ...) { plot(x = x$.design, y = y, ...) 
} .plotTrialDesign <- function(..., x, y, main, xlab, ylab, type, palette, theta, nMax, plotPointsEnabled, legendPosition, showSource, designName, plotSettings = NULL) { .assertGgplotIsInstalled() .assertIsSingleInteger(type, "type", naAllowed = FALSE, validateType = FALSE) if (any(.isTrialDesignFisher(x)) && !(type %in% c(1, 3, 4))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed for Fisher designs; must be 1, 3 or 4" ) } .warnInCaseOfUnknownArguments( functionName = "plot", ignore = c("xlim", "ylim", "companyAnnotationEnabled", "variedParameters"), ... ) if ((type < 5 || type > 9) && !identical(theta, seq(-1, 1, 0.01))) { warning("'theta' (", .reconstructSequenceCommand(theta), ") will be ignored for plot type ", type, call. = FALSE) } if (!missing(y) && !is.null(y) && length(y) == 1 && inherits(y, "TrialDesign")) { args <- list(...) variedParameters <- args[["variedParameters"]] if (is.null(variedParameters)) { if (.isTrialDesignInverseNormalOrGroupSequential(x) && .isTrialDesignInverseNormalOrGroupSequential(y) && x$typeOfDesign != y$typeOfDesign) { variedParameters <- "typeOfDesign" } else { stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'variedParameters' needs to be specified, e.g., variedParameters = \"typeOfDesign\"" ) } } designSet <- getDesignSet(designs = c(x, y), variedParameters = variedParameters) } else { designSet <- TrialDesignSet(design = x, singleDesign = TRUE) if (!is.null(plotSettings)) { designSet$.plotSettings <- plotSettings } } .plotTrialDesignSet( x = designSet, y = y, main = main, xlab = xlab, ylab = ylab, type = type, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, showSource = showSource, designSetName = designName, ... ) } #' #' @title #' Coerce TrialDesign to a Data Frame #' #' @description #' Returns the \code{TrialDesign} as data frame. #' #' @param x A \code{\link{TrialDesign}} object. 
#' @inheritParams param_niceColumnNamesEnabled #' @inheritParams param_includeAllParameters #' @inheritParams param_three_dots #' #' @details #' Each element of the \code{\link{TrialDesign}} is converted to a column in the data frame. #' #' @template return_dataframe #' #' @examples #' as.data.frame(getDesignGroupSequential()) #' #' @export #' #' @keywords internal #' as.data.frame.TrialDesign <- function(x, row.names = NULL, optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE, ...) { .assertIsTrialDesign(x) if (includeAllParameters) { parameterNames <- NULL } else { parameterNames <- x$.getParametersToShow() } return(.getAsDataFrame( parameterSet = x, parameterNames = parameterNames, niceColumnNamesEnabled = niceColumnNamesEnabled, includeAllParameters = includeAllParameters, tableColumnNames = .getTableColumnNames(design = x) )) } rpact/R/f_simulation_multiarm_survival.R0000644000176200001440000010653714445307576020300 0ustar liggesusers## | ## | *Simulation of multi-arm design with time to event data* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_simulation_multiarm.R NULL .getSimulationSurvivalMultiArmStageEvents <- function(..., stage, directionUpper, conditionalPower, conditionalCriticalValue, plannedEvents, allocationRatioPlanned, selectedArms, thetaH1, overallEffects, minNumberOfEventsPerStage, 
maxNumberOfEventsPerStage) { stage <- stage - 1 # to be consistent with non-multiarm situation gMax <- nrow(overallEffects) if (!is.na(conditionalPower)) { if (any(selectedArms[1:gMax, stage + 1], na.rm = TRUE)) { if (is.na(thetaH1)) { if (directionUpper) { thetaStandardized <- log(max(min( overallEffects[selectedArms[1:gMax, stage + 1], stage], na.rm = TRUE ), 1 + 1e-07)) } else { thetaStandardized <- log(min(max( overallEffects[selectedArms[1:gMax, stage + 1], stage], na.rm = TRUE ), 1 - 1e-07)) } } else { if (directionUpper) { thetaStandardized <- log(max(thetaH1, 1 + 1e-07)) } else { thetaStandardized <- log(min(thetaH1, 1 - 1e-07)) } } if (conditionalCriticalValue[stage] > 8) { newEvents <- maxNumberOfEventsPerStage[stage + 1] } else { newEvents <- (1 + allocationRatioPlanned[stage])^2 / allocationRatioPlanned[stage] * (max(0, conditionalCriticalValue[stage] + .getQNorm(conditionalPower), na.rm = TRUE))^2 / thetaStandardized^2 newEvents <- min( max(minNumberOfEventsPerStage[stage + 1], newEvents), maxNumberOfEventsPerStage[stage + 1] ) } } else { newEvents <- 0 } } else { newEvents <- plannedEvents[stage + 1] - plannedEvents[stage] } return(newEvents) } # Correlation matrix according to Deng et al. 
(2019) accounting for alternative:
# Computes the Cholesky factor of the correlation matrix of the stage-wise
# logrank statistics for the arms selected at stage k (many-to-one comparison),
# derived from the allocation ratio and the hazard-ratio vector omegaVector.
# Returns the upper-triangular Cholesky factor as produced by chol().
.getCholeskyDecomposition <- function(allocationRatioPlanned, selectedArms, k, omegaVector) {
    selectedArmsVec <- selectedArms[, k]
    # event probability per selected active arm
    probabilityVector <- allocationRatioPlanned[k] * omegaVector[selectedArmsVec] /
        (1 + allocationRatioPlanned[k] * sum(omegaVector[selectedArmsVec]))
    armsSelected <- sum(selectedArmsVec)
    # event probability of the common control arm
    p0 <- 1 / (1 + allocationRatioPlanned[k] * sum(omegaVector[selectedArmsVec]))
    # covariance structure: shared control contributes 1/p0 to all entries,
    # diagonal additionally carries the arm-specific 1/probability term
    covMatrix <- matrix(rep(1 / p0, armsSelected^2), ncol = armsSelected, nrow = armsSelected)
    diag(covMatrix) <- 1 / p0 + 1 / probabilityVector
    corrMatrix <- cov2cor(covMatrix)
    choleskyDecomposition <- chol(corrMatrix)
    return(choleskyDecomposition)
}

# Simulates one trial for the multi-arm survival design: draws correlated
# normally distributed logrank test statistics per stage, performs treatment-arm
# selection between stages, and (optionally) recalculates events via
# 'calcEventsFunction'. Returns a list of per-stage matrices/vectors that the
# caller (getSimulationMultiArmSurvival) feeds into the closed test procedure.
# 'choleskyDecomposition' is non-NULL only for correlationComputation = "null"
# (fixed correlation under H0); otherwise stage-specific factors are computed
# lazily and cached in 'choleskyDecompositionList' keyed by the selection pattern.
.getSimulatedStageSurvivalMultiArm <- function(..., design, directionUpper, omegaVector,
        plannedEvents, typeOfSelection, effectMeasure, adaptations, epsilonValue, rValue,
        threshold, allocationRatioPlanned, minNumberOfEventsPerStage, maxNumberOfEventsPerStage,
        conditionalPower, thetaH1, calcEventsFunction, calcEventsFunctionIsUserDefined,
        selectArmsFunction, choleskyDecompositionList, choleskyDecomposition = NULL) {
    kMax <- length(plannedEvents)
    gMax <- length(omegaVector)
    # NOTE(review): simSurvival and singleEventsPerStage are allocated here but
    # never written to or returned by this function — possibly leftovers.
    simSurvival <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallEffects <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    eventsPerStage <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    singleEventsPerStage <- matrix(NA_real_, nrow = gMax + 1, ncol = kMax)
    testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    conditionalCriticalValue <- rep(NA_real_, kMax - 1)
    conditionalPowerPerStage <- rep(NA_real_, kMax)
    selectedArms <- matrix(FALSE, nrow = gMax, ncol = kMax)
    selectedArms[, 1] <- TRUE # all arms are active at the first stage
    adjustedPValues <- rep(NA_real_, kMax)
    if (.isTrialDesignFisher(design)) {
        weights <- .getWeightsFisher(design)
    } else if (.isTrialDesignInverseNormal(design)) {
        weights <- .getWeightsInverseNormal(design)
    }
    for (k in 1:kMax) {
        # draw independent N(0, 1) statistics for the arms still in the trial;
        # expected stage-wise events are split proportionally to omega
        for (treatmentArm in 1:gMax) {
            if (selectedArms[treatmentArm, k]) {
                if (k == 1) {
                    eventsPerStage[treatmentArm, k] <- plannedEvents[k] *
                        (allocationRatioPlanned[k] * omegaVector[treatmentArm] + 1) /
                        (allocationRatioPlanned[k] * sum(omegaVector) + 1)
                } else {
                    eventsPerStage[treatmentArm, k] <- (plannedEvents[k] - plannedEvents[k - 1]) *
                        (allocationRatioPlanned[k] * omegaVector[treatmentArm] + 1) /
                        (allocationRatioPlanned[k] * sum(omegaVector[selectedArms[, k]]) + 1)
                }
                if (eventsPerStage[treatmentArm, k] > 0) {
                    testStatistics[treatmentArm, k] <- stats::rnorm(1, 0, 1)
                }
            }
        }
        # impose the correlation between arms (shared control) via the Cholesky factor
        if (is.null(choleskyDecomposition)) {
            # correlationComputation = "alternative": factor depends on the current
            # selection pattern; cache it per pattern in choleskyDecompositionList.
            # NOTE(review): after this branch 'choleskyDecomposition' stays non-NULL
            # for subsequent stages, which then take the fixed-submatrix branch below.
            key <- paste0(selectedArms[, k], collapse = "")
            choleskyDecomposition <- choleskyDecompositionList[[key]]
            if (is.null(choleskyDecomposition)) {
                choleskyDecomposition <- .getCholeskyDecomposition(allocationRatioPlanned, selectedArms, k, omegaVector)
                choleskyDecompositionList[[key]] <- choleskyDecomposition
            }
            testStatistics[!is.na(testStatistics[, k]), k] <-
                t(choleskyDecomposition) %*% testStatistics[!is.na(testStatistics[, k]), k]
        } else {
            # correlationComputation = "null": constant matrix; use the submatrix
            # matching the number of currently selected arms
            testStatistics[!is.na(testStatistics[, k]), k] <-
                t(choleskyDecomposition[1:sum(selectedArms[, k]), 1:sum(selectedArms[, k])]) %*%
                testStatistics[!is.na(testStatistics[, k]), k]
        }
        # shift by the drift implied by the hazard ratio; accumulate overall
        # (inverse-normal weighted) statistics and effect estimates
        for (treatmentArm in 1:gMax) {
            if (selectedArms[treatmentArm, k]) {
                testStatistics[treatmentArm, k] <- testStatistics[treatmentArm, k] +
                    (2 * directionUpper - 1) * log(omegaVector[treatmentArm]) *
                    sqrt(eventsPerStage[treatmentArm, k]) * sqrt(allocationRatioPlanned[k]) /
                    (1 + allocationRatioPlanned[k])
                separatePValues[treatmentArm, k] <- 1 - stats::pnorm(testStatistics[treatmentArm, k])
                overallTestStatistics[treatmentArm, k] <-
                    sqrt(eventsPerStage[treatmentArm, 1:k]) %*% testStatistics[treatmentArm, 1:k] /
                    sqrt(sum(eventsPerStage[treatmentArm, 1:k]))
                overallEffects[treatmentArm, k] <- exp((2 * directionUpper - 1) *
                    overallTestStatistics[treatmentArm, k] *
                    (1 + allocationRatioPlanned[k]) / sqrt(allocationRatioPlanned[k]) /
                    sqrt(sum(eventsPerStage[treatmentArm, 1:k])))
            }
        }
        if (k < kMax) {
            if (colSums(selectedArms)[k] == 0) {
                break
            }

            # Bonferroni adjustment
            adjustedPValues[k] <- min(min(separatePValues[, k], na.rm = TRUE) *
                (colSums(selectedArms)[k]), 1 - 1e-7)

            # conditional critical value to reject the null hypotheses at the next stage of the trial
            if (.isTrialDesignConditionalDunnett(design)) {
                conditionalCriticalValue[k] <- (.getOneMinusQNorm(design$alpha) -
                    .getOneMinusQNorm(adjustedPValues[k]) * sqrt(design$informationAtInterim)) /
                    sqrt(1 - design$informationAtInterim)
            } else {
                if (.isTrialDesignFisher(design)) {
                    conditionalCriticalValue[k] <- .getOneMinusQNorm(min((design$criticalValues[k + 1] /
                        prod(adjustedPValues[1:k]^weights[1:k]))^(1 / weights[k + 1]), 1 - 1e-7))
                } else {
                    conditionalCriticalValue[k] <- (design$criticalValues[k + 1] *
                        sqrt(design$informationRates[k + 1]) -
                        .getOneMinusQNorm(adjustedPValues[1:k]) %*% weights[1:k]) /
                        sqrt(design$informationRates[k + 1] - design$informationRates[k])
                }
            }

            if (adaptations[k]) {
                # arm selection for the next stage, based on either the overall test
                # statistics or the overall effect estimates (inverted when testing downwards)
                if (effectMeasure == "testStatistic") {
                    selectedArms[, k + 1] <- (selectedArms[, k] & .selectTreatmentArms(k,
                        overallTestStatistics[, k], typeOfSelection, epsilonValue, rValue,
                        threshold, selectArmsFunction,
                        survival = TRUE
                    ))
                } else if (effectMeasure == "effectEstimate") {
                    if (directionUpper) {
                        selectedArms[, k + 1] <- (selectedArms[, k] & .selectTreatmentArms(k,
                            overallEffects[, k], typeOfSelection, epsilonValue, rValue,
                            threshold, selectArmsFunction,
                            survival = TRUE
                        ))
                    } else {
                        selectedArms[, k + 1] <- (selectedArms[, k] & .selectTreatmentArms(k,
                            1 / overallEffects[, k], typeOfSelection, epsilonValue, rValue,
                            1 / threshold, selectArmsFunction,
                            survival = TRUE
                        ))
                    }
                }

                newEvents <- calcEventsFunction(
                    stage = k + 1, # to be consistent with non-multiarm situation, cf. line 38
                    directionUpper = directionUpper,
                    conditionalPower = conditionalPower,
                    conditionalCriticalValue = conditionalCriticalValue,
                    plannedEvents = plannedEvents,
                    allocationRatioPlanned = allocationRatioPlanned,
                    selectedArms = selectedArms,
                    thetaH1 = thetaH1,
                    overallEffects = overallEffects,
                    minNumberOfEventsPerStage = minNumberOfEventsPerStage,
                    maxNumberOfEventsPerStage = maxNumberOfEventsPerStage
                )

                if (is.null(newEvents) || length(newEvents) != 1 ||
                        !is.numeric(newEvents) || is.na(newEvents)) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "'calcEventsFunction' returned an illegal or undefined result (", newEvents, "); ",
                        "the output must be a single numeric value"
                    )
                }

                if (!is.na(conditionalPower) || calcEventsFunctionIsUserDefined) {
                    # sample size reassessment: spread the recalculated stage-wise
                    # events over all remaining stages
                    plannedEvents[(k + 1):kMax] <- plannedEvents[k] + cumsum(rep(newEvents, kMax - k))
                }
            } else {
                selectedArms[, k + 1] <- selectedArms[, k]
            }

            # conditional power for the next stage; if thetaH1 is not given, use the
            # smallest observed overall effect among the selected arms
            if (is.na(thetaH1)) {
                thetaStandardized <- log(min(overallEffects[selectedArms[1:gMax, k], k], na.rm = TRUE))
            } else {
                thetaStandardized <- log(thetaH1)
            }
            thetaStandardized <- (2 * directionUpper - 1) * thetaStandardized

            conditionalPowerPerStage[k] <- 1 - stats::pnorm(conditionalCriticalValue[k] -
                thetaStandardized * sqrt(plannedEvents[k + 1] - plannedEvents[k]) *
                sqrt(allocationRatioPlanned[k]) / (1 + allocationRatioPlanned[k]))
        }
    }
    return(list(
        eventsPerStage = eventsPerStage,
        plannedEvents = plannedEvents,
        allocationRatioPlanned = allocationRatioPlanned,
        overallEffects = overallEffects,
        testStatistics = testStatistics,
        overallTestStatistics = overallTestStatistics,
        separatePValues = separatePValues,
        conditionalCriticalValue = conditionalCriticalValue,
        conditionalPowerPerStage = conditionalPowerPerStage,
        selectedArms = selectedArms,
        choleskyDecompositionList = choleskyDecompositionList
    ))
}

#'
#' @title
#' Get Simulation Multi-Arm Survival
#'
#' @description
#' Returns the simulated power, stopping and selection probabilities, conditional power, and
#' expected sample size for testing hazard ratios
in a multi-arm treatment groups testing situation. #' In contrast to \code{getSimulationSurvival()} (where survival times are simulated), normally #' distributed logrank test statistics are simulated. #' #' @param omegaMaxVector Range of hazard ratios with highest response for \code{"linear"} and #' \code{"sigmoidEmax"} model, default is \code{seq(1, 2.6, 0.4)}. #' @inheritParams param_intersectionTest_MultiArm #' @inheritParams param_typeOfSelection #' @inheritParams param_effectMeasure #' @inheritParams param_adaptations #' @inheritParams param_threshold #' @inheritParams param_effectMatrix #' @inheritParams param_activeArms #' @inheritParams param_successCriterion #' @param correlationComputation If \code{correlationComputation = "alternative"}, #' for simulating log-rank statistics in the many-to-one design, a correlation #' matrix according to Deng et al. (Biometrics, 2019) accounting for the #' respective alternative is used; #' if \code{correlationComputation = "null"}, a constant correlation matrix valid #' under the null, i.e., not accounting for the alternative is used, #' default is \code{"alternative"}. 
#' @inheritParams param_typeOfShape
#' @inheritParams param_typeOfSelection
#' @inheritParams param_design_with_default
#' @inheritParams param_directionUpper
#' @inheritParams param_allocationRatioPlanned
#' @inheritParams param_minNumberOfEventsPerStage
#' @inheritParams param_maxNumberOfEventsPerStage
#' @inheritParams param_conditionalPowerSimulation
#' @inheritParams param_thetaH1
#' @inheritParams param_plannedEvents
#' @inheritParams param_maxNumberOfIterations
#' @inheritParams param_calcEventsFunction
#' @inheritParams param_selectArmsFunction
#' @inheritParams param_rValue
#' @inheritParams param_epsilonValue
#' @inheritParams param_gED50
#' @inheritParams param_slope
#' @inheritParams param_seed
#' @inheritParams param_three_dots
#' @inheritParams param_showStatistics
#'
#' @details
#' At a given design, the function simulates the power, stopping probabilities,
#' selection probabilities, and expected sample size at given number of subjects,
#' parameter configuration, and treatment arm selection rule in the multi-arm situation.
#' An allocation ratio can be specified referring to the ratio of number of subjects
#' in the active treatment groups as compared to the control group.
#'
#' The definition of \code{thetaH1} only makes sense if \code{kMax} > 1
#' and if \code{conditionalPower}, \code{minNumberOfEventsPerStage}, and
#' \code{maxNumberOfEventsPerStage} (or \code{calcEventsFunction}) are defined.
#'
#' \code{calcEventsFunction}\cr
#' This function returns the number of events at given conditional power
#' and conditional critical value for specified testing situation.
#' The function might depend on the variables
#' \code{stage},
#' \code{selectedArms},
#' \code{plannedEvents},
#' \code{directionUpper},
#' \code{allocationRatioPlanned},
#' \code{minNumberOfEventsPerStage},
#' \code{maxNumberOfEventsPerStage},
#' \code{conditionalPower},
#' \code{conditionalCriticalValue}, and
#' \code{overallEffects}.
#' The function has to contain the three-dots argument '...' (see examples).
#'
#' @template return_object_simulation_results
#' @template how_to_get_help_for_generics
#'
#' @template examples_get_simulation_multiarm_survival
#'
#' @export
#'
getSimulationMultiArmSurvival <- function(design = NULL, ...,
        activeArms = 3L, # C_ACTIVE_ARMS_DEFAULT
        effectMatrix = NULL,
        typeOfShape = c("linear", "sigmoidEmax", "userDefined"), # C_TYPE_OF_SHAPE_DEFAULT
        omegaMaxVector = seq(1, 2.6, 0.4), # C_RANGE_OF_HAZARD_RATIOS_DEFAULT
        gED50 = NA_real_,
        slope = 1,
        intersectionTest = c("Dunnett", "Bonferroni", "Simes", "Sidak", "Hierarchical"), # C_INTERSECTION_TEST_MULTIARMED_DEFAULT
        directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT
        adaptations = NA,
        typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), # C_TYPE_OF_SELECTION_DEFAULT
        effectMeasure = c("effectEstimate", "testStatistic"), # C_EFFECT_MEASURE_DEFAULT
        successCriterion = c("all", "atLeastOne"), # C_SUCCESS_CRITERION_DEFAULT
        correlationComputation = c("alternative", "null"),
        epsilonValue = NA_real_,
        rValue = NA_real_,
        threshold = -Inf,
        plannedEvents = NA_real_,
        allocationRatioPlanned = NA_real_,
        minNumberOfEventsPerStage = NA_real_,
        maxNumberOfEventsPerStage = NA_real_,
        conditionalPower = NA_real_,
        thetaH1 = NA_real_,
        maxNumberOfIterations = 1000L, # C_MAX_SIMULATION_ITERATIONS_DEFAULT
        seed = NA_real_,
        calcEventsFunction = NULL,
        selectArmsFunction = NULL,
        showStatistics = FALSE) {
    # validate the design (or create a default one) and warn about unknown '...' arguments
    if (is.null(design)) {
        design <- .getDefaultDesign(..., type = "simulation")
        .warnInCaseOfUnknownArguments(
            functionName = "getSimulationMultiArmSurvival",
            ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design,
                powerCalculationEnabled = TRUE
            ), "showStatistics"), ...
        )
    } else {
        .assertIsTrialDesignInverseNormalOrFisherOrConditionalDunnett(design)
        .warnInCaseOfUnknownArguments(functionName = "getSimulationMultiArmSurvival", ignore = "showStatistics", ...)
        .warnInCaseOfTwoSidedPowerArgument(...)
    }

    .assertIsOneSidedDesign(design, designType = "multi-arm", engineType = "simulation")

    correlationComputation <- match.arg(correlationComputation)

    calcEventsFunctionIsUserDefined <- !is.null(calcEventsFunction)

    # central argument validation and construction of the result object
    simulationResults <- .createSimulationResultsMultiArmObject(
        design = design,
        activeArms = activeArms,
        effectMatrix = effectMatrix,
        typeOfShape = typeOfShape,
        omegaMaxVector = omegaMaxVector, # survival only
        gED50 = gED50,
        slope = slope,
        intersectionTest = intersectionTest,
        directionUpper = directionUpper, # rates + survival only
        adaptations = adaptations,
        typeOfSelection = typeOfSelection,
        effectMeasure = effectMeasure,
        successCriterion = successCriterion,
        epsilonValue = epsilonValue,
        rValue = rValue,
        threshold = threshold,
        plannedEvents = plannedEvents, # survival only
        allocationRatioPlanned = allocationRatioPlanned,
        minNumberOfEventsPerStage = minNumberOfEventsPerStage, # survival only
        maxNumberOfEventsPerStage = maxNumberOfEventsPerStage, # survival only
        conditionalPower = conditionalPower,
        thetaH1 = thetaH1, # means + survival only
        maxNumberOfIterations = maxNumberOfIterations,
        seed = seed,
        calcEventsFunction = calcEventsFunction, # survival only
        selectArmsFunction = selectArmsFunction,
        showStatistics = showStatistics,
        endpoint = "survival"
    )

    # read back the validated/normalized parameters from the result object
    design <- simulationResults$.design
    successCriterion <- simulationResults$successCriterion
    effectMeasure <- simulationResults$effectMeasure
    adaptations <- simulationResults$adaptations
    gMax <- activeArms
    kMax <- simulationResults$.design$kMax
    intersectionTest <- simulationResults$intersectionTest
    typeOfSelection <- simulationResults$typeOfSelection
    effectMatrix <- t(simulationResults$effectMatrix)
    omegaMaxVector <- simulationResults$omegaMaxVector # survival only
    thetaH1 <- simulationResults$thetaH1 # means + survival only
    plannedEvents <- simulationResults$plannedEvents # survival only
    conditionalPower <- simulationResults$conditionalPower
    minNumberOfEventsPerStage <- simulationResults$minNumberOfEventsPerStage # survival only
    maxNumberOfEventsPerStage <- simulationResults$maxNumberOfEventsPerStage # survival only
    allocationRatioPlanned <- simulationResults$allocationRatioPlanned
    calcEventsFunction <- simulationResults$calcEventsFunction

    if (length(allocationRatioPlanned) == 1) {
        allocationRatioPlanned <- rep(allocationRatioPlanned, kMax)
    }

    simulationResults$correlationComputation <- correlationComputation
    if (correlationComputation != "alternative") {
        simulationResults$.setParameterType("correlationComputation", C_PARAM_USER_DEFINED)
    }

    indices <- .getIndicesOfClosedHypothesesSystemForSimulation(gMax = gMax)

    if (.isTrialDesignConditionalDunnett(design)) {
        criticalValuesDunnett <- .getCriticalValuesDunnettForSimulation(
            alpha = design$alpha, indices = indices,
            allocationRatioPlanned = allocationRatioPlanned
        )
    }

    # accumulators over simulation runs; first two array dims are stage x scenario
    cols <- length(omegaMaxVector)
    simulatedSelections <- array(0, dim = c(kMax, cols, gMax))
    simulatedRejections <- array(0, dim = c(kMax, cols, gMax))
    simulatedNumberOfActiveArms <- matrix(0, nrow = kMax, ncol = cols)
    simulatedSingleEventsPerStage <- array(0, dim = c(kMax, cols, gMax + 1))
    simulatedOverallEventsPerStage <- matrix(0, nrow = kMax, ncol = cols)
    simulatedSuccessStopping <- matrix(0, nrow = kMax, ncol = cols)
    simulatedFutilityStopping <- matrix(0, nrow = kMax - 1, ncol = cols)
    simulatedConditionalPower <- matrix(0, nrow = kMax, ncol = cols)
    simulatedRejectAtLeastOne <- rep(0, cols)
    expectedNumberOfEvents <- rep(0, cols)
    iterations <- matrix(0, nrow = kMax, ncol = cols)
    probabilityVector <- rep(NA_real_, cols)

    # flat per-(iteration, stage, arm) vectors for the raw-data frame
    len <- maxNumberOfIterations * kMax * gMax * cols
    dataIterationNumber <- rep(NA_real_, len)
    dataStageNumber <- rep(NA_real_, len)
    dataArmNumber <- rep(NA_real_, len)
    dataAlternative <- rep(NA_real_, len)
    dataEffect <- rep(NA_real_, len)
    dataNumberOfEvents <- rep(NA_real_, len)
    dataRejectPerStage <- rep(NA, len)
    dataFutilityStop <- rep(NA_real_, len)
    dataSuccessStop <- rep(NA, len)
    dataFutilityStop <- rep(NA, len)
    dataTestStatistics <- rep(NA_real_, len)
    dataConditionalCriticalValue <- rep(NA_real_, len)
    dataConditionalPowerAchieved <- rep(NA_real_, len)
    dataEffectEstimate <- rep(NA_real_, len)
    dataPValuesSeparate <- rep(NA_real_, len)

    choleskyDecomposition <- NULL
    if (correlationComputation == "null") {
        # not accounting for alternative
        corrMatrix <- matrix(rep(allocationRatioPlanned / (1 + allocationRatioPlanned), gMax^2),
            ncol = gMax, nrow = gMax)
        diag(corrMatrix) <- 1
        choleskyDecomposition <- chol(corrMatrix)
    }

    index <- 1
    for (i in 1:cols) {
        choleskyDecompositionList <- list()
        for (j in 1:maxNumberOfIterations) {
            # simulate one trial for scenario i
            stageResults <- .getSimulatedStageSurvivalMultiArm(
                design = design,
                directionUpper = directionUpper,
                omegaVector = effectMatrix[i, ],
                plannedEvents = plannedEvents,
                typeOfSelection = typeOfSelection,
                effectMeasure = effectMeasure,
                adaptations = adaptations,
                epsilonValue = epsilonValue,
                rValue = rValue,
                threshold = threshold,
                allocationRatioPlanned = allocationRatioPlanned,
                minNumberOfEventsPerStage = minNumberOfEventsPerStage,
                maxNumberOfEventsPerStage = maxNumberOfEventsPerStage,
                conditionalPower = conditionalPower,
                thetaH1 = thetaH1,
                calcEventsFunction = calcEventsFunction,
                calcEventsFunctionIsUserDefined = calcEventsFunctionIsUserDefined,
                selectArmsFunction = selectArmsFunction,
                choleskyDecompositionList = choleskyDecompositionList,
                choleskyDecomposition = choleskyDecomposition
            )
            choleskyDecompositionList <- stageResults$choleskyDecompositionList

            # apply the closed test procedure to the simulated stage results
            if (.isTrialDesignConditionalDunnett(design)) {
                closedTest <- .performClosedConditionalDunnettTestForSimulation(
                    stageResults = stageResults, design = design, indices = indices,
                    criticalValuesDunnett = criticalValuesDunnett, successCriterion = successCriterion
                )
            } else {
                closedTest <- .performClosedCombinationTestForSimulationMultiArm(
                    stageResults = stageResults, design = design, indices = indices,
                    intersectionTest = intersectionTest, successCriterion = successCriterion
                )
            }

            rejectAtSomeStage <- FALSE
            rejectedArmsBefore <- rep(FALSE, gMax)

            for (k in 1:kMax) {
                simulatedRejections[k, i, ] <- simulatedRejections[k, i, ] +
                    (closedTest$rejected[, k] & closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore)
                simulatedSelections[k, i, ] <- simulatedSelections[k, i, ] + closedTest$selectedArms[, k]

                simulatedNumberOfActiveArms[k, i] <- simulatedNumberOfActiveArms[k, i] +
                    sum(closedTest$selectedArms[, k])

                if (!any(is.na(closedTest$successStop))) {
                    simulatedSuccessStopping[k, i] <- simulatedSuccessStopping[k, i] + closedTest$successStop[k]
                }

                if ((kMax > 1) && (k < kMax)) {
                    if (!any(is.na(closedTest$futilityStop))) {
                        simulatedFutilityStopping[k, i] <- simulatedFutilityStopping[k, i] +
                            (closedTest$futilityStop[k] && !closedTest$successStop[k])
                    }
                    if (!closedTest$successStop[k] && !closedTest$futilityStop[k]) {
                        simulatedConditionalPower[k + 1, i] <- simulatedConditionalPower[k + 1, i] +
                            stageResults$conditionalPowerPerStage[k]
                    }
                }

                iterations[k, i] <- iterations[k, i] + 1

                # accumulate stage-wise events, split between selected active arms
                # and the control arm (slot gMax + 1)
                if (k == 1) {
                    simulatedOverallEventsPerStage[k, i] <- simulatedOverallEventsPerStage[k, i] +
                        stageResults$plannedEvents[k]
                    for (g in 1:gMax) {
                        if (closedTest$selectedArms[g, k]) {
                            simulatedSingleEventsPerStage[k, i, g] <- simulatedSingleEventsPerStage[k, i, g] +
                                stageResults$plannedEvents[k] * allocationRatioPlanned[k] * effectMatrix[i, g] /
                                (1 + allocationRatioPlanned[k] * sum(effectMatrix[i, closedTest$selectedArms[, k]]))
                        }
                    }
                    simulatedSingleEventsPerStage[k, i, gMax + 1] <- simulatedSingleEventsPerStage[k, i, gMax + 1] +
                        stageResults$plannedEvents[k] /
                        (1 + allocationRatioPlanned[k] * sum(effectMatrix[i, closedTest$selectedArms[, k]]))
                } else {
                    simulatedOverallEventsPerStage[k, i] <- simulatedOverallEventsPerStage[k, i] +
                        stageResults$plannedEvents[k] - stageResults$plannedEvents[k - 1]
                    for (g in 1:gMax) {
                        if (closedTest$selectedArms[g, k]) {
                            simulatedSingleEventsPerStage[k, i, g] <- simulatedSingleEventsPerStage[k, i, g] +
                                (stageResults$plannedEvents[k] - stageResults$plannedEvents[k - 1]) *
                                allocationRatioPlanned[k] * effectMatrix[i, g] /
                                (1 + allocationRatioPlanned[k] * sum(effectMatrix[i, closedTest$selectedArms[, k]]))
                        }
                    }
                    simulatedSingleEventsPerStage[k, i, gMax + 1] <- simulatedSingleEventsPerStage[k, i, gMax + 1] +
                        (stageResults$plannedEvents[k] - stageResults$plannedEvents[k - 1]) /
                        (1 + allocationRatioPlanned[k] * sum(effectMatrix[i, closedTest$selectedArms[, k]]))
                }

                # record the raw per-arm data for this iteration and stage
                for (g in 1:gMax) {
                    dataIterationNumber[index] <- j
                    dataStageNumber[index] <- k
                    dataArmNumber[index] <- g
                    dataAlternative[index] <- omegaMaxVector[i]
                    dataEffect[index] <- effectMatrix[i, g]
                    dataNumberOfEvents[index] <- round(stageResults$eventsPerStage[g, k], 1)
                    dataRejectPerStage[index] <- closedTest$rejected[g, k]
                    dataTestStatistics[index] <- stageResults$testStatistics[g, k]
                    dataSuccessStop[index] <- closedTest$successStop[k]
                    if (k < kMax) {
                        dataFutilityStop[index] <- closedTest$futilityStop[k]
                        dataConditionalCriticalValue[index] <- stageResults$conditionalCriticalValue[k]
                        dataConditionalPowerAchieved[index + 1] <- stageResults$conditionalPowerPerStage[k]
                    }
                    dataEffectEstimate[index] <- stageResults$overallEffects[g, k]
                    dataPValuesSeparate[index] <- closedTest$separatePValues[g, k]
                    index <- index + 1
                }

                if (!rejectAtSomeStage && any(closedTest$rejected[, k] &
                        closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore)) {
                    simulatedRejectAtLeastOne[i] <- simulatedRejectAtLeastOne[i] + 1
                    rejectAtSomeStage <- TRUE
                }

                if ((k < kMax) && (closedTest$successStop[k] || closedTest$futilityStop[k])) {
                    # rejected hypotheses remain rejected also in case of early stopping
                    simulatedRejections[(k + 1):kMax, i, ] <- simulatedRejections[(k + 1):kMax, i, ] +
                        matrix((closedTest$rejected[, k] & closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore),
                            kMax - k, gMax,
                            byrow = TRUE
                        )
                    break
                }

                rejectedArmsBefore <- closedTest$rejected[, k] &
                    closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore
            }
        }

        # normalize event accumulators by the number of iterations reaching each stage
        simulatedOverallEventsPerStage[, i] <- simulatedOverallEventsPerStage[, i] / iterations[, i]
        simulatedSingleEventsPerStage[, i, ] <- simulatedSingleEventsPerStage[, i, ] / iterations[, i]

        if (kMax > 1) {
            # convert cumulative rejections to stage-wise rejections
            simulatedRejections[2:kMax, i, ] <- simulatedRejections[2:kMax, i, ] -
                simulatedRejections[1:(kMax - 1), i, ]
            stopping <- cumsum(simulatedSuccessStopping[1:(kMax - 1), i] +
                simulatedFutilityStopping[, i]) / maxNumberOfIterations
            expectedNumberOfEvents[i] <- simulatedOverallEventsPerStage[1, i] +
                t(1 - stopping) %*% simulatedOverallEventsPerStage[2:kMax, i]
        } else {
            expectedNumberOfEvents[i] <- simulatedOverallEventsPerStage[1, i]
        }
    }

    simulatedConditionalPower[1, ] <- NA_real_
    if (kMax > 1) {
        simulatedConditionalPower[2:kMax, ] <- simulatedConditionalPower[2:kMax, ] / iterations[2:kMax, ]
    }

    # write the aggregated results into the result object
    simulationResults$rejectAtLeastOne <- simulatedRejectAtLeastOne / maxNumberOfIterations
    simulationResults$numberOfActiveArms <- simulatedNumberOfActiveArms / iterations

    simulationResults$selectedArms <- simulatedSelections / maxNumberOfIterations
    simulationResults$rejectedArmsPerStage <- simulatedRejections / maxNumberOfIterations
    simulationResults$successPerStage <- simulatedSuccessStopping / maxNumberOfIterations
    simulationResults$futilityPerStage <- simulatedFutilityStopping / maxNumberOfIterations
    simulationResults$futilityStop <- base::colSums(simulatedFutilityStopping / maxNumberOfIterations)
    if (kMax > 1) {
        simulationResults$earlyStop <- simulationResults$futilityPerStage +
            simulationResults$successPerStage[1:(kMax - 1), ]
        simulationResults$conditionalPowerAchieved <- simulatedConditionalPower
    }
    # cumulative events per stage: active arms each get the control events added
    simulationResults$eventsPerStage <- .convertStageWiseToOverallValues(simulatedSingleEventsPerStage)
    for (g in (1:gMax)) {
        simulationResults$eventsPerStage[, , g] <- simulationResults$eventsPerStage[, , g] +
            simulationResults$eventsPerStage[, , gMax + 1]
    }
    simulationResults$eventsPerStage <- .removeLastEntryFromArray(simulationResults$eventsPerStage)
    simulationResults$singleNumberOfEventsPerStage <- simulatedSingleEventsPerStage
    simulationResults$.setParameterType("singleNumberOfEventsPerStage", C_PARAM_GENERATED)
    simulationResults$expectedNumberOfEvents <- expectedNumberOfEvents
    simulationResults$iterations <- iterations

    if (!all(is.na(simulationResults$conditionalPowerAchieved))) {
        simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED)
    }

    if (any(simulationResults$rejectedArmsPerStage < 0)) {
        stop(
            C_EXCEPTION_TYPE_RUNTIME_ISSUE,
            "internal error, simulation not possible due to numerical overflow"
        )
    }

    data <- data.frame(
        iterationNumber = dataIterationNumber,
        stageNumber = dataStageNumber,
        armNumber = dataArmNumber,
        omegaMax = dataAlternative,
        effect = dataEffect,
        numberOfEvents = dataNumberOfEvents,
        effectEstimate = dataEffectEstimate,
        testStatistics = dataTestStatistics,
        pValue = dataPValuesSeparate,
        conditionalCriticalValue = round(dataConditionalCriticalValue, 6),
        conditionalPowerAchieved = round(dataConditionalPowerAchieved, 6),
        rejectPerStage = dataRejectPerStage,
        successStop = dataSuccessStop,
        futilityPerStage = dataFutilityStop
    )
    data <- data[!is.na(data$effectEstimate), ]
    simulationResults$.data <- data

    return(simulationResults)
}
rpact/R/f_core_assertions.R0000644000176200001440000031202414445307575015436 0ustar liggesusers## |
## | *Core assertions*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_core_utilities.R
NULL

# Raises an illegal-argument error naming the admissible trial design classes.
.stopWithWrongDesignMessage <- function(design, ..., inclusiveConditionalDunnett =
TRUE) {
    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of ", .arrayToString(
        .getTrialDesignClassNames(inclusiveConditionalDunnett = inclusiveConditionalDunnett),
        vectorLookAndFeelEnabled = FALSE
    ), " (is '", .getClassName(design), "')")
}

# Raises an illegal-argument error naming the design classes admissible
# for enrichment analyses (inverse normal and Fisher only).
.stopWithWrongDesignMessageEnrichment <- function(design, ..., inclusiveConditionalDunnett = TRUE) {
    trialDesignClassNames <- c(C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, C_CLASS_NAME_TRIAL_DESIGN_FISHER)
    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of ", .arrayToString(
        trialDesignClassNames,
        vectorLookAndFeelEnabled = FALSE
    ), " (is '", .getClassName(design), "')")
}

# TRUE iff 'x' is an S4 object inheriting from 'ParameterSet'.
.isParameterSet <- function(x) {
    return(isS4(x) && inherits(x, "ParameterSet"))
}

# Asserts that 'x' inherits from 'ParameterSet'; 'objectName' is used in the message.
.assertIsParameterSetClass <- function(x, objectName = "x") {
    if (!.isParameterSet(x)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", objectName, "' (", .getClassName(x),
            ") must be a S4 class which inherits from class 'ParameterSet' "
        )
    }
}

# Asserts that 'x' is a 'TrialDesignSet'.
# NOTE(review): the error message always says 'designSet' and ignores 'objectName'.
.assertIsTrialDesignSet <- function(x, objectName = "x") {
    if (!.isTrialDesignSet(x)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'designSet' must be an instance of 'TrialDesignSet' (is '", .getClassName(x), "')"
        )
    }
}

# Exact-class predicates for the different trial design flavors.
.isTrialDesignSet <- function(x) {
    return(.getClassName(x) == "TrialDesignSet")
}

.isTrialDesignGroupSequential <- function(design) {
    return(.getClassName(design) == C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL)
}

.isTrialDesignInverseNormal <- function(design) {
    return(.getClassName(design) == C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL)
}

.isTrialDesignFisher <- function(design) {
    return(.getClassName(design) == C_CLASS_NAME_TRIAL_DESIGN_FISHER)
}

.isTrialDesignConditionalDunnett <- function(design) {
    return(.getClassName(design) == C_CLASS_NAME_TRIAL_DESIGN_CONDITIONAL_DUNNETT)
}

# Combined predicates over the design flavors above.
.isTrialDesignInverseNormalOrGroupSequential <- function(design) {
    return(.isTrialDesignInverseNormal(design) || .isTrialDesignGroupSequential(design))
}

.isTrialDesignInverseNormalOrFisher <- function(design) {
    return(.isTrialDesignInverseNormal(design) || .isTrialDesignFisher(design))
}

# TRUE iff 'design' is any of the four supported trial design classes.
.isTrialDesign <- function(design) {
    return(.isTrialDesignInverseNormal(design) || .isTrialDesignGroupSequential(design) ||
        .isTrialDesignFisher(design) || .isTrialDesignConditionalDunnett(design))
}

# Exact-class predicates for the design plan flavors.
.isTrialDesignPlanMeans <- function(designPlan) {
    return(.getClassName(designPlan) == "TrialDesignPlanMeans")
}

.isTrialDesignPlanRates <- function(designPlan) {
    return(.getClassName(designPlan) == "TrialDesignPlanRates")
}

.isTrialDesignPlanSurvival <- function(designPlan) {
    return(.getClassName(designPlan) == "TrialDesignPlanSurvival")
}

.isTrialDesignPlan <- function(designPlan) {
    return(.isTrialDesignPlanMeans(designPlan) || .isTrialDesignPlanRates(designPlan) ||
        .isTrialDesignPlanSurvival(designPlan))
}

# Asserts that 'designPlan' is one of the design plan classes.
.assertIsTrialDesignPlan <- function(designPlan) {
    if (!.isTrialDesignPlan(designPlan)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'designPlan' must be an instance of 'TrialDesignPlan' (is '", .getClassName(designPlan), "')"
        )
    }
}

# Asserts that 'design' is one of the supported trial design classes.
.assertIsTrialDesign <- function(design) {
    if (!.isTrialDesign(design)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of ", .arrayToString(
            .getTrialDesignClassNames(),
            vectorLookAndFeelEnabled = FALSE
        ), " (is '", .getClassName(design), "')")
    }
}

# Single-class assertions; each raises an illegal-argument error on mismatch.
.assertIsTrialDesignInverseNormal <- function(design) {
    if (!.isTrialDesignInverseNormal(design)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'design' must be an instance of class 'TrialDesignInverseNormal' (is '", .getClassName(design), "')"
        )
    }
}

.assertIsTrialDesignFisher <- function(design) {
    if (!.isTrialDesignFisher(design)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'design' must be an instance of class 'TrialDesignFisher' (is '", .getClassName(design), "')"
        )
    }
}

.assertIsTrialDesignGroupSequential <- function(design) {
    if (!.isTrialDesignGroupSequential(design)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'design' must be an instance of class 'TrialDesignGroupSequential' (is '",
            .getClassName(design), "')"
        )
    }
}

.assertIsTrialDesignConditionalDunnett <- function(design) {
    if (!.isTrialDesignConditionalDunnett(design)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'design' must be an instance of class 'TrialDesignConditionalDunnett' (is '",
            .getClassName(design), "')"
        )
    }
}

# Multi-class assertions mirroring the combined predicates above.
.assertIsTrialDesignInverseNormalOrGroupSequential <- function(design) {
    if (!.isTrialDesignInverseNormalOrGroupSequential(design)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'design' must be an instance of class 'TrialDesignInverseNormal' or 'TrialDesignGroupSequential' (is '",
            .getClassName(design), "')"
        )
    }
}

.assertIsTrialDesignInverseNormalOrFisher <- function(design) {
    if (!.isTrialDesignInverseNormalOrFisher(design)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'design' must be an instance of class 'TrialDesignInverseNormal' or 'TrialDesignFisher' (is '",
            .getClassName(design), "')"
        )
    }
}

.assertIsTrialDesignInverseNormalOrFisherOrConditionalDunnett <- function(design) {
    if (!.isTrialDesignInverseNormalOrFisher(design) && !.isTrialDesignConditionalDunnett(design)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'design' must be an instance of class 'TrialDesignInverseNormal', ",
            "'TrialDesignFisher', or 'TrialDesignConditionalDunnett' (is '", .getClassName(design), "')"
        )
    }
}

# TRUE iff 'simulationResults' inherits from 'SimulationResults'.
.isSimulationResults <- function(simulationResults) {
    return(inherits(simulationResults, "SimulationResults"))
}

.assertIsSimulationResults <- function(simulationResults) {
    if (!.isSimulationResults(simulationResults)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'simulationResults' must be an instance of SimulationResults (is '",
            .getClassName(simulationResults), "')"
        )
    }
}

# Predicates for stage results objects (inheritance check for the base class,
# exact class name for the specific flavors).
.isStageResults <- function(stageResults) {
    return(inherits(stageResults, "StageResults"))
}

.isStageResultsMultiArmMeans <- function(stageResults) {
    return(.getClassName(stageResults) == "StageResultsMultiArmMeans")
}

.isStageResultsMultiArmSurvival <- function(stageResults) {
    return(.getClassName(stageResults) == "StageResultsMultiArmSurvival")
}

.isStageResultsEnrichmentMeans <- function(stageResults) {
    return(.getClassName(stageResults) == "StageResultsEnrichmentMeans")
}

.isStageResultsEnrichmentSurvival <- function(stageResults) {
    return(.getClassName(stageResults) == "StageResultsEnrichmentSurvival")
}

.assertIsStageResults <- function(stageResults) {
    if (!.isStageResults(stageResults)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'stageResults' must be a 'StageResults' object",
            " (is '", .getClassName(stageResults), "')"
        )
    }
}

# Asserts that every value of 'x' lies in the closed interval [lower; upper].
# If 'upper' is NULL/NA only the lower bound is checked. 'naAllowed' permits an
# all-NA 'x'; a partially-NA vector of length > 1 is always rejected.
.assertIsInClosedInterval <- function(x, xName, ..., lower, upper, naAllowed = FALSE, call. = TRUE) {
    .warnInCaseOfUnknownArguments(functionName = ".assertIsInClosedInterval", ...)
    if (naAllowed && all(is.na(x))) {
        return(invisible())
    }

    if (!naAllowed && length(x) > 1 && any(is.na(x))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", xName, "' (", .arrayToString(x), ") must be a valid numeric vector or a single NA",
            call. = call.
        )
    }

    if (is.null(upper) || is.na(upper)) {
        if (any(x < lower, na.rm = TRUE)) {
            prefix <- ifelse(length(x) > 1, "each value of ", "")
            stop(
                C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
                prefix, "'", xName, "' (", .arrayToString(x), ") must be >= ", lower,
                call. = call.
            )
        }
    } else if (any(x < lower, na.rm = TRUE) || any(x > upper, na.rm = TRUE)) {
        stop(
            C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
            "'", xName, "' (", .arrayToString(x), ") is out of bounds [", lower, "; ", upper, "]",
            call. = call.
        )
    }
}

# Asserts that every value of 'x' lies strictly inside the open interval
# (lower; upper); same NA handling as .assertIsInClosedInterval.
.assertIsInOpenInterval <- function(x, xName, lower, upper, naAllowed = FALSE, call. = TRUE) {
    if (naAllowed && all(is.na(x))) {
        return(invisible())
    }

    if (!naAllowed && length(x) > 1 && any(is.na(x))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", xName, "' (", .arrayToString(x), ") must be a valid numeric vector or a single NA"
        )
    }

    if (is.null(upper) || is.na(upper)) {
        if (any(x <= lower, na.rm = TRUE)) {
            prefix <- ifelse(length(x) > 1, "each value of ", "")
            stop(
                C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
                prefix, "'", xName, "' (", .arrayToString(x), ") must be > ", lower,
                call. = call.
            )
        }
    } else if (any(x <= lower, na.rm = TRUE) || any(x >= upper, na.rm = TRUE)) {
        stop(
            C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
            "'", xName, "' (", .arrayToString(x), ") is out of bounds (", lower, "; ", upper, ")",
            call. = call.
        )
    }
}

# Validates a dataset for analysis: consistent field lengths, and — if 'stage'
# is given — plausible stage-wise values (positive sample sizes and standard
# deviations, non-negative events not exceeding sample sizes, etc.).
.assertIsValidDataInput <- function(dataInput, design = NULL, stage = NULL) {
    .assertIsDataset(dataInput)
    if (!is.null(design)) {
        .assertIsTrialDesign(design)
    }

    if (dataInput$.enrichmentEnabled && dataInput$getNumberOfGroups() != 2) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT,
            "only population enrichment data with 2 groups can be analyzed but ",
            dataInput$getNumberOfGroups(), " group",
            ifelse(dataInput$getNumberOfGroups() == 1, " is", "s are"), " defined"
        )
    }

    # all visible fields must have the same length as 'stages'
    stages <- dataInput$stages
    l1 <- length(stages)
    for (fieldName in dataInput$.getVisibleFieldNames()) {
        l2 <- length(dataInput[[fieldName]])
        if (fieldName != "stages" && l1 != l2) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT,
                "all parameters must have the same length ('stages' has length ",
                l1, ", '", fieldName, "' has length ", l2, ")"
            )
        }
    }

    if (!is.null(stage)) {
        if (dataInput$getNumberOfGroups() == 1) {
            if (.isDatasetMeans(dataInput)) {
                if (any(na.omit(dataInput$getStDevsUpTo(stage)) <= 0)) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all standard deviations must be > 0")
                }
                if (any(na.omit(dataInput$getSampleSizesUpTo(stage)) <= 0)) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all sample sizes must be > 0")
                }
            } else if (.isDatasetRates(dataInput)) {
                if (any(na.omit(dataInput$getEventsUpTo(stage)) < 0)) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all events must be >= 0")
                }
                if (any(na.omit(dataInput$getSampleSizesUpTo(stage)) <= 0)) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all sample sizes must be > 0")
                }
                if (any(na.omit(dataInput$getEventsUpTo(stage)) >
                        na.omit(dataInput$getSampleSizesUpTo(stage)))) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all events must be <= corresponding sample size")
                }
            }
        } else if (dataInput$getNumberOfGroups() == 2) {
            if (.isDatasetMeans(dataInput)) {
                if (any(na.omit(dataInput$getStDevsUpTo(stage, 1)) <= 0) ||
                        any(na.omit(dataInput$getStDevsUpTo(stage, 2)) <= 0)) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all standard deviations must be > 0")
                }
                if (any(na.omit(dataInput$getSampleSizesUpTo(stage, 1)) <= 0) ||
                        any(na.omit(dataInput$getSampleSizesUpTo(stage, 2)) <= 0)) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all sample sizes must be > 0")
                }
            } else if (.isDatasetRates(dataInput)) {
                if (any(na.omit(dataInput$getEventsUpTo(stage, 1)) < 0) ||
                        any(na.omit(dataInput$getEventsUpTo(stage, 2)) < 0)) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all events must be >= 0")
                }
                if (any(na.omit(dataInput$getSampleSizesUpTo(stage, 1)) <= 0) ||
                        any(na.omit(dataInput$getSampleSizesUpTo(stage, 2)) <= 0)) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all sample sizes must be > 0")
                }
                if (any(na.omit(dataInput$getEventsUpTo(stage, 1)) >
                        na.omit(dataInput$getSampleSizesUpTo(stage, 1))) ||
                        any(na.omit(dataInput$getEventsUpTo(stage, 2)) >
                            na.omit(dataInput$getSampleSizesUpTo(stage, 2)))) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all events must be <= corresponding sample size")
                }
            }
        }

        if (.isDatasetSurvival(dataInput)) {
            if (any(na.omit(dataInput$getOverallEventsUpTo(stage)) < 0)) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all cumulative events must be >= 0")
            }
            if (any(na.omit(dataInput$getOverallAllocationRatiosUpTo(stage)) <= 0)) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT, "all cumulative allocation
ratios must be > 0") } } } if (!is.null(design)) { numberOfStages <- length(unique(stats::na.omit(stages))) kMax <- design$kMax if (numberOfStages > kMax) { s <- numberOfStages - kMax plural <- ifelse(s == 1, "", "s") warning(sprintf( paste0( "The data of the last %s in the dataset will be ", "ignored because the design has specified kMax = %s" ), ifelse(s == 1, "stage", paste0(s, " stages")), kMax ), call. = FALSE) } else if (numberOfStages < kMax) { dataInput$.fillWithNAs(kMax) } } invisible(dataInput) } .assertIsDataset <- function(dataInput) { if (!.isDataset(dataInput)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' must be an instance of class ", "'DatasetMeans', 'DatasetRates' or 'DatasetSurvival' (is '", .getClassName(dataInput), "')" ) } } .assertIsDatasetMeans <- function(dataInput) { if (!.isDatasetMeans(dataInput = dataInput)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' must be an instance of class ", "'DatasetMeans' (is '", .getClassName(dataInput), "')" ) } } .assertIsDatasetRates <- function(dataInput) { if (!.isDatasetRates(dataInput = dataInput)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' must be an instance of class ", "'DatasetRates' (is '", .getClassName(dataInput), "')" ) } } .assertIsDatasetSurvival <- function(dataInput) { if (!.isDatasetSurvival(dataInput = dataInput)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' must be an instance of class ", "'DatasetSurvival' or 'DatasetEnrichmentSurvival' (is '", .getClassName(dataInput), "')" ) } } .isDataset <- function(dataInput) { return(.isDatasetMeans(dataInput) || .isDatasetRates(dataInput) || .isDatasetSurvival(dataInput)) } .isDatasetMeans <- function(dataInput) { return(inherits(dataInput, "DatasetMeans")) } .isDatasetRates <- function(dataInput) { return(inherits(dataInput, "DatasetRates")) } .isDatasetSurvival <- function(dataInput) { return(inherits(dataInput, "DatasetSurvival") || inherits(dataInput, "DatasetEnrichmentSurvival")) } 
.assertIsNumericVector <- function(x, argumentName, ..., naAllowed = FALSE, noDefaultAvailable = FALSE, call. = TRUE) { if (missing(x) || is.null(x) || length(x) == 0) { .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = FALSE) stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid numeric value or vector", call. = call. ) } .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = TRUE) if ((!naAllowed && any(is.na(x))) || !is.numeric(x)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .arrayToString(x), ") must be a valid numeric value or vector", call. = call. ) } } .assertIsIntegerVector <- function(x, argumentName, ..., naAllowed = FALSE, validateType = TRUE, noDefaultAvailable = FALSE, call. = TRUE) { if (missing(x) || is.null(x) || length(x) == 0) { .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = FALSE) stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid integer value or vector", call. = call. ) } .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = TRUE) if (naAllowed && all(is.na(x))) { return(invisible()) } if (!is.numeric(x) || (!naAllowed && any(is.na(x))) || (validateType && !is.integer(x)) || (!validateType && any(as.integer(na.omit(x)) != na.omit(x)))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .arrayToString(x), ") must be a valid integer value or vector", call. = call. ) } } .assertIsLogicalVector <- function(x, argumentName, ..., naAllowed = FALSE, noDefaultAvailable = FALSE, call. = TRUE) { if (missing(x) || is.null(x) || length(x) == 0) { .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = FALSE) stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' ", "must be a valid logical value or vector", call. = call. 
) } .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = TRUE) if ((!naAllowed && all(is.na(x))) || !is.logical(x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", x, ") ", "must be a valid logical value or vector", call. = call. ) } } .assertIsNoDefault <- function(x, argumentName, noDefaultAvailable, ..., checkNA = FALSE, call. = TRUE) { if (noDefaultAvailable && (!checkNA || all(is.na(x)))) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' ", "must be specified, there is no default value", call. = call. ) } } .assertIsSingleLogical <- function(x, argumentName, ..., naAllowed = FALSE, noDefaultAvailable = FALSE, call. = TRUE) { if (missing(x) || is.null(x) || length(x) == 0) { .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = FALSE) stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a single logical value", call. = call. ) } .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = TRUE) if (length(x) > 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' ", .arrayToString(x, vectorLookAndFeelEnabled = TRUE), " must be a single logical value", call. = call. ) } if ((!naAllowed && is.na(x)) || !is.logical(x)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", ifelse(isS4(x), .getClassName(x), x), ") must be a single logical value", call. = call. ) } } .assertIsSingleNumber <- function(x, argumentName, ..., naAllowed = FALSE, noDefaultAvailable = FALSE, call. = TRUE) { if (missing(x) || is.null(x) || length(x) == 0) { .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = FALSE) stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid numeric value", call. = call. 
) } .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = TRUE) if (length(x) > 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' ", .arrayToString(x, vectorLookAndFeelEnabled = TRUE), " must be a single numeric value", call. = call. ) } if ((!naAllowed && is.na(x)) || !is.numeric(x)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", ifelse(isS4(x), .getClassName(x), x), ") must be a valid numeric value", call. = call. ) } } .assertIsSingleInteger <- function(x, argumentName, ..., naAllowed = FALSE, validateType = TRUE, noDefaultAvailable = FALSE, call. = TRUE) { .assertIsSinglePositiveInteger( x = x, argumentName = argumentName, naAllowed = naAllowed, validateType = validateType, mustBePositive = FALSE, noDefaultAvailable = noDefaultAvailable, call. = call. ) } .assertIsSinglePositiveInteger <- function(x, argumentName, ..., naAllowed = FALSE, validateType = TRUE, mustBePositive = TRUE, noDefaultAvailable = FALSE, call. = TRUE) { prefix <- ifelse(mustBePositive, "single positive ", "single ") if (missing(x) || is.null(x) || length(x) == 0) { .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = FALSE) stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a ", prefix, "integer value", call. = call. ) } .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = TRUE) if (length(x) > 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' ", .arrayToString(x, vectorLookAndFeelEnabled = TRUE), " must be a ", prefix, "integer value", call. = call. ) } if (!is.numeric(x) || (!naAllowed && is.na(x)) || (validateType && !is.integer(x)) || (!validateType && !is.na(x) && !is.infinite(x) && as.integer(x) != x)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", ifelse(isS4(x), .getClassName(x), x), ") must be a ", prefix, "integer value", call. = call. 
) } if (mustBePositive && !is.na(x) && !is.infinite(x) && x <= 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", ifelse(isS4(x), .getClassName(x), x), ") must be a ", prefix, "integer value", call. = call. ) } } .assertIsSingleCharacter <- function(x, argumentName, ..., naAllowed = FALSE, noDefaultAvailable = FALSE, call. = TRUE) { if (missing(x) || is.null(x) || length(x) == 0) { .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = FALSE) stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid character value", call. = call. ) } .assertIsNoDefault(x, argumentName, noDefaultAvailable, checkNA = TRUE) if (length(x) > 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' ", .arrayToString(x, vectorLookAndFeelEnabled = TRUE), " must be a single character value", call. = call. ) } if (!is.character(x)) { stop( sprintf(paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' must be a valid character value (is an instance of class '%s')" ), argumentName, .getClassName(x)), call. = call. ) } if (!naAllowed && is.na(x)) { stop( sprintf(paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' (NA) must be a valid character value" ), argumentName), call. = call. ) } } .assertIsCharacter <- function(x, argumentName, ..., naAllowed = FALSE, call. = TRUE) { if (missing(x) || is.null(x) || length(x) == 0) { stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid character value or vector", call. = call. ) } if (!all(is.character(x))) { stop( sprintf(paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' must be a valid character value or vector ", "(is an instance of class '%s')" ), argumentName, .getClassName(x)), call. = call. ) } if (!naAllowed && any(is.na(x))) { stop( sprintf( paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' (%s) must be a valid character value (NA is not allowed)" ), argumentName, .arrayToString(x) ), call. = call. 
) } } .assertDesignParameterExists <- function(design, parameterName, defaultValue) { if (missing(design)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'design' must be defined") } if (missing(parameterName)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'parameterName' must be defined") } if (missing(defaultValue)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'defaultValue' must be defined") } value <- design[[parameterName]] if (is.null(value) || length(value) == 0 || all(is.na(value))) { stop( C_EXCEPTION_TYPE_MISSING_ARGUMENT, "parameter '", parameterName, "' must be specified in design" ) } if (is.null(defaultValue) || length(defaultValue) == 0 || all(is.na(defaultValue))) { design$.setParameterType(parameterName, C_PARAM_USER_DEFINED) return(invisible()) } if (all(value == defaultValue)) { design$.setParameterType(parameterName, C_PARAM_DEFAULT_VALUE) } else { design$.setParameterType(parameterName, C_PARAM_USER_DEFINED) } } .designParameterExists <- function(design, parameterName) { if (missing(design)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'design' must be defined") } if (missing(parameterName)) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'parameterName' must be defined") } value <- design[[parameterName]] if (is.null(value)) { return(FALSE) } if (length(value) > 1) { return(sum(is.na(value)) < length(value)) } return(!is.na(value)) } .assertIsOptimizationCriterion <- function(x) { if (!.isOptimizationCriterion(x)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "optimization criterion must be one of the following: ", .printOptimizationCriterion() ) } } .assertIsValidAlpha <- function(alpha) { .assertIsSingleNumber(alpha, "alpha") if (alpha < 1e-06 || alpha >= 0.5) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'alpha' (", alpha, ") is out of bounds [1e-06; 0.5)" ) } } .assertIsValidKappa <- function(kappa) { .assertIsSingleNumber(kappa, "kappa") .assertIsInOpenInterval(kappa, "kappa", lower = 0, upper = NULL) } .assertIsValidLambda <- function(lambda, 
lambdaNumber = 0) { argumentName <- "lambda" if (lambdaNumber >= 1) { argumentName <- paste0("lambda", lambdaNumber) } .assertIsNumericVector(lambda, argumentName, naAllowed = TRUE) if (all(is.na(lambda))) { return(invisible()) } if (any(is.na(lambda))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .arrayToString(lambda), ") must be a valid numeric vector" ) } .assertIsInClosedInterval(lambda, argumentName, lower = 0, upper = NULL) if (all(lambda == 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .arrayToString(lambda), ") not allowed: ", "at least one lambda value must be > 0" ) } } .assertIsValidFollowUpTime <- function(followUpTime) { if (is.null(followUpTime) || length(followUpTime) == 0 || is.na(followUpTime)) { return(invisible()) } .assertIsSingleNumber(followUpTime, "followUpTime", naAllowed = TRUE) if (followUpTime < 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'followUpTime' (", followUpTime, ") must be >= 0") } } .assertIsValidAccrualTime <- function(accrualTime) { .assertIsNumericVector(accrualTime, "accrualTime", naAllowed = TRUE) if (is.null(accrualTime) || length(accrualTime) == 0 || all(is.na(accrualTime))) { return(invisible()) } if (any(accrualTime < 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'accrualTime' (", .arrayToString(accrualTime), ") must be >= 0" ) } } .assertIsValidStandardDeviation <- function(stDev) { .assertIsSingleNumber(stDev, "stDev") if (stDev <= 0) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "standard deviation 'stDev' (", stDev, ") must be > 0") } } .assertIsValidBeta <- function(beta, alpha) { .assertIsSingleNumber(beta, "beta") .assertIsSingleNumber(alpha, "alpha") if (beta < 1e-04 || beta >= 1 - alpha) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'beta' (", beta, ") is out of bounds [1e-04; ", (1 - alpha), "); ", "condition: 1e-05 <= alpha < 1 - beta <= 1 - 1e-04" ) } } .assertIsValidAlphaAndBeta <- function(alpha, beta) { .assertIsValidAlpha(alpha) 
.assertIsValidBeta(beta, alpha) } .assertIsValidStage <- function(stage, kMax) { if (stage < 1 || stage > kMax) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'stage' (", stage, ") is out of bounds [1; ", kMax, "]" ) } } .assertIsValidIterationsAndSeed <- function(iterations, seed, ..., zeroIterationsAllowed = TRUE) { if (is.null(iterations) || length(iterations) == 0 || !is.numeric(iterations)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'iterations' must be a valid integer value" ) } if (zeroIterationsAllowed) { if (iterations < 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'iterations' (", iterations, ") must be >= 0" ) } } else { if (iterations < 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'iterations' (", iterations, ") must be > 0" ) } } if (is.null(seed) || length(seed) == 0 || (!is.na(seed) && !is.numeric(seed))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'seed' (", seed, ") must be a valid integer value" ) } } .assertIsValidLegendPosition <- function(legendPosition) { if (is.null(legendPosition) || length(legendPosition) != 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'legendPosition' (", .arrayToString(legendPosition), ") must be a single integer or character value" ) } if (is.na(legendPosition)) { return(invisible()) } if (!is.numeric(legendPosition) && !is.character(legendPosition)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'legendPosition' (", legendPosition, ") must be a single integer or character value" ) } if (is.numeric(legendPosition)) { .assertIsSingleInteger(legendPosition, "legendPosition", validateType = FALSE) .assertIsInClosedInterval(legendPosition, "legendPosition", lower = -1, upper = 6) } else { validLegendPositions <- c("none", "top", "bottom", "left", "right") if (!(legendPosition %in% validLegendPositions)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'legendPosition' (", legendPosition, ") must be one of the following values: ", .arrayToString(validLegendPositions) ) } } } .assertIsValidKMax <- function(kMax, 
kMaxLowerBound = 1, kMaxUpperBound = C_KMAX_UPPER_BOUND, ..., showWarnings = FALSE) { .assertIsSingleInteger(kMax, "kMax", validateType = FALSE) .assertIsInClosedInterval(kMax, "kMax", lower = kMaxLowerBound, upper = kMaxUpperBound) if (showWarnings && kMax > 10) { warning("The usage of 'kMax' (", kMax, ") > 10 is not validated", call. = FALSE) } } .assertAreValidInformationRates <- function(informationRates, kMax = length(informationRates), kMaxLowerBound = 1L, kMaxUpperBound = C_KMAX_UPPER_BOUND) { if (length(informationRates) < kMaxLowerBound) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'informationRates' (%s) is out of bounds [%s; %s]" ), length(informationRates), kMaxLowerBound, ifelse(kMax >= kMaxLowerBound && kMax < C_KMAX_UPPER_BOUND, kMax, C_KMAX_UPPER_BOUND) )) } .assertIsValidKMax(kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) if (length(informationRates) != kMax) { stop(sprintf( paste0( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'informationRates' (%s) must be equal to 'kMax' (%s)" ), length(informationRates), kMax )) } if (length(informationRates) > kMaxUpperBound) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'informationRates' (%s) is out of bounds [%s; %s]" ), length(informationRates), kMaxLowerBound, kMax )) } if (kMax == 1) { return(invisible()) } .assertValuesAreInsideBounds("informationRates", informationRates, 0, 1, lowerBoundInclusive = FALSE ) if (min(informationRates) <= 0 || max(informationRates) > 1 || any(informationRates[2:kMax] <= informationRates[1:(kMax - 1)])) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'informationRates' (%s) ", "must be strictly increasing: 0 < x_1 < .. 
< x_%s <= 1" ), .arrayToString(informationRates, vectorLookAndFeelEnabled = FALSE), kMax )) } } .assertValuesAreInsideBounds <- function(parameterName, values, lowerBound, upperBound, ..., lowerBoundInclusive = TRUE, upperBoundInclusive = TRUE) { lower <- min(values) upper <- max(values) lowerInvalid <- ifelse(lowerBoundInclusive, lower < lowerBound, lower <= lowerBound) upperInvalid <- ifelse(upperBoundInclusive, upper > upperBound, upper >= upperBound) if (!is.na(lowerInvalid)) { if (lowerInvalid || upperInvalid) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'%s' (%s) is out of bounds %s%s; %s%s" ), parameterName, .arrayToString(values, vectorLookAndFeelEnabled = FALSE), ifelse(lowerBoundInclusive, "[", "("), lowerBound, upperBound, ifelse(upperBoundInclusive, "]", ")") )) } } } .assertContainsNoNas <- function(values, parameterName) { if (any(is.na(values))) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' (%s) ", "must contain valid numeric values (NA is not allowed)" ), parameterName, .arrayToString(values, vectorLookAndFeelEnabled = FALSE) )) } } .assertContainsOnlyNasAtTheEnd <- function(values, parameterName) { if (length(values) <= 1) { return(invisible()) } for (i in length(values):2) { if (!is.na(values[i]) && is.na(values[i - 1])) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' (%s) ", "must contain valid numeric values (NAs are only allowed at the end of the vector)" ), parameterName, .arrayToString(values, vectorLookAndFeelEnabled = FALSE) )) } } } .assertValuesAreStrictlyIncreasing <- function(values, parameterName, endingNasAllowed = FALSE) { len <- length(values) if (len <= 1) { return(invisible()) } if (!endingNasAllowed) { .assertContainsNoNas(values, parameterName) } .assertContainsOnlyNasAtTheEnd(values, parameterName) valuesTemp <- values values <- na.omit(values) len <- length(values) if (len <= 1) { return(invisible()) } if (any(values[2:len] <= values[1:(len - 1)])) { stop(sprintf( 
paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' (%s) ", "must be strictly increasing: x_1 < .. < x_%s" ), parameterName, .arrayToString(valuesTemp, vectorLookAndFeelEnabled = FALSE), len )) } } .assertValuesAreMonotoneIncreasing <- function(values, parameterName, endingNasAllowed = FALSE) { len <- length(values) if (len <= 1) { return(invisible()) } if (!endingNasAllowed) { .assertContainsNoNas(values, parameterName) } .assertContainsOnlyNasAtTheEnd(values, parameterName) valuesTemp <- values values <- na.omit(values) len <- length(values) if (len <= 1) { return(invisible()) } if (any(values[2:len] < values[1:(len - 1)])) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'%s' (%s) ", "must be increasing: x_1 <= .. <= x_%s" ), parameterName, .arrayToString(valuesTemp, vectorLookAndFeelEnabled = FALSE), len )) } } .assertAreValidFutilityBounds <- function(futilityBounds, kMax = length(futilityBounds) + 1, kMaxLowerBound = 1, kMaxUpperBound = C_KMAX_UPPER_BOUND) { if (length(futilityBounds) < kMaxLowerBound - 1) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'futilityBounds' (%s) is out of bounds [%s; %s]" ), length(futilityBounds), kMaxLowerBound - 1, ifelse(kMax >= kMaxLowerBound && kMax < C_KMAX_UPPER_BOUND, kMax - 1, C_KMAX_UPPER_BOUND - 1) )) } .assertIsValidKMax(kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) if (length(futilityBounds) != kMax - 1) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'futilityBounds' (", length(futilityBounds), ") must be equal to 'kMax' (", kMax, ") - 1" ) } .assertValuesAreInsideBounds("futilityBounds", futilityBounds, -Inf, 6) } .assertIsValidCipher <- function(key, value) { if (getCipheredValue(value) != C_CIPHERS[[key]]) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'token' and/or 'secret' unkown") } } .assertIsValidAlpha0Vec <- function(alpha0Vec, kMax = length(alpha0Vec) - 1, kMaxLowerBound = 1, kMaxUpperBound = C_KMAX_UPPER_BOUND) { if 
(length(alpha0Vec) < kMaxLowerBound - 1) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'alpha0Vec' (%s) is out of bounds [%s; %s]" ), length(alpha0Vec), kMaxLowerBound - 1, kMax - 1 )) } .assertIsValidKMax(kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound) if (length(alpha0Vec) != kMax - 1) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'alpha0Vec' (", length(alpha0Vec), ") must be equal to 'kMax' (", kMax, ") - 1" ) } .assertValuesAreInsideBounds("alpha0Vec", alpha0Vec, 0, 1, lowerBoundInclusive = FALSE) } .assertIsValidSidedParameter <- function(sided) { if (is.null(match.call(expand.dots = FALSE)[["sided"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'sided' must be defined") } if (sided != 1 && sided != 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'sided' (", sided, ") must be 1 or 2") } } .assertIsValidGroupsParameter <- function(groups) { if (is.null(match.call(expand.dots = FALSE)[["groups"]])) { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'groups' must be defined") } if (groups != 1 && groups != 2) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'groups' (", groups, ") must be 1 or 2") } } .allArgumentsAreNotNull <- function(...) { args <- list(...) naCounter <- 0 for (arg in args) { if (!is.null(arg)) { naCounter <- naCounter + sum(is.na(arg)) } } return(naCounter == 0) } .assertAssociatedArgumentsAreDefined <- function(...) { .associatedArgumentsAreDefined(..., warningOnlyEnabled = FALSE) } .associatedArgumentsAreDefined <- function(..., warningOnlyEnabled = TRUE) { args <- NULL tryCatch(expr = { args <- list(...) }, error = function(e) { stop(simpleError(paste0(C_EXCEPTION_TYPE_MISSING_ARGUMENT, e$message), call = e$call)) }) if (.allArgumentsAreNotNull(...)) { return(invisible(TRUE)) } args <- args[args != "warningOnlyEnabled" & !is.null(args)] argNames <- names(args) if (sum(argNames == "") > 0) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "each argument must have a name defined, e.g. 
a = a") } definedArguments <- c() undefinedArguments <- c() for (i in 1:length(args)) { arg <- args[i] argName <- argNames[i] if (missing(arg) || (!is.null(arg) && sum(is.na(arg)) > 0)) { undefinedArguments <- c(undefinedArguments, argName) } else { definedArguments <- c(definedArguments, argName) } } if (length(undefinedArguments) > 0 && length(definedArguments) > 0) { message <- paste0( .arrayToString(undefinedArguments, encapsulate = TRUE), " ", ifelse(warningOnlyEnabled, "should", "must"), " be defined because ", .arrayToString(definedArguments, encapsulate = TRUE), ifelse(length(definedArguments) > 1, " are", " is"), " defined" ) if (warningOnlyEnabled) { warning(C_EXCEPTION_TYPE_INCOMPLETE_ARGUMENTS, message, call. = FALSE) return(FALSE) } else { stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, message) } } return(invisible(length(definedArguments) == length(args))) } .assertIsValidNPlanned <- function(nPlanned, kMax, stage, ..., required = TRUE) { if (is.null(nPlanned) || (length(nPlanned) > 0 && all(is.na(nPlanned)))) { if (!required) { return(invisible()) } stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'nPlanned' must be specified") } if (length(nPlanned) != kMax - stage) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sprintf( paste0( "'nPlanned' (%s) is invalid: ", "length must be equal to %s (kMax - stage = %s - %s)" ), .arrayToString(nPlanned), kMax - stage, kMax, stage ) ) } if (sum(is.na(nPlanned)) > 0 || sum(nPlanned <= 0) > 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sprintf( paste0( "'nPlanned' (%s) is invalid: ", "all values must be > 0" ), .arrayToString(nPlanned) ) ) } } .isValidNPlanned <- function(nPlanned, kMax, stage) { if (missing(nPlanned)) { warning("'nPlanned' is missing", call. = FALSE) return(FALSE) } if (!any(is.na(nPlanned))) { if ((length(nPlanned) != kMax - stage)) { warning(sprintf( paste0( "'nPlanned' (%s) will be ignored: ", "length must be equal to %s (kMax - stage = %s - %s)" ), .arrayToString(nPlanned), kMax - stage, kMax, stage ), call. 
= FALSE) return(FALSE) } if (sum(is.na(nPlanned)) > 0 || sum(nPlanned <= 0) > 0) { warning(sprintf( paste0( "'nPlanned' (%s) will be ignored: ", "all values must be > 0" ), .arrayToString(nPlanned) ), call. = FALSE) return(FALSE) } } return(TRUE) } .warnInCaseOfUnknownArguments <- function(..., functionName, ignore = character(0), numberOfAllowedUnnamedParameters = 0, exceptionEnabled = FALSE) { args <- list(...) if (length(args) == 0) { return(invisible()) } if (numberOfAllowedUnnamedParameters > 0) { ignore <- c(ignore, paste0("%param", 1:numberOfAllowedUnnamedParameters, "%")) } ignore <- c(ignore, "showWarnings") argNames <- names(args) for (i in 1:length(args)) { arg <- args[[i]] argName <- ifelse(is.null(argNames[i]) || argNames[i] == "", ifelse(inherits(arg, "StageResults"), "stageResultsName", paste0("%param", i, "%")), argNames[i] ) if (!(argName %in% ignore) && !grepl("^\\.", argName)) { if (isS4(arg) || is.environment(arg)) { arg <- .getClassName(arg) } if (is.function(arg)) { arg <- "function(...)" } argValue <- paste0(" (", .getClassName(arg), ")") tryCatch(expr = { argValue <- .arrayToString(arg, vectorLookAndFeelEnabled = length(arg) > 1, encapsulate = is.character(arg)) argValue <- paste0(" = ", argValue) }, error = function(e) {}) if (exceptionEnabled) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "argument unknown in ", functionName, "(...): '", argName, "'", argValue, " is not allowed", call. = FALSE ) } else { warning("Argument unknown in ", functionName, "(...): '", argName, "'", argValue, " will be ignored", call. = FALSE ) } } } } .warnInCaseOfUnusedArgument <- function(arg, argName, defaultValue, functionName) { if (!identical(arg, defaultValue)) { warning("Unused argument in ", functionName, "(...): '", argName, "' = ", .arrayToString(arg, vectorLookAndFeelEnabled = (length(arg) > 1), maxLength = 10), " will be ignored", call. 
= FALSE ) } } .isTrialDesignWithValidFutilityBounds <- function(design) { if (is.null(design) || !.isTrialDesignInverseNormalOrGroupSequential(design)) { return(FALSE) } futilityBounds <- design[["futilityBounds"]] if (is.null(futilityBounds)) { return(FALSE) } if (length(futilityBounds) == 0 || sum(is.na(futilityBounds)) == design$kMax) { return(FALSE) } return(any(na.omit(futilityBounds) > C_FUTILITY_BOUNDS_DEFAULT)) } .isTrialDesignWithValidAlpha0Vec <- function(design) { if (is.null(design) || !.isTrialDesignFisher(design)) { return(FALSE) } alpha0Vec <- design[["alpha0Vec"]] if (is.null(alpha0Vec)) { return(FALSE) } alpha0Vec <- na.omit(alpha0Vec) if (length(alpha0Vec) == 0 || all(is.na(alpha0Vec))) { return(FALSE) } return(any(alpha0Vec != C_ALPHA_0_VEC_DEFAULT)) } .assertPackageIsInstalled <- function(packageName) { if (!requireNamespace(packageName, quietly = TRUE)) { stop("Package \"", packageName, "\" is needed for this function to work. ", "Please install using, e.g., install.packages(\"", packageName, "\")", call. = FALSE ) } } .assertGgplotIsInstalled <- function() { .assertPackageIsInstalled("ggplot2") } .assertRcppIsInstalled <- function() { .assertPackageIsInstalled("Rcpp") } .assertTestthatIsInstalled <- function() { .assertPackageIsInstalled("testthat") } .assertMnormtIsInstalled <- function() { .assertPackageIsInstalled("mnormt") } .assertIsValidThetaH0 <- function(thetaH0, ..., endpoint = c("means", "rates", "survival"), groups, ratioEnabled = FALSE) { .warnInCaseOfUnknownArguments(functionName = ".assertIsValidThetaH0", ...) 
    # NA means "not specified": the caller-side defaults apply
    if (is.na(thetaH0)) {
        return(invisible())
    }
    if (!is.numeric(thetaH0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaH0' must be a valid numeric value")
    }

    endpoint <- match.arg(endpoint)
    # ratio comparisons (two groups) only require a positive thetaH0
    if (endpoint == "means" || endpoint == "rates") {
        if (groups == 2 && ratioEnabled) {
            if (thetaH0 <= 0) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaH0' (", thetaH0, ") must be > 0")
            }
            return(invisible())
        }
    }

    if (endpoint == "rates") {
        if (groups == 1) {
            # one-group rate: thetaH0 is itself a rate, open interval (0; 1)
            if (thetaH0 <= 0 || thetaH0 >= 1) {
                stop(
                    C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
                    "'thetaH0' (", thetaH0, ") is out of bounds (0; 1) or not specified"
                )
            }
        } else {
            # two-group rate difference: open interval (-1; 1)
            if (thetaH0 <= -1 || thetaH0 >= 1) {
                stop(
                    C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
                    "'thetaH0' (", thetaH0, ") is out of bounds (-1; 1)"
                )
            }
        }
    } else if (endpoint == "survival") {
        if (thetaH0 <= 0) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaH0' (", thetaH0, ") must be > 0")
        }
    }
}

# Infers the endpoint type from 'dataInput' and delegates to
# .assertIsValidThetaH0 with the matching 'endpoint' and group count.
.assertIsValidThetaH0DataInput <- function(thetaH0, dataInput) {
    if (.isDatasetRates(dataInput)) {
        endpoint <- "rates"
    } else if (.isDatasetSurvival(dataInput)) {
        endpoint <- "survival"
    } else {
        endpoint <- "means"
    }
    .assertIsValidThetaH0(thetaH0, endpoint = endpoint, groups = dataInput$getNumberOfGroups())
}

# Validates 'thetaRange'; when it has exactly two entries (min, max) and
# auto-sequencing is enabled, expands it into a regular sequence and returns it.
.assertIsValidThetaRange <- function(..., thetaRange, thetaAutoSeqEnabled = TRUE, survivalDataEnabled = FALSE) {
    if (is.null(thetaRange) || (thetaAutoSeqEnabled && length(thetaRange) <= 1) || any(is.na(thetaRange))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'thetaRange' (", .arrayToString(thetaRange), ") must be a vector ",
            "with two entries defining minimum and maximum ",
            "or a sequence of numeric values with length > 2"
        )
    } else if (length(thetaRange) == 2 && thetaAutoSeqEnabled) {
        minValue <- thetaRange[1]
        maxValue <- thetaRange[2]
        if (survivalDataEnabled) {
            # NOTE(review): the second argument of .assertIsValidHazardRatio is
            # 'thetaH0', but the argument *name* string is passed here -- the
            # equality check then compares against a character value; confirm intent
            .assertIsValidHazardRatio(minValue, "thetaRange[1]")
            .assertIsValidHazardRatio(maxValue, "thetaRange[2]")
        }
        if (minValue >= maxValue) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'thetaRange' with length 2 must contain minimum < maximum (",
                minValue, " >= ", maxValue, ")"
            )
        }
        by <- (maxValue - minValue) / C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT
        thetaRange <- seq(minValue, maxValue, by)
    }
    invisible(thetaRange)
}

# Validates 'piTreatmentRange'; when two entries are given and auto-sequencing
# is enabled, clips exact 0/1 to the open interval and expands to a sequence.
.assertIsValidPiTreatmentRange <- function(..., piTreatmentRange, piAutoSeqEnabled = TRUE) {
    if (is.null(piTreatmentRange) ||
            (piAutoSeqEnabled && length(piTreatmentRange) <= 1) ||
            any(is.na(piTreatmentRange))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'piTreatmentRange' (", .arrayToString(piTreatmentRange), ") must be a vector ",
            "with two entries defining minimum and maximum ",
            "or a sequence of numeric values with length > 2"
        )
    } else if (length(piTreatmentRange) == 2) {
        if (piAutoSeqEnabled) {
            minValue <- piTreatmentRange[1]
            maxValue <- piTreatmentRange[2]
            # clip exact boundary values so the open-interval check below passes
            if (minValue == 0) {
                minValue <- 0.00000001
            }
            if (maxValue == 1) {
                maxValue <- 0.99999999
            }
            .assertIsValidPi(minValue, "piTreatmentRange[1]")
            .assertIsValidPi(maxValue, "piTreatmentRange[2]")
            if (minValue >= maxValue) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'piTreatmentRange' with length 2 must contain minimum < maximum (",
                    minValue, " >= ", maxValue, ")"
                )
            }
            by <- (maxValue - minValue) / C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT
            piTreatmentRange <- seq(minValue, maxValue, by)
        }
    }
    invisible(piTreatmentRange)
}

# Validates that 'piValue' is numeric and within (0; 1) up to a 1e-16
# tolerance; an all-NA value is accepted (meaning "not specified").
.assertIsValidPi <- function(piValue, piName) {
    if (is.null(piValue) || length(piValue) == 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", piName, "' must be a valid numeric value"
        )
    }

    if (all(is.na(piValue))) {
        return(invisible())
    }

    if (!is.numeric(piValue) || any(is.na(piValue))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", piName, "' (", .arrayToString(piValue), ") must be a valid numeric value"
        )
    }

    # NOTE(review): the tolerance admits exactly 0 and 1 although the message
    # states the open interval (0; 1)
    if (any(piValue <= -1e-16) || any(piValue >= 1 + 1e-16)) {
        stop(
            C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
            "'", piName, "' (", .arrayToString(piValue),
            ") is out of bounds (0; 1) or event time too long"
        )
    }
}

# Validates 'pi1' (treatment rate); if NA, derives it from the stage results.
.assertIsValidPi1 <- function(pi1, stageResults = NULL, stage = NULL) {
    if (is.na(pi1) &&
!is.null(stageResults) && !is.null(stage)) {
        if (stageResults$isOneSampleDataset()) {
            pi1 <- stageResults$overallEvents[stage] / stageResults$overallSampleSizes[stage]
        } else {
            pi1 <- stageResults$overallEvents1[stage] / stageResults$overallSampleSizes1[stage]
        }
    }
    .assertIsInClosedInterval(pi1, "pi1", lower = 0, upper = 1)
    invisible(pi1)
}

# Validates 'pi2' (control rate); if NA, derives it from the stage results.
.assertIsValidPi2 <- function(pi2, stageResults = NULL, stage = NULL) {
    if (is.na(pi2) && !is.null(stageResults) && !is.null(stage)) {
        pi2 <- stageResults$overallEvents2[stage] / stageResults$overallSampleSizes2[stage]
    }
    .assertIsInClosedInterval(pi2, "pi2", lower = 0, upper = 1)
    invisible(pi2)
}

# Validates 'allocationRatioPlanned' for designs with more than one group;
# for a single group there is nothing to validate.
.assertIsValidAllocationRatioPlanned <- function(allocationRatioPlanned, numberOfGroups) {
    if (numberOfGroups == 1) {
        return(invisible())
    }

    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(
        allocationRatioPlanned,
        "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM
    )
    # fix: removed an unreachable warning branch that was guarded by
    # 'numberOfGroups == 1' -- the early return above makes that condition
    # impossible at this point, so the branch was dead code
}

# Validates 'allocationRatioPlanned' in the sample size context: it must be a
# single number >= 0, and 0 (optimal allocation) is only allowed when no
# explicit or implicit 'maxNumberOfSubjects' > 0 is given.
.assertIsValidAllocationRatioPlannedSampleSize <- function(allocationRatioPlanned, maxNumberOfSubjects = NA_real_) {
    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    if (allocationRatioPlanned < 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'allocationRatioPlanned' (", allocationRatioPlanned, ") must be >= 0"
        )
    }

    if (length(maxNumberOfSubjects) > 0 && !is.na(maxNumberOfSubjects) &&
            maxNumberOfSubjects > 0 && allocationRatioPlanned == 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "determination of optimal allocation ratio not possible ",
            "if explicit or implicit 'maxNumberOfSubjects' (", maxNumberOfSubjects,
            ") > 0, i.e., follow-up time should be calculated ",
            "(please specify an 'allocationRatioPlanned' > 0)"
        )
    }
}

# Validates 'thetaH1'; if NA, derives it from the observed effect size of the
# given stage and marks the parameter as generated on 'results'.
.assertIsValidThetaH1 <- function(thetaH1, stageResults = NULL, stage = NULL, ..., results = NULL) {
    if (is.na(thetaH1) && !is.null(stageResults) && !is.null(stage)) {
        thetaH1 <- stageResults$effectSizes[stage]
        if (!is.null(results)) {
            results$.setParameterType("thetaH1", C_PARAM_GENERATED)
        }
    }
    .assertIsSingleNumber(thetaH1, "thetaH1")
    invisible(thetaH1)
}

# Validates 'assumedStDev' (> 0); if NA, derives it from the overall standard
# deviation of the given stage and marks the parameter as generated.
.assertIsValidAssumedStDev <- function(assumedStDev, stageResults = NULL, stage = NULL, ..., results = NULL) {
    if (is.na(assumedStDev) && !is.null(stageResults) && !is.null(stage)) {
        assumedStDev <- stageResults$overallStDevs[stage]
        if (!is.null(results)) {
            results$.setParameterType("assumedStDev", C_PARAM_GENERATED)
        }
    }
    .assertIsSingleNumber(assumedStDev, "assumedStDev")
    if (assumedStDev <= 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'assumedStDev' (", assumedStDev, ") must be > 0"
        )
    }
    invisible(assumedStDev)
}

# Multi-arm variant of .assertIsValidThetaH1: 'thetaH1' may be a vector; if
# all-NA, it is taken from the stage-wise effect size column.
.assertIsValidThetaH1ForMultiArm <- function(thetaH1, stageResults = NULL, stage = NULL, ..., results = NULL) {
    if (!is.null(stageResults) && all(is.na(thetaH1)) && !is.null(stage)) {
        thetaH1 <- stageResults$effectSizes[, stage]
        if (!is.null(results)) {
            results$.setParameterType("thetaH1", C_PARAM_GENERATED)
        }
    }
    .assertIsNumericVector(thetaH1, "thetaH1",
naAllowed = TRUE)
    invisible(thetaH1)
}

# Enrichment variant: identical rules, delegates to the multi-arm validation.
.assertIsValidThetaH1ForEnrichment <- function(thetaH1, stageResults = NULL, stage = NULL, ..., results = NULL) {
    invisible(.assertIsValidThetaH1ForMultiArm(
        thetaH1 = thetaH1, stageResults = stageResults,
        stage = stage, results = results
    ))
}

# Validates 'assumedStDev' for multi-hypotheses analyses; if all-NA, derives
# it from the overall standard deviations (matrix column for multi-arm,
# plain vector element otherwise) and marks the parameter as generated.
.assertIsValidAssumedStDevForMultiHypotheses <- function(assumedStDev, stageResults = NULL,
        stage = NULL, ..., results = NULL) {
    if (!is.null(stageResults) && all(is.na(assumedStDev)) && !is.null(stage)) {
        if (is.matrix(stageResults$overallStDevs)) { # inherits(stageResults, "StageResultsMultiArmMeans")
            assumedStDev <- stageResults$overallStDevs[, stage]
        } else {
            assumedStDev <- stageResults$overallStDevs[stage]
        }
        if (!is.null(results)) {
            results$.setParameterType("assumedStDevs", C_PARAM_GENERATED)
        }
    }
    .assertIsNumericVector(assumedStDev, "assumedStDev", naAllowed = TRUE)
    if (any(assumedStDev <= 0, na.rm = TRUE)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'assumedStDev' (", .arrayToString(assumedStDev), ") must be > 0"
        )
    }
    invisible(assumedStDev)
}

# Asserts that 'assumedStDevs' has length 1 or 'gMax'.
.assertIsValidAssumedStDevs <- function(assumedStDevs, gMax) {
    if (length(assumedStDevs) != 1 && length(assumedStDevs) != gMax) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            sprintf(paste0(
                "length of 'assumedStDevs' (%s) ",
                "must be equal to 'gMax' (%s) or 1"
            ), .arrayToString(assumedStDevs), gMax)
        )
    }
}

# Validates treatment rates for multi-arm analyses; if all-NA, derives them
# from the stage-wise overall treatment rates and marks them as generated.
.assertIsValidPiTreatmentsForMultiArm <- function(piTreatments, stageResults = NULL, stage = NULL, ..., results = NULL) {
    if (!is.null(stageResults) && all(is.na(piTreatments)) && !is.null(stage)) {
        piTreatments <- stageResults$overallPiTreatments[, stage]
        if (!is.null(results)) {
            results$.setParameterType("piTreatments", C_PARAM_GENERATED)
        }
    }
    .assertIsNumericVector(piTreatments, "piTreatments", naAllowed = TRUE)
    .assertIsInClosedInterval(piTreatments, "piTreatments", lower = 0, upper = 1, naAllowed = TRUE)
    invisible(piTreatments)
}

# Validates the control rate for multi-arm analyses; if all-NA, derives it
# from the stage-wise overall control rates and marks it as generated.
.assertIsValidPiControlForMultiArm <- function(piControl, stageResults = NULL, stage = NULL, ..., results =
        NULL) {
    # fix: use all(is.na(...)) like the sibling validators above/below; a
    # plain is.na() on a length > 1 'piControl' inside if() is an error since R 4.2.0
    if (!is.null(stageResults) && all(is.na(piControl)) && !is.null(stage)) {
        piControl <- stageResults$overallPiControl[, stage]
        if (!is.null(results)) {
            results$.setParameterType("piControl", C_PARAM_GENERATED)
        }
    }
    .assertIsNumericVector(piControl, "piControl", naAllowed = TRUE)
    .assertIsInClosedInterval(piControl, "piControl", lower = 0, upper = 1)
    invisible(piControl)
}

# Enrichment variant of the treatment-rate validation (reads
# 'overallPisTreatment' instead of 'overallPiTreatments').
.assertIsValidPiTreatmentsForEnrichment <- function(piTreatments, stageResults = NULL, stage = NULL, ..., results = NULL) {
    if (!is.null(stageResults) && all(is.na(piTreatments)) && !is.null(stage)) {
        piTreatments <- stageResults$overallPisTreatment[, stage]
        if (!is.null(results)) {
            results$.setParameterType("piTreatments", C_PARAM_GENERATED)
        }
    }
    .assertIsNumericVector(piTreatments, "piTreatments", naAllowed = TRUE)
    .assertIsInClosedInterval(piTreatments, "piTreatments", lower = 0, upper = 1, naAllowed = TRUE)
    invisible(piTreatments)
}

# Enrichment variant of the control-rate validation (vector-valued controls).
.assertIsValidPiControlForEnrichment <- function(piControls, stageResults = NULL, stage = NULL, ..., results = NULL) {
    if (!is.null(stageResults) && all(is.na(piControls)) && !is.null(stage)) {
        piControls <- stageResults$overallPisControl[, stage]
        if (!is.null(results)) {
            results$.setParameterType("piControls", C_PARAM_GENERATED)
        }
    }
    .assertIsNumericVector(piControls, "piControls", naAllowed = TRUE)
    .assertIsInClosedInterval(piControls, "piControls", lower = 0, upper = 1, naAllowed = TRUE)
    invisible(piControls)
}

# Asserts that no element of 'hazardRatio' equals 'thetaH0' (the alternative
# must differ from the null); only the first 10 values are echoed on error.
.assertIsValidHazardRatio <- function(hazardRatio, thetaH0) {
    .assertIsNumericVector(hazardRatio, "hazardRatio")
    if (any(hazardRatio == thetaH0)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "alternative not correctly specified: ",
            "each hazard ratio (", .arrayToString(hazardRatio[1:min(length(hazardRatio), 10)]),
            ") must be unequal to 'thetaH0' (", thetaH0, ")"
        )
    }
}

# Asserts that all elements of 'hazardRatio' are strictly positive.
.assertIsValidHazardRatioVector <- function(hazardRatio) {
    .assertIsNumericVector(hazardRatio, "hazardRatio")
    if (any(hazardRatio <= 0)) {
        if (length(hazardRatio) == 1) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'hazardRatio' (", hazardRatio, ") must be > 0")
        } else {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "each 'hazardRatio' (",
                .arrayToString(hazardRatio[1:min(length(hazardRatio), 10)]), ") must be > 0"
            )
        }
    }
}

# Normalizes 'directionUpper': for power calculations a one-sided NA defaults
# to TRUE and a two-sided non-NA value is ignored with a warning; for sample
# size calculations NA simply defaults to TRUE.
.assertIsValidDirectionUpper <- function(directionUpper, sided,
        objectType = c("sampleSize", "power"), userFunctionCallEnabled = FALSE) {
    objectType <- match.arg(objectType)
    .assertIsSingleLogical(directionUpper, "directionUpper", naAllowed = TRUE)

    if (objectType == "power") {
        if (sided == 1 && is.na(directionUpper)) {
            directionUpper <- TRUE
        }
        if (userFunctionCallEnabled && sided == 2 && !is.na(directionUpper)) {
            warning("'directionUpper' will be ignored because it ",
                "is not applicable for 'sided' = 2",
                call. = FALSE
            )
        }
    } else if (is.na(directionUpper)) {
        directionUpper <- TRUE
    }
    return(directionUpper)
}

# Asserts that 'fun' is a non-NULL function object.
.assertIsFunction <- function(fun) {
    if (is.null(fun)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'fun' must be a valid function")
    }
    if (!is.function(fun)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'fun' must be a function (is ", .getClassName(fun), ")")
    }
}

# Validates a user-supplied function against an expected signature, given
# either as a character vector ('expectedArguments') or a reference function
# ('expectedFunction'). Optionally requires '...' in the signature, an exact
# argument set ('identical'), and can report unused optional arguments.
.assertIsValidFunction <- function(fun, ...,
        funArgName = "fun",
        expectedArguments = NULL,
        expectedFunction = NULL,
        identical = FALSE,
        validateThreeDots = TRUE,
        showUnusedArgumentsMessage = FALSE,
        namedArgumentsExpected = FALSE) {
    fCall <- match.call(expand.dots = FALSE)
    if (is.null(expectedArguments) && is.null(expectedFunction)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'expectedArguments' or 'expectedFunction' must be not NULL"
        )
    }
    if (!is.function(fun)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", funArgName, "' must be a function"
        )
    }

    # use the caller-visible expression for nicer error messages
    functionName <- as.character(fCall$fun)
    if (is.null(functionName) || functionName == funArgName) {
        functionName <- "function"
    }

    argNames <- methods::formalArgs(fun)
    if (!is.null(expectedArguments)) {
        argNamesExpected <- expectedArguments
    } else if (!is.null(expectedFunction)) {
        if (!is.function(expectedFunction)) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'expectedFunction' must be a function"
            )
        }
        argNamesExpected <- methods::formalArgs(expectedFunction)
    }

    if (validateThreeDots) {
        if (!("..." %in% argNames)) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'", funArgName, "' must contain the three-dots argument '...', e.g., ",
                funArgName, " = ", functionName, "(", .arrayToString(argNames), ", ...)"
            )
        }
    }

    argNames <- argNames[argNames != "..."]
    argNamesExpected <- argNamesExpected[argNamesExpected != "..."]

    if (length(argNamesExpected) < ifelse(namedArgumentsExpected, 1, 2) &&
            length(argNames) == length(argNamesExpected)) {
        return(invisible())
    }

    # reject any argument that is not in the expected set
    for (argName in argNames) {
        if (argName != "..." && !(argName %in% argNamesExpected)) {
            msg <- paste0(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "the argument '", argName, "' in '", funArgName,
                "' (", functionName, ") is not allowed."
            )
            if (length(argNamesExpected) == 1) {
                stop(msg, " Expected: '", argNamesExpected, "'")
            }
            stop(
                msg, "\n\n",
                "Use one or more of the following arguments:\n ",
                .arrayToString(argNamesExpected, encapsulate = TRUE)
            )
        }
    }

    if (identical) {
        for (argNameExpected in argNamesExpected) {
            if (argNameExpected != "..." && !(argNameExpected %in% argNames)) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'", funArgName, "' (", functionName, ") must contain ",
                    "an argument with name '", argNameExpected, "'"
                )
            }
        }
        return(invisible())
    }

    counter <- 0
    unusedArgs <- c()
    for (argNameExpected in argNamesExpected) {
        if (argNameExpected %in% argNames) {
            counter <- counter + 1
        } else {
            unusedArgs <- c(unusedArgs, argNameExpected)
        }
    }
    if (counter == 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", funArgName, "' (", functionName, ") must contain at ",
            "least one of the following arguments: ",
            .arrayToString(argNamesExpected)
        )
    }
    if (showUnusedArgumentsMessage && length(unusedArgs) > 0) {
        # fix: message() has no 'call.' argument (that belongs to warning()/stop());
        # passing call. = FALSE was absorbed into '...' and appended the literal
        # text "FALSE" to the emitted message
        message(
            "Note that the following arguments can optionally be used in '",
            funArgName, "' (", functionName, "): \n",
            .arrayToString(unusedArgs)
        )
    }
}

# Asserts that 'threshold' is a single value or has one entry per active arm.
.assertIsValidThreshold <- function(threshold, activeArms) {
    .assertIsNumericVector(threshold, "threshold", naAllowed = TRUE)
    if ((length(threshold) != 1) && (length(threshold) != activeArms)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'threshold' (", .arrayToString(threshold),
            ") must be a single value or a vector of length ", activeArms
        )
    }
}

# Asserts that planned subjects/events form a strictly increasing integer
# vector of length 'kMax' with all entries >= 1.
.assertIsValidPlannedSubjectsOrEvents <- function(design, plannedValues,
        parameterName = c("plannedSubjects", "plannedEvents")) {
    parameterName <- match.arg(parameterName)
    .assertIsIntegerVector(plannedValues, parameterName, validateType = FALSE)
    if (length(plannedValues) != design$kMax) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", parameterName, "' (", .arrayToString(plannedValues),
            ") must have length ", design$kMax
        )
    }
    .assertIsInClosedInterval(plannedValues, parameterName, lower = 1, upper = NULL)
    .assertValuesAreStrictlyIncreasing(plannedValues, parameterName)
}

# Validates the stage-wise subjects/events vector used for sample size
# recalculation; returns NA for fixed designs and normalizes the first entry.
.assertIsValidNumberOfSubjectsPerStage <- function(parameterValues, parameterName,
        plannedSubjects, conditionalPower, calcSubjectsFunction, kMax,
        endpoint = c("means", "rates", "survival"), calcSubjectsFunctionEnabled = TRUE) {
    endpoint <- match.arg(endpoint)
    if (kMax == 1) {
        .ignoreParameterIfNotUsed(
            "conditionalPower",
            conditionalPower, kMax > 1, "design is fixed ('kMax' = 1)"
        )
        return(invisible(NA_real_))
    }

    .assertIsNumericVector(parameterValues, parameterName, naAllowed = TRUE)
    # survival designs recalculate events, not subjects
    calcSubjectsFunctionName <- ifelse(endpoint == "survival",
        "calcEventsFunction", "calcSubjectsFunction")
    if (is.na(conditionalPower) && is.null(calcSubjectsFunction)) {
        if (length(parameterValues) != 1 || !is.na(parameterValues)) {
            if (calcSubjectsFunctionEnabled) {
                warning("'", parameterName, "' (", .arrayToString(parameterValues), ") ",
                    "will be ignored because neither 'conditionalPower' nor '",
                    calcSubjectsFunctionName, "' is defined",
                    call.
= FALSE
                )
            } else {
                warning("'", parameterName, "' (", .arrayToString(parameterValues), ") ",
                    "will be ignored because 'conditionalPower' is not defined",
                    call. = FALSE
                )
            }
        }
        return(invisible(NA_real_))
    }

    # NOTE(review): && binds tighter than ||, so this reads
    # (!is.na(conditionalPower) && length == 0) || (length == 1 && is.na);
    # confirm that the second disjunct is intended to fire even when
    # 'conditionalPower' is NA (i.e., when only the calc function is defined)
    if (!is.na(conditionalPower) && length(parameterValues) == 0 ||
            (length(parameterValues) == 1 && is.na(parameterValues))) {
        if (calcSubjectsFunctionEnabled) {
            stop(
                C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                "'", parameterName, "' must be defined ",
                "because 'conditionalPower' or '", calcSubjectsFunctionName, "' is defined"
            )
        } else {
            stop(
                C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                "'", parameterName, "' must be defined ",
                "because 'conditionalPower' is defined"
            )
        }
    }

    if (length(parameterValues) != kMax) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", parameterName, "' (", .arrayToString(parameterValues),
            ") must have length ", kMax
        )
    }

    # only the first entry may be NA; it is overwritten below anyway
    if (any(is.na(parameterValues[2:length(parameterValues)]))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", parameterName, "' (", .arrayToString(parameterValues),
            ") must contain valid numeric values"
        )
    }

    if (!is.na(parameterValues[1]) && parameterValues[1] != plannedSubjects[1]) {
        warning("First value of '", parameterName, "' (",
            parameterValues[1], ") will be ignored", call.
            = FALSE)
    }
    # the first stage is always fixed by the planned values
    parameterValues[1] <- plannedSubjects[1]
    .assertIsInClosedInterval(parameterValues, parameterName, lower = 1, upper = NULL)
    return(invisible(parameterValues))
}

# Asserts that 'maxNumberOfSubjects' is a single number >= 1 (NA optionally allowed).
.assertIsValidMaxNumberOfSubjects <- function(maxNumberOfSubjects, naAllowed = FALSE) {
    .assertIsSingleNumber(maxNumberOfSubjects, "maxNumberOfSubjects", naAllowed = naAllowed)
    .assertIsInClosedInterval(maxNumberOfSubjects, "maxNumberOfSubjects",
        lower = 1, upper = NULL, naAllowed = naAllowed)
}

# For group sequential designs, warns when the observed events / sample sizes
# deviate from the design's information rates beyond the accepted tolerance
# (the type I error control may then be compromised).
.assertAreSuitableInformationRates <- function(design, dataInput, stage) {
    if (!.isTrialDesignGroupSequential(design) || stage == 1) {
        return(invisible())
    }

    param <- NA_character_
    paramValues <- NA_real_
    if (dataInput$isDatasetSurvival()) {
        # survival: information is driven by events
        if (any(abs(design$informationRates[2:stage] -
                dataInput$getOverallEventsUpTo(stage)[2:stage] /
                    dataInput$getOverallEventsUpTo(1) * design$informationRates[1]) >
                C_ACCEPT_DEVIATION_INFORMATIONRATES)) {
            param <- "events"
            paramValues <- dataInput$getOverallEventsUpTo(stage)
        }
    } else {
        if (dataInput$getNumberOfGroups() == 1) {
            if (any(abs(design$informationRates[2:stage] -
                    dataInput$getOverallSampleSizesUpTo(stage)[2:stage] /
                        dataInput$getOverallSampleSizesUpTo(1) * design$informationRates[1]) >
                    C_ACCEPT_DEVIATION_INFORMATIONRATES)) {
                param <- "sample sizes"
                paramValues <- dataInput$getOverallSampleSizesUpTo(stage)
            }
        } else if (dataInput$getNumberOfGroups() == 2) {
            # two groups: both arms must individually track the information rates
            if (any(abs(design$informationRates[2:stage] -
                    dataInput$getOverallSampleSizesUpTo(stage)[2:stage] /
                        dataInput$getOverallSampleSizesUpTo(1) * design$informationRates[1]) >
                    C_ACCEPT_DEVIATION_INFORMATIONRATES) ||
                    any(abs(design$informationRates[2:stage] -
                        dataInput$getOverallSampleSizesUpTo(stage, 2)[2:stage] /
                            dataInput$getOverallSampleSizesUpTo(1, 2) * design$informationRates[1]) >
                        C_ACCEPT_DEVIATION_INFORMATIONRATES)) {
                param <- "sample sizes"
                paramValues <- dataInput$getOverallSampleSizesUpTo(stage) +
                    dataInput$getOverallSampleSizesUpTo(stage, 2)
            }
        }
    }

    if (!is.na(param)) {
        warning("Observed ", param, " (",
.arrayToString(paramValues), ") not according to specified information rates (",
            .arrayToString(design$informationRates[1:stage]), ") in ",
            "group sequential design. ",
            "Test procedure might not control Type I error rate",
            call. = FALSE
        )
    }
}

# Asserts that the design is one-sided; multi-arm/enrichment simulation and
# analysis are only defined for one-sided testing.
.assertIsOneSidedDesign <- function(design, designType = c("multi-arm", "enrichment"),
        engineType = c("simulation", "analysis")) {
    if (design$sided == 2) {
        designType <- match.arg(designType)
        engineType <- match.arg(engineType)
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            designType, " ", engineType, " is only applicable for one-sided testing"
        )
    }
}

# Type predicates: identify multi-arm / enrichment variants of datasets,
# stage results, analysis results, and simulation results by class.
.isMultiArmDataset <- function(dataInput) {
    return(inherits(dataInput, "Dataset") && dataInput$getNumberOfGroups() > 2)
}

.isMultiArmStageResults <- function(stageResults) {
    return(inherits(stageResults, "StageResults") &&
        grepl("MultiArm", .getClassName(stageResults)))
}

.isEnrichmentStageResults <- function(stageResults) {
    return(inherits(stageResults, "StageResults") &&
        grepl("Enrichment", .getClassName(stageResults)))
}

.isEnrichmentConditionalPowerResults <- function(conditionalPowerResults) {
    return(inherits(conditionalPowerResults, "ConditionalPowerResults") &&
        grepl("Enrichment", .getClassName(conditionalPowerResults)))
}

.isMultiArmAnalysisResults <- function(analysisResults) {
    return(inherits(analysisResults, "AnalysisResultsMultiArm"))
}

.isMultiHypothesesAnalysisResults <- function(x) {
    return(.isMultiArmAnalysisResults(x) || .isEnrichmentAnalysisResults(x))
}

.isEnrichmentDataset <- function(dataInput) {
    return(inherits(dataInput, "Dataset") && dataInput$.enrichmentEnabled)
}

.isEnrichmentAnalysisResults <- function(analysisResults) {
    return(inherits(analysisResults, "AnalysisResultsEnrichment"))
}

.isMultiArmSimulationResults <- function(simulationResults) {
    return(inherits(simulationResults, "SimulationResults") &&
        grepl("MultiArm", .getClassName(simulationResults)))
}

.isEnrichmentSimulationResults <- function(simulationResults) {
    return(inherits(
        simulationResults,
        "SimulationResults") && grepl("Enrichment", .getClassName(simulationResults)))
}

# Asserts that 'stageResults' is a multi-arm stage results object.
.assertIsStageResultsMultiArm <- function(stageResults) {
    if (!inherits(stageResults, "StageResults")) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'stageResults' must be a multi-arm stage results object (is ",
            .getClassName(stageResults), ")"
        )
    }
    if (!.isMultiArmStageResults(stageResults)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'stageResults' must be a multi-arm object (is ",
            .getClassName(stageResults), ")"
        )
    }
}

# Asserts that 'stageResults' is a plain (non-multi-arm, non-enrichment)
# means/rates/survival stage results object.
.assertIsStageResultsNonMultiHypotheses <- function(stageResults) {
    if (inherits(stageResults, "StageResults") && .isMultiArmStageResults(stageResults)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'stageResults' must be a non-multi-arm object (is ",
            .getClassName(stageResults), ")"
        )
    }
    if (inherits(stageResults, "StageResults") && .isEnrichmentStageResults(stageResults)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'stageResults' must be a non-enrichment object (is ",
            .getClassName(stageResults), ")"
        )
    }
    allowedClasses <- c(
        "StageResultsMeans",
        "StageResultsRates",
        "StageResultsSurvival"
    )
    if (!(.getClassName(stageResults) %in% allowedClasses)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'stageResults' must be an instance of ",
            .arrayToString(allowedClasses, vectorLookAndFeelEnabled = FALSE),
            " (is '", .getClassName(stageResults), "')"
        )
    }
}

# Asserts that 'dataInput' is neither a multi-arm nor an enrichment dataset.
.assertIsDatasetNonMultiHypotheses <- function(dataInput) {
    if (.isMultiArmDataset(dataInput)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'dataInput' must be a non-multi-arm dataset (has ",
            dataInput$getNumberOfGroups(), " treatment arms)"
        )
    }
    if (.isEnrichmentDataset(dataInput)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'dataInput' must be a non-enrichment dataset (has ",
            dataInput$getNumberOfSubsets(), " subsets)"
        )
    }
}

# Asserts that 'analysisResults' is an 'AnalysisResults' instance.
.assertIsAnalysisResults <- function(analysisResults) {
    if (!inherits(analysisResults, "AnalysisResults")) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'analysisResults' must be a valid 'AnalysisResults' object ",
" (is '", .getClassName(analysisResults), "')" ) } } .isValidIntersectionTestMultiArm <- function(intersectionTest) { return(!is.null(intersectionTest) && length(intersectionTest) == 1 && !is.na(intersectionTest) && is.character(intersectionTest) && intersectionTest %in% C_INTERSECTION_TESTS_MULTIARMED) } .getCorrectedIntersectionTestMultiArmIfNecessary <- function(design, intersectionTest, userFunctionCallEnabled = TRUE) { .assertIsCharacter(intersectionTest, "intersectionTest") intersectionTest <- intersectionTest[1] if (.isTrialDesignConditionalDunnett(design) && intersectionTest != "Dunnett") { if (userFunctionCallEnabled) { message <- paste0("Intersection test '", intersectionTest, "' ") if (!.isValidIntersectionTestMultiArm(intersectionTest)) { message <- paste0(message, "is invalid, ") } message <- paste0(message, "will be ignored") message <- paste0(message, ifelse(!.isValidIntersectionTestMultiArm(intersectionTest), ", ", " ")) message <- paste0( message, "and 'Dunnett' will be used instead ", "because conditional Dunnett test was specified as design" ) warning(message, call. 
= FALSE) } intersectionTest <- "Dunnett" } return(intersectionTest) } .assertIsValidIntersectionTestMultiArm <- function(design, intersectionTest) { .assertIsCharacter(intersectionTest, "intersectionTest") intersectionTest <- intersectionTest[1] if (!.isValidIntersectionTestMultiArm(intersectionTest)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'intersectionTest' (", intersectionTest, ") must be one of ", .arrayToString(C_INTERSECTION_TESTS_MULTIARMED, encapsulate = TRUE) ) } if (.isTrialDesignConditionalDunnett(design) && intersectionTest != "Dunnett") { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "intersection test ('", intersectionTest, "') must be 'Dunnett' ", "because conditional Dunnett test was specified as design" ) } } .isValidIntersectionTestEnrichment <- function(intersectionTest) { return(!is.null(intersectionTest) && length(intersectionTest) == 1 && !is.na(intersectionTest) && is.character(intersectionTest) && intersectionTest %in% C_INTERSECTION_TESTS_ENRICHMENT) } .assertIsValidIntersectionTestEnrichment <- function(design, intersectionTest) { .assertIsCharacter(intersectionTest, "intersectionTest") intersectionTest <- intersectionTest[1] if (!.isValidIntersectionTestEnrichment(intersectionTest)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'intersectionTest' (", intersectionTest, ") must be one of ", .arrayToString(C_INTERSECTION_TESTS_ENRICHMENT, encapsulate = TRUE) ) } return(intersectionTest) } .ignoreParameterIfNotUsed <- function(paramName, paramValue, requirementLogical, requirementFailedReason, prefix = NA_character_) { if (all(is.na(paramValue)) || requirementLogical) { return(paramValue) } if (is.na(prefix) || trimws(prefix) == "") { prefix <- "" } else { prefix <- paste0(trimws(prefix), " ") } warning(prefix, "'", paramName, "' (", .arrayToString(paramValue), ") will be ignored because ", requirementFailedReason, call. 
= FALSE
    )
    return(NA_real_)
}

#
# This is a workaround for the following R core bug:
#
# rCoreBugDemonstration <- function(stageX, ...) {
#     result <- list(...); result$stageX <- stageX; return(result)
# }
#
# # bug: stage will be removed, stageX gets the value of stage
# rCoreBugDemonstration("A", stage = 1)
#
# # everything works as expected
# rCoreBugDemonstration("A", state = 1)
#

# Stops if a 'stage' argument was (possibly via partial matching, see the
# demonstration above) passed to a function that does not support it.
.stopInCaseOfIllegalStageDefinition <- function(stageResults, ...) {
    stage <- list(...)[["stage"]]
    # detect the partial-matching bug: a plain stage number arrived in
    # the 'stageResults' position
    if (is.null(stage) && is.numeric(stageResults) && stageResults %in% 1L:C_KMAX_UPPER_BOUND) {
        stage <- stageResults
    }
    if (!is.null(stage)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'stage' (", stage, ") can only be defined in getStageResults() or getAnalysisResults()"
        )
    }
}

# Variant that only inspects the optional arguments for a forbidden 'stage'.
.stopInCaseOfIllegalStageDefinition2 <- function(...) {
    forbiddenStage <- .getOptionalArgument("stage", ...)
    if (!is.null(forbiddenStage)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'stage' (", forbiddenStage, ") can only be defined in getStageResults() or getAnalysisResults()"
        )
    }
}

# Asserts that 'tolerance' is a single number in the open interval (0; 0.1).
.assertIsValidTolerance <- function(tolerance) {
    .assertIsSingleNumber(tolerance, "tolerance")
    .assertIsInOpenInterval(tolerance, "tolerance", lower = 0, upper = 0.1)
}

# Returns TRUE iff 'varianceOption' is a single known multi-arm option.
.isValidVarianceOptionMultiArmed <- function(varianceOption) {
    return(!is.null(varianceOption) &&
        length(varianceOption) == 1 &&
        !is.na(varianceOption) &&
        is.character(varianceOption) &&
        varianceOption %in% C_VARIANCE_OPTIONS_MULTIARMED)
}

# Asserts that 'varianceOption' is valid for multi-arm designs and compatible
# with a conditional Dunnett design.
.assertIsValidVarianceOptionMultiArmed <- function(design, varianceOption) {
    if (!.isValidVarianceOptionMultiArmed(varianceOption)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'varianceOption' should be one of ",
            .arrayToString(C_VARIANCE_OPTIONS_MULTIARMED, encapsulate = TRUE)
        )
    }
    if (.isTrialDesignConditionalDunnett(design) && varianceOption != C_VARIANCE_OPTION_DUNNETT) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "variance option ('", varianceOption, "') must be '",
            C_VARIANCE_OPTION_DUNNETT, "' ",
            "because conditional Dunnett test was specified as design"
        )
    }
}

# Returns TRUE iff 'varianceOption' is a single known enrichment option.
.isValidVarianceOptionEnrichment <- function(varianceOption) {
    return(!is.null(varianceOption) &&
        length(varianceOption) == 1 &&
        !is.na(varianceOption) &&
        is.character(varianceOption) &&
        varianceOption %in% C_VARIANCE_OPTIONS_ENRICHMENT)
}

# Asserts that 'varianceOption' is valid for enrichment designs.
.assertIsValidVarianceOptionEnrichment <- function(design, varianceOption) {
    if (!.isValidVarianceOptionEnrichment(varianceOption)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'varianceOption' should be one of ",
            .arrayToString(C_VARIANCE_OPTIONS_ENRICHMENT, encapsulate = TRUE)
        )
    }
}

# Asserts that 'intervalFormat' contains exactly two %s placeholders,
# e.g. "[%s; %s]".
.assertIsValidSummaryIntervalFormat <- function(intervalFormat) {
    .assertIsSingleCharacter(intervalFormat, "intervalFormat")
    # "[%s; %s]"
    if (!grepl("^[^%]*%s[^%]*%s[^%]*$", intervalFormat)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'intervalFormat' (", intervalFormat, ") has an invalid format; ",
            "the control character %s must appear exactly twice; ",
            "to change it use 'options(\"rpact.summary.intervalFormat\" = \"[%s; %s]\")'"
        )
    }
}

# Returns TRUE iff 'showSource' is one of the special plot-source keywords.
.isSpecialPlotShowSourceArgument <- function(showSource) {
    return(is.character(showSource) && showSource %in% C_PLOT_SHOW_SOURCE_ARGUMENTS)
}

# Validates 'typeOfSelection' (normalizing "rbest" to "rBest") together with
# its companion parameters 'rValue' and 'epsilonValue'; returns the
# normalized selection type.
.assertIsValidTypeOfSelection <- function(typeOfSelection, rValue, epsilonValue, activeArms) {
    .assertIsCharacter(typeOfSelection, "typeOfSelection")
    typeOfSelection <- typeOfSelection[1]
    if (typeOfSelection == "rbest") {
        typeOfSelection <- "rBest"
    }
    if (!(typeOfSelection %in% C_TYPES_OF_SELECTION)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'typeOfSelection' (", typeOfSelection, ") must be one of ",
            .arrayToString(C_TYPES_OF_SELECTION, encapsulate = TRUE)
        )
    }

    if (typeOfSelection == "rBest") {
        .assertIsSingleNumber(rValue, "rValue", naAllowed = FALSE, noDefaultAvailable = TRUE)
        if (activeArms == 1) {
            warning("'typeOfSelection' (\"", typeOfSelection, "\") will be ignored ",
                "because 'activeArms' or 'populations' = 1",
                call.
= FALSE
            )
        } else if (rValue > activeArms) {
            warning("'rValue' (", rValue, ") is larger than activeArms or populations ",
                "(", activeArms, ") and will be ignored",
                call. = FALSE
            )
        }
    } else if (!is.na(rValue)) {
        warning("'rValue' (", rValue, ") will be ignored because 'typeOfSelection' != \"rBest\"", call. = FALSE)
    }

    if (typeOfSelection == "epsilon") {
        .assertIsSingleNumber(epsilonValue, "epsilonValue", naAllowed = FALSE, noDefaultAvailable = TRUE)
        .assertIsInClosedInterval(epsilonValue, "epsilonValue", lower = 0, upper = NULL, naAllowed = TRUE)
    } else if (!is.na(epsilonValue)) {
        warning("'epsilonValue' (", epsilonValue, ") will be ignored ",
            "because 'typeOfSelection' != \"epsilon\"",
            call. = FALSE
        )
    }
    return(typeOfSelection)
}

# Asserts that 'successCriterion' is one of the known values and returns it.
.assertIsValidSuccessCriterion <- function(successCriterion) {
    .assertIsCharacter(successCriterion, "successCriterion")
    successCriterion <- successCriterion[1]
    if (!(successCriterion %in% C_SUCCESS_CRITERIONS)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'successCriterion' (", successCriterion, ") must be one of ",
            .arrayToString(C_SUCCESS_CRITERIONS, encapsulate = TRUE)
        )
    }
    return(successCriterion)
}

# Asserts that 'effectMeasure' is one of the known values and returns it.
.assertIsValidEffectMeasure <- function(effectMeasure) {
    .assertIsCharacter(effectMeasure, "effectMeasure")
    effectMeasure <- effectMeasure[1]
    if (!(effectMeasure %in% C_EFFECT_MEASURES)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'effectMeasure' (", effectMeasure, ") must be one of ",
            .arrayToString(C_EFFECT_MEASURES, encapsulate = TRUE)
        )
    }
    return(effectMeasure)
}

# Validates that 'x' is a numeric matrix (optionally coercing single values
# or vectors when 'returnSingleValueAsMatrix' is TRUE), with the expected
# number of columns and, unless 'naAllowed', no NA entries; returns 'x'.
.assertIsValidMatrix <- function(x, argumentName, ...,
        expectedNumberOfColumns = NA_integer_,
        naAllowed = FALSE, returnSingleValueAsMatrix = FALSE) {
    if (missing(x) || is.null(x) || length(x) == 0) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'", argumentName, "' must be a valid matrix")
    }

    if (returnSingleValueAsMatrix && !is.matrix(x) &&
            (is.numeric(x) || is.character(x) || is.logical(x))) {
        if (length(x) == 1) {
            x <- matrix(x)
        } else if (length(x) > 1 &&
                !is.na(expectedNumberOfColumns)) {
            if (length(x) %% expectedNumberOfColumns != 0) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "the length of '", argumentName, "' (", .arrayToString(x),
                    ") must be a divisor or a multiple ", expectedNumberOfColumns
                )
            }
            x <- matrix(x, ncol = expectedNumberOfColumns)
        }
    }

    if (!is.matrix(x)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .getClassName(x), ") must be a valid matrix")
    }
    if (!naAllowed && any(is.na(x))) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argumentName, "' (", .arrayToString(x), ") must not contain NA's")
    }
    if (!is.numeric(x)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", argumentName, "' (", .arrayToString(x), ") must be a valid numeric matrix"
        )
    }
    if (!is.na(expectedNumberOfColumns) && ncol(x) != expectedNumberOfColumns) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", argumentName, "' (", .arrayToString(x),
            ") must be a numeric matrix with ", expectedNumberOfColumns, " columns"
        )
    }
    return(invisible(x))
}

# Asserts that 'decisionMatrix' has 2 or 4 rows, 'kMax' columns, and
# non-decreasing values within each column.
.assertIsValidDecisionMatrix <- function(decisionMatrix, kMax) {
    .assertIsValidMatrix(decisionMatrix, "decisionMatrix", naAllowed = FALSE)
    if (!(nrow(decisionMatrix) %in% c(2, 4))) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'decisionMatrix' must have two or four rows")
    }
    if (ncol(decisionMatrix) != kMax) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'decisionMatrix' must have 'kMax' ",
            "(= length(informationRates) = ", kMax, ") columns"
        )
    }
    if (any(decisionMatrix[2:nrow(decisionMatrix), ] <
            decisionMatrix[1:(nrow(decisionMatrix) - 1), ])) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'decisionMatrix' needs to be increasing in each column")
    }
}

# Asserts that 'typeOfShape' is one of the known values and returns it.
.assertIsValidTypeOfShape <- function(typeOfShape) {
    .assertIsCharacter(typeOfShape, "typeOfShape")
    typeOfShape <- typeOfShape[1]
    if (!(typeOfShape %in% C_TYPES_OF_SHAPE)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'typeOfShape' (", typeOfShape, ") must be one of ",
            .arrayToString(C_TYPES_OF_SHAPE, encapsulate = TRUE)
        )
    }
    return(typeOfShape)
}
.assertIsValidEffectMatrixMeans <- function(typeOfShape, effectMatrix, muMaxVector, gED50, gMax, slope) { if (typeOfShape == "userDefined") { effectMatrix <- .assertIsValidMatrix(effectMatrix, "effectMatrix", expectedNumberOfColumns = gMax, naAllowed = FALSE, returnSingleValueAsMatrix = TRUE ) .assertIsNumericVector(muMaxVector, "muMaxVector", naAllowed = TRUE) if (!all(is.na(muMaxVector)) && !identical(muMaxVector, C_ALTERNATIVE_POWER_SIMULATION_DEFAULT)) { warning("'muMaxVector' (", .arrayToString(muMaxVector), ") will be ignored because it will be set to first column of 'effectMatrix'", call. = FALSE ) } } else if (!is.null(effectMatrix)) { warning("'effectMatrix' will be ignored because 'typeOfShape' is defined as '", typeOfShape, "'", call. = FALSE) } if (typeOfShape == "sigmoidEmax") { .assertIsNumericVector(muMaxVector, "muMaxVector", naAllowed = FALSE, noDefaultAvailable = TRUE) .assertIsSingleNumber(gED50, "gED50", naAllowed = FALSE, noDefaultAvailable = TRUE) effectMatrix <- matrix(muMaxVector, nrow = length(muMaxVector), ncol = 1) %*% matrix((1:gMax)^slope / (gED50^slope + (1:gMax)^slope), nrow = 1, ncol = gMax) return(effectMatrix) } if (!is.null(gED50) && !is.na(gED50)) { warning("'gED50' (", gED50, ") will be ignored because 'typeOfShape' is defined as '", typeOfShape, "'", call. = FALSE) } if (typeOfShape == "linear") { .assertIsNumericVector(muMaxVector, "muMaxVector", naAllowed = FALSE, noDefaultAvailable = TRUE) effectMatrix <- matrix(muMaxVector, nrow = length(muMaxVector), ncol = 1) %*% matrix((1:gMax) / gMax, nrow = 1, ncol = gMax) } if (!is.null(slope) && !is.na(slope) && slope != 1) { warning("'slope' (", slope, ") will be ignored because 'typeOfShape' is defined as '", typeOfShape, "'", call. 
= FALSE) } return(effectMatrix) } .assertIsValidEffectMatrixRates <- function(typeOfShape, effectMatrix, piMaxVector, piControl, gED50, gMax, slope) { if (typeOfShape == "userDefined") { effectMatrix <- .assertIsValidMatrix(effectMatrix, "effectMatrix", expectedNumberOfColumns = gMax, naAllowed = FALSE, returnSingleValueAsMatrix = TRUE ) .assertIsInOpenInterval(effectMatrix, "effectMatrix", 0, 1, naAllowed = FALSE) .assertIsNumericVector(piMaxVector, "piMaxVector", naAllowed = TRUE) if (!all(is.na(piMaxVector)) && !identical(piMaxVector, C_PI_1_DEFAULT)) { warning("'piMaxVector' (", .arrayToString(piMaxVector), ") will be ignored because it will be set to first column of 'effectMatrix'", call. = FALSE ) } } else if (!is.null(effectMatrix)) { warning("'effectMatrix' will be ignored because 'typeOfShape' is defined as '", typeOfShape, "'", call. = FALSE) } if (typeOfShape == "sigmoidEmax") { .assertIsNumericVector(piMaxVector, "piMaxVector", naAllowed = FALSE, noDefaultAvailable = TRUE) .assertIsInOpenInterval(piMaxVector, "piMaxVector", 0, 1, naAllowed = FALSE) .assertIsSingleNumber(gED50, "gED50", naAllowed = FALSE, noDefaultAvailable = TRUE) effectMatrix <- matrix(piMaxVector, nrow = length(piMaxVector), ncol = 1) %*% matrix((1:gMax)^slope / (gED50^slope + (1:gMax)^slope), nrow = 1, ncol = gMax) return(effectMatrix) } if (!is.null(gED50) && !is.na(gED50)) { warning("'gED50' (", gED50, ") will be ignored because 'typeOfShape' is defined as '", typeOfShape, "'", call. 
= FALSE) } if (typeOfShape == "linear") { .assertIsNumericVector(piMaxVector, "piMaxVector", naAllowed = FALSE, noDefaultAvailable = TRUE) .assertIsInOpenInterval(piMaxVector, "piMaxVector", 0, 1, naAllowed = FALSE) .assertIsSingleNumber(piControl, "piControl", naAllowed = FALSE, noDefaultAvailable = TRUE) .assertIsInOpenInterval(piControl, "piControl", 0, 1, naAllowed = FALSE) effectMatrix <- piControl + matrix(piMaxVector - piControl, nrow = length(piMaxVector), ncol = 1) %*% matrix((1:gMax) / gMax, nrow = 1, ncol = gMax) } if (!is.null(slope) && !is.na(slope) && slope != 1) { warning("'slope' (", slope, ") will be ignored because 'typeOfShape' is defined as '", typeOfShape, "'", call. = FALSE) } return(effectMatrix) } .assertIsValidEffectMatrixSurvival <- function(typeOfShape, effectMatrix, omegaMaxVector, gED50, gMax, slope) { if (typeOfShape == "userDefined") { effectMatrix <- .assertIsValidMatrix(effectMatrix, "effectMatrix", expectedNumberOfColumns = gMax, naAllowed = FALSE, returnSingleValueAsMatrix = TRUE ) .assertIsInOpenInterval(effectMatrix, "effectMatrix", 0, NULL, naAllowed = FALSE) .assertIsNumericVector(omegaMaxVector, "omegaMaxVector", naAllowed = TRUE) if (!all(is.na(omegaMaxVector)) && !identical(omegaMaxVector, C_RANGE_OF_HAZARD_RATIOS_DEFAULT)) { warning("'omegaMaxVector' (", .arrayToString(omegaMaxVector), ") will be ignored because it will be set to first column of 'effectMatrix'", call. = FALSE ) } } else if (!is.null(effectMatrix)) { warning("'effectMatrix' will be ignored because 'typeOfShape' is defined as '", typeOfShape, "'", call. 
= FALSE) } if (typeOfShape == "sigmoidEmax") { .assertIsNumericVector(omegaMaxVector, "omegaMaxVector", naAllowed = FALSE, noDefaultAvailable = TRUE) .assertIsInOpenInterval(omegaMaxVector, "omegaMaxVector", 0, NULL, naAllowed = FALSE) .assertIsSingleNumber(gED50, "gED50", naAllowed = FALSE, noDefaultAvailable = TRUE) effectMatrix <- matrix(omegaMaxVector - 1, nrow = length(omegaMaxVector), ncol = 1) %*% matrix((1:gMax)^slope / (gED50^slope + (1:gMax)^slope), nrow = 1, ncol = gMax) + 1 return(effectMatrix) } if (!is.null(gED50) && !is.na(gED50)) { warning("'gED50' (", gED50, ") will be ignored because 'typeOfShape' is defined as '", typeOfShape, "'", call. = FALSE) } if (typeOfShape == "linear") { .assertIsNumericVector(omegaMaxVector, "omegaMaxVector", naAllowed = FALSE, noDefaultAvailable = TRUE) .assertIsInOpenInterval(omegaMaxVector, "omegaMaxVector", 0, NULL, naAllowed = FALSE) effectMatrix <- matrix(omegaMaxVector - 1, nrow = length(omegaMaxVector), ncol = 1) %*% matrix((1:gMax) / gMax, nrow = 1, ncol = gMax) + 1 } if (!is.null(slope) && !is.na(slope) && slope != 1) { warning("'slope' (", slope, ") will be ignored because 'typeOfShape' is defined as '", typeOfShape, "'", call. 
= FALSE)
    }
    return(effectMatrix)
}

# Validates the stage-wise planned (cumulative) sample sizes:
# 'plannedSubjects' must be an integer vector of length 'kMax' with all
# values >= 1 and strictly increasing across stages. Stops with an
# illegal-argument exception otherwise; returns nothing.
.assertIsValidPlannedSubjects <- function(plannedSubjects, kMax) {
    .assertIsIntegerVector(plannedSubjects, "plannedSubjects", validateType = FALSE)
    if (length(plannedSubjects) != kMax) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'plannedSubjects' (", .arrayToString(plannedSubjects),
            ") must have length 'kMax' (", kMax, ")"
        )
    }
    .assertIsInClosedInterval(plannedSubjects, "plannedSubjects", lower = 1, upper = NULL)
    .assertValuesAreStrictlyIncreasing(plannedSubjects, "plannedSubjects")
}

# Returns TRUE iff 'design' is an inverse normal or group sequential design
# whose type-of-design code starts with "as", i.e., an alpha spending design.
.isAlphaSpendingDesign <- function(design) {
    if (!.isTrialDesignInverseNormalOrGroupSequential(design)) {
        return(FALSE)
    }
    return(grepl("^as", design$typeOfDesign))
}

# Returns TRUE iff delayed information is effectively enabled: the delayed
# information values (taken from 'design' if given, which takes precedence
# over the 'delayedInformation' argument) are all non-NA and at least one of
# them is >= 1e-03. Either 'design' or 'delayedInformation' must be given.
.isDelayedInformationEnabled <- function(..., design = NULL, delayedInformation = NULL) {
    if (is.null(design) && is.null(delayedInformation)) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "either 'design' or 'delayedInformation' must be specified")
    }
    if (!is.null(design)) {
        # only inverse normal / group sequential designs carry delayed information
        if (!.isTrialDesignInverseNormalOrGroupSequential(design)) {
            return(FALSE)
        }
        delayedInformation <- design[["delayedInformation"]]
    }
    if (is.null(delayedInformation)) {
        return(FALSE)
    }
    return(all(!is.na(delayedInformation)) && any(delayedInformation >= 1e-03))
}
rpact/R/f_analysis_multiarm_survival.R0000644000176200001440000015412214445307575017727 0ustar liggesusers## |
## | *Analysis of survival in multi-arm designs with adaptive test*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr,
23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_logger.R NULL #' @title #' Get Analysis Results Survival #' #' @description #' Returns an analysis result object. #' #' @param design The trial design. #' #' @return Returns a \code{AnalysisResultsSurvival} object. #' #' @keywords internal #' #' @noRd #' .getAnalysisResultsSurvivalMultiArm <- function(..., design, dataInput) { if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsSurvivalInverseNormalMultiArm( design = design, dataInput = dataInput, ... )) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsSurvivalFisherMultiArm( design = design, dataInput = dataInput, ... )) } if (.isTrialDesignConditionalDunnett(design)) { return(.getAnalysisResultsSurvivalConditionalDunnettMultiArm( design = design, dataInput = dataInput, ... )) } .stopWithWrongDesignMessage(design) } .getAnalysisResultsSurvivalInverseNormalMultiArm <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsSurvivalInverseNormalMultiArm", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) results <- AnalysisResultsMultiArmInverseNormal(design = design, dataInput = dataInput) results <- .getAnalysisResultsSurvivalMultiArmAll( results = results, design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, directionUpper = directionUpper, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance ) return(results) } .getAnalysisResultsSurvivalFisherMultiArm <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsSurvivalFisherMultiArm", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
)
    results <- AnalysisResultsMultiArmFisher(design = design, dataInput = dataInput)
    results <- .getAnalysisResultsSurvivalMultiArmAll(
        results = results, design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        directionUpper = directionUpper, thetaH0 = thetaH0, thetaH1 = thetaH1,
        nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
        tolerance = tolerance, iterations = iterations, seed = seed
    )
    return(results)
}

# Analysis results for survival endpoints in a multi-arm design based on the
# conditional Dunnett test. Validates the design, resolves the stage from the
# optional arguments, and delegates the computation to
# .getAnalysisResultsSurvivalMultiArmAll.
# Returns an AnalysisResultsConditionalDunnett object.
.getAnalysisResultsSurvivalConditionalDunnettMultiArm <- function(..., design, dataInput,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT,
        thetaH1 = NA_real_, nPlanned = NA_real_,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .assertIsTrialDesignConditionalDunnett(design)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    .warnInCaseOfUnknownArguments(
        functionName = ".getAnalysisResultsSurvivalConditionalDunnettMultiArm",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )
    results <- AnalysisResultsConditionalDunnett(design = design, dataInput = dataInput)
    # BUGFIX: unlike the Fisher variant, this function has no 'iterations'/'seed'
    # parameters (the conditional Dunnett test needs no simulation), yet the
    # previous code forwarded the undefined symbols 'iterations' and 'seed',
    # which raises "object 'iterations' not found" as soon as those promises are
    # forced. They are now omitted, matching the inverse normal variant above.
    results <- .getAnalysisResultsSurvivalMultiArmAll(
        results = results, design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        directionUpper = directionUpper, thetaH0 = thetaH0, thetaH1 = thetaH1,
        nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
        tolerance = tolerance
    )
    return(results)
}

.getAnalysisResultsSurvivalMultiArmAll <- function(..., results, design, dataInput,
        intersectionTest, stage, directionUpper, thetaH0, thetaH1, nPlanned,
        allocationRatioPlanned, tolerance, iterations, seed) {
    startTime <- Sys.time()
    intersectionTest <- .getCorrectedIntersectionTestMultiArmIfNecessary(design, intersectionTest)
    stageResults <- .getStageResultsSurvivalMultiArm(
        design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        thetaH0 = thetaH0, directionUpper = directionUpper
    )
    results$.setStageResults(stageResults)
    .logProgress("Stage results calculated", startTime = startTime)
    gMax <- stageResults$getGMax()
    thetaH1 <- .assertIsValidThetaH1ForMultiArm(thetaH1, stageResults, stage, results = results)
    .setValueAndParameterType(results, "intersectionTest", intersectionTest, C_INTERSECTION_TEST_MULTIARMED_DEFAULT)
    .setValueAndParameterType(results, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT)
    # BUGFIX: compare 'thetaH0' against the survival default (hazard ratio 1,
    # the default of all callers in this file), not the means default
    # (C_THETA_H0_MEANS_DEFAULT), which was an apparent copy-paste slip and
    # mislabels the default thetaH0 = 1 as user-defined.
    .setValueAndParameterType(results, "thetaH0", thetaH0, C_THETA_H0_SURVIVAL_DEFAULT)
    .setConditionalPowerArguments(results, dataInput, nPlanned, allocationRatioPlanned)
    .setNPlannedAndThetaH1(results, nPlanned, thetaH1)
    startTime <- Sys.time()
    if (!.isTrialDesignConditionalDunnett(design)) {
        results$.closedTestResults <- getClosedCombinationTestResults(stageResults = stageResults)
    } else {
        results$.closedTestResults <- getClosedConditionalDunnettTestResults(
            stageResults = stageResults, design = design, stage = stage
        )
    }
    .logProgress("Closed test calculated", startTime
= startTime) if (design$kMax > 1) { # conditional power startTime <- Sys.time() if (.isTrialDesignFisher(design)) { results$.conditionalPowerResults <- .getConditionalPowerSurvivalMultiArm( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, iterations = iterations, seed = seed ) .synchronizeIterationsAndSeed(results) } else { results$.conditionalPowerResults <- .getConditionalPowerSurvivalMultiArm( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1 ) results$conditionalPower <- results$.conditionalPowerResults$conditionalPower results$.setParameterType("conditionalPower", C_PARAM_GENERATED) } results$thetaH1 <- matrix(results$.conditionalPowerResults$thetaH1, ncol = 1) .logProgress("Conditional power calculated", startTime = startTime) # CRP - conditional rejection probabilities startTime <- Sys.time() results$conditionalRejectionProbabilities <- .getConditionalRejectionProbabilitiesMultiArm( stageResults = stageResults, stage = stage, iterations = iterations, seed = seed ) results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_GENERATED) .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime) } else { results$.setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE) results$.setParameterType("conditionalPowerSimulated", C_PARAM_NOT_APPLICABLE) results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_NOT_APPLICABLE) } # RCI - repeated confidence interval repeatedConfidenceIntervalLowerBounds <- numeric(0) repeatedConfidenceIntervalUpperBounds <- numeric(0) startTime <- Sys.time() repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsSurvivalMultiArm( design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, tolerance = tolerance ) results$repeatedConfidenceIntervalLowerBounds <- 
matrix(rep(NA_real_, gMax * design$kMax), nrow = gMax, ncol = design$kMax) results$repeatedConfidenceIntervalUpperBounds <- results$repeatedConfidenceIntervalLowerBounds for (k in 1:design$kMax) { for (treatmentArm in 1:gMax) { results$repeatedConfidenceIntervalLowerBounds[treatmentArm, k] <- repeatedConfidenceIntervals[treatmentArm, 1, k] results$repeatedConfidenceIntervalUpperBounds[treatmentArm, k] <- repeatedConfidenceIntervals[treatmentArm, 2, k] } } results$.setParameterType("repeatedConfidenceIntervalLowerBounds", C_PARAM_GENERATED) results$.setParameterType("repeatedConfidenceIntervalUpperBounds", C_PARAM_GENERATED) # repeated p-value results$repeatedPValues <- .getRepeatedPValuesMultiArm(stageResults = stageResults, tolerance = tolerance) results$.setParameterType("repeatedPValues", C_PARAM_GENERATED) return(results) } .getStageResultsSurvivalMultiArm <- function(..., design, dataInput, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT, calculateSingleStepAdjusted = FALSE, userFunctionCallEnabled = FALSE) { .assertIsTrialDesign(design) .assertIsDatasetSurvival(dataInput) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .assertIsValidDirectionUpper(directionUpper, design$sided) .assertIsSingleLogical(calculateSingleStepAdjusted, "calculateSingleStepAdjusted") .warnInCaseOfUnknownArguments( functionName = ".getStageResultsSurvivalMultiArm", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) gMax <- dataInput$getNumberOfGroups() - 1 kMax <- design$kMax intersectionTest <- .getCorrectedIntersectionTestMultiArmIfNecessary( design, intersectionTest, userFunctionCallEnabled ) .assertIsValidIntersectionTestMultiArm(design, intersectionTest) stageResults <- StageResultsMultiArmSurvival( design = design, dataInput = dataInput, intersectionTest = intersectionTest, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER), directionUpper = directionUpper, stage = stage ) effectSizes <- matrix(NA_real_, nrow = gMax, ncol = kMax) testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax) separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax) dimnames(testStatistics) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = "")) dimnames(overallTestStatistics) <- list( paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = "") ) dimnames(separatePValues) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = "")) dimnames(overallPValues) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = "")) for (k in 1:stage) { for (treatmentArm in 1:gMax) { effectSizes[treatmentArm, k] <- exp(dataInput$getOverallLogRanks(stage = k, group = treatmentArm) * (1 + dataInput$getOverallAllocationRatios(stage = k, group = treatmentArm)) / sqrt(dataInput$getOverallAllocationRatios(stage = k, group = treatmentArm) * dataInput$getOverallEvents(stage = k, group = treatmentArm))) testStatistics[treatmentArm, k] <- dataInput$getLogRanks(stage = k, group = treatmentArm) - sqrt(dataInput$getEvents(stage = k, group = treatmentArm)) * sqrt(dataInput$getAllocationRatios(stage = k, group = treatmentArm)) / (1 + dataInput$getAllocationRatios(stage = k, group = treatmentArm)) * 
log(thetaH0) overallTestStatistics[treatmentArm, k] <- dataInput$getOverallLogRanks(stage = k, group = treatmentArm) - sqrt(dataInput$getOverallEvents(stage = k, group = treatmentArm)) * sqrt(dataInput$getOverallAllocationRatios(stage = k, group = treatmentArm)) / (1 + dataInput$getOverallAllocationRatios(stage = k, group = treatmentArm)) * log(thetaH0) if (directionUpper) { separatePValues[treatmentArm, k] <- 1 - stats::pnorm(testStatistics[treatmentArm, k]) overallPValues[treatmentArm, k] <- 1 - stats::pnorm(overallTestStatistics[treatmentArm, k]) } else { separatePValues[treatmentArm, k] <- stats::pnorm(testStatistics[treatmentArm, k]) overallPValues[treatmentArm, k] <- stats::pnorm(overallTestStatistics[treatmentArm, k]) } } } .setWeightsToStageResults(design, stageResults) # Calculation of single stage adjusted p-Values and overall test statistics # for determination of RCIs if (calculateSingleStepAdjusted) { singleStepAdjustedPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax) combInverseNormal <- matrix(NA_real_, nrow = gMax, ncol = kMax) combFisher <- matrix(NA_real_, nrow = gMax, ncol = kMax) if (.isTrialDesignInverseNormal(design)) { weightsInverseNormal <- stageResults$weightsInverseNormal } else if (.isTrialDesignFisher(design)) { weightsFisher <- stageResults$weightsFisher } for (k in 1:stage) { selected <- sum(!is.na(separatePValues[, k])) allocationRatiosSelected <- as.numeric(na.omit( dataInput$getAllocationRatios(stage = k, group = (1:gMax)) )) sigma <- sqrt(allocationRatiosSelected / (1 + allocationRatiosSelected)) %*% sqrt(t(allocationRatiosSelected / (1 + allocationRatiosSelected))) diag(sigma) <- 1 for (treatmentArm in 1:gMax) { if ((intersectionTest == "Bonferroni") || (intersectionTest == "Simes")) { if (.isTrialDesignGroupSequential(design)) { overallPValues[treatmentArm, k] <- min(1, overallPValues[treatmentArm, k] * selected) } else { singleStepAdjustedPValues[treatmentArm, k] <- min(1, separatePValues[treatmentArm, k] * selected) } } 
else if (intersectionTest == "Sidak") { if (.isTrialDesignGroupSequential(design)) { overallPValues[treatmentArm, k] <- 1 - (1 - overallPValues[treatmentArm, k])^selected } else { singleStepAdjustedPValues[treatmentArm, k] <- 1 - (1 - separatePValues[treatmentArm, k])^selected } } else if (intersectionTest == "Dunnett") { if (!is.na(testStatistics[treatmentArm, k])) { df <- NA_real_ singleStepAdjustedPValues[treatmentArm, k] <- 1 - .getMultivariateDistribution( type = "normal", upper = ifelse(directionUpper, testStatistics[treatmentArm, k], -testStatistics[treatmentArm, k]), sigma = sigma, df = df ) } } if (.isTrialDesignInverseNormal(design)) { combInverseNormal[treatmentArm, k] <- (weightsInverseNormal[1:k] %*% .getOneMinusQNorm(singleStepAdjustedPValues[treatmentArm, 1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) } else if (.isTrialDesignFisher(design)) { combFisher[treatmentArm, k] <- prod(singleStepAdjustedPValues[treatmentArm, 1:k]^weightsFisher[1:k]) } } } stageResults$overallTestStatistics <- overallTestStatistics stageResults$overallPValues <- overallPValues stageResults$effectSizes <- effectSizes stageResults$testStatistics <- testStatistics stageResults$separatePValues <- separatePValues stageResults$singleStepAdjustedPValues <- singleStepAdjustedPValues stageResults$.setParameterType("singleStepAdjustedPValues", C_PARAM_GENERATED) if (.isTrialDesignFisher(design)) { stageResults$combFisher <- combFisher stageResults$.setParameterType("combFisher", C_PARAM_GENERATED) } else if (.isTrialDesignInverseNormal(design)) { stageResults$combInverseNormal <- combInverseNormal stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED) } } else { stageResults$overallTestStatistics <- overallTestStatistics stageResults$overallPValues <- overallPValues stageResults$effectSizes <- effectSizes stageResults$testStatistics <- testStatistics stageResults$separatePValues <- separatePValues } return(stageResults) } .getRootThetaSurvivalMultiArm <- function(..., 
design, dataInput, treatmentArm, stage, directionUpper, intersectionTest, thetaLow, thetaUp, firstParameterName, secondValue, tolerance) { result <- .getOneDimensionalRoot( function(theta) { stageResults <- .getStageResultsSurvivalMultiArm( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, intersectionTest = intersectionTest, calculateSingleStepAdjusted = TRUE ) firstValue <- stageResults[[firstParameterName]][treatmentArm, stage] return(firstValue - secondValue) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance, callingFunctionInformation = ".getRootThetaSurvivalMultiArm" ) return(result) } .getUpperLowerThetaSurvivalMultiArm <- function(..., design, dataInput, theta, treatmentArm, stage, directionUpper, conditionFunction, intersectionTest, firstParameterName, secondValue) { stageResults <- .getStageResultsSurvivalMultiArm( design = design, dataInput = dataInput, stage = stage, thetaH0 = exp(theta), directionUpper = directionUpper, intersectionTest = intersectionTest, calculateSingleStepAdjusted = TRUE ) firstValue <- stageResults[[firstParameterName]][treatmentArm, stage] maxSearchIterations <- 30 while (conditionFunction(secondValue, firstValue)) { theta <- 2 * theta stageResults <- .getStageResultsSurvivalMultiArm( design = design, dataInput = dataInput, stage = stage, thetaH0 = exp(theta), directionUpper = directionUpper, intersectionTest = intersectionTest, calculateSingleStepAdjusted = TRUE ) firstValue <- stageResults[[firstParameterName]][treatmentArm, stage] maxSearchIterations <- maxSearchIterations - 1 if (maxSearchIterations < 0) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, sprintf( paste0( "failed to find theta (k = %s, firstValue = %s, ", "secondValue = %s, levels(firstValue) = %s, theta = %s)" ), stage, stageResults[[firstParameterName]][treatmentArm, stage], secondValue, firstValue, theta ) ) } } return(theta) } .getRepeatedConfidenceIntervalsSurvivalMultiArmAll <- function(..., design, 
dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) { .assertIsValidIntersectionTestMultiArm(design, intersectionTest) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) stageResults <- .getStageResultsSurvivalMultiArm( design = design, dataInput = dataInput, stage = stage, thetaH0 = 1, directionUpper = directionUpper, intersectionTest = intersectionTest, calculateSingleStepAdjusted = FALSE ) gMax <- dataInput$getNumberOfGroups() - 1 repeatedConfidenceIntervals <- array(NA_real_, dim = c(gMax, 2, design$kMax)) # Confidence interval for second stage when using conditional Dunnett test if (.isTrialDesignConditionalDunnett(design)) { startTime <- Sys.time() for (treatmentArm in 1:gMax) { if (!is.na(stageResults$testStatistics[treatmentArm, 2])) { iteration <- 30 thetaUpLimit <- 1 repeat{ stageResults <- .getStageResultsSurvivalMultiArm( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaUpLimit, directionUpper = FALSE, intersectionTest = intersectionTest, calculateSingleStepAdjusted = FALSE ) rejected <- .getConditionalDunnettTestForCI( design = design, stageResults = stageResults, treatmentArm = treatmentArm ) iteration <- iteration - 1 if (rejected || iteration == 0) break thetaUpLimit <- 2 * thetaUpLimit } thetaLow <- 0 thetaUp <- thetaUpLimit iteration <- 30 prec <- 1 while (prec > tolerance) { theta <- (thetaLow + thetaUp) / 2 stageResults <- .getStageResultsSurvivalMultiArm( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = TRUE, intersectionTest = intersectionTest, calculateSingleStepAdjusted = FALSE ) conditionalDunnettSingleStepRejected <- .getConditionalDunnettTestForCI( design = design, stageResults = stageResults, treatmentArm = treatmentArm ) ifelse(conditionalDunnettSingleStepRejected, thetaLow <- theta, thetaUp <- theta) ifelse(iteration > 
0, prec <- thetaUp - thetaLow, prec <- 0) iteration <- iteration - 1 } repeatedConfidenceIntervals[treatmentArm, 1, 2] <- theta thetaLow <- 0 thetaUp <- thetaUpLimit iteration <- 30 prec <- 1 while (prec > tolerance) { theta <- (thetaLow + thetaUp) / 2 stageResults <- .getStageResultsSurvivalMultiArm( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = FALSE, intersectionTest = intersectionTest, calculateSingleStepAdjusted = FALSE ) conditionalDunnettSingleStepRejected <- .getConditionalDunnettTestForCI( design = design, stageResults = stageResults, treatmentArm = treatmentArm ) ifelse(conditionalDunnettSingleStepRejected, thetaUp <- theta, thetaLow <- theta) ifelse(iteration > 0, prec <- thetaUp - thetaLow, prec <- 0) iteration <- iteration - 1 } repeatedConfidenceIntervals[treatmentArm, 2, 2] <- theta if (!is.na(repeatedConfidenceIntervals[treatmentArm, 1, 2]) && !is.na(repeatedConfidenceIntervals[treatmentArm, 2, 2]) && repeatedConfidenceIntervals[treatmentArm, 1, 2] > repeatedConfidenceIntervals[treatmentArm, 2, 2]) { repeatedConfidenceIntervals[treatmentArm, , 2] <- rep(NA_real_, 2) } } } .logProgress("Confidence intervals for final stage calculated", startTime = startTime) } else { # Repeated onfidence intervals when using combination tests if (intersectionTest == "Hierarchical") { warning("Repeated confidence intervals not available for ", "'intersectionTest' = \"Hierarchical\"", call. 
= FALSE
        )
        return(repeatedConfidenceIntervals)
    }

    # select the futility-bound scale and the comparison direction per design:
    if (.isTrialDesignFisher(design)) {
        # Fisher combination test: futility is expressed on the p-value scale
        bounds <- design$alpha0Vec
        border <- C_ALPHA_0_VEC_DEFAULT
        criticalValues <- design$criticalValues
        conditionFunction <- .isFirstValueSmallerThanSecondValue
    } else if (.isTrialDesignInverseNormal(design)) {
        # inverse normal: futility is on the z-scale; cap infinite critical
        # values so the root search below operates on finite bounds
        bounds <- design$futilityBounds
        border <- C_FUTILITY_BOUNDS_DEFAULT
        criticalValues <- design$criticalValues
        criticalValues[is.infinite(criticalValues) & criticalValues > 0] <- C_QNORM_MAXIMUM
        criticalValues[is.infinite(criticalValues) & criticalValues < 0] <- C_QNORM_MINIMUM
        conditionFunction <- .isFirstValueGreaterThanSecondValue
    }
    if (any(is.na(criticalValues[1:stage]))) {
        # BUGFIX: message previously read "...intervals not because...",
        # missing the word "calculated"
        warning("Repeated confidence intervals not calculated because ",
            sum(is.na(criticalValues)), " critical values are NA (",
            .arrayToString(criticalValues), ")",
            call. = FALSE
        )
        return(repeatedConfidenceIntervals)
    }

    # necessary for adjustment for binding futility boundaries
    futilityCorr <- rep(NA_real_, design$kMax)

    stages <- (1:stage)
    for (k in stages) {
        startTime <- Sys.time()
        for (treatmentArm in 1:gMax) {
            if (!is.na(stageResults$testStatistics[treatmentArm, k]) &&
                    criticalValues[k] < C_QNORM_MAXIMUM) {
                # Finding maximum upper and minimum lower bounds for RCIs
                thetaLow <- exp(.getUpperLowerThetaSurvivalMultiArm(
                    design = design, dataInput = dataInput,
                    theta = -1, treatmentArm = treatmentArm, stage = k,
                    directionUpper = TRUE, intersectionTest = intersectionTest,
                    conditionFunction = conditionFunction,
                    firstParameterName = firstParameterName,
                    secondValue = criticalValues[k]
                ))
                thetaUp <- exp(.getUpperLowerThetaSurvivalMultiArm(
                    design = design, dataInput = dataInput,
                    theta = 1, treatmentArm = treatmentArm, stage = k,
                    directionUpper = FALSE, intersectionTest = intersectionTest,
                    conditionFunction = conditionFunction,
                    firstParameterName = firstParameterName,
                    secondValue = criticalValues[k]
                ))
                # finding upper and lower RCI limits through root function
                repeatedConfidenceIntervals[treatmentArm, 1, k] <-
.getRootThetaSurvivalMultiArm( design = design, dataInput = dataInput, treatmentArm = treatmentArm, stage = k, directionUpper = TRUE, thetaLow = thetaLow, thetaUp = thetaUp, intersectionTest = intersectionTest, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance ) repeatedConfidenceIntervals[treatmentArm, 2, k] <- .getRootThetaSurvivalMultiArm( design = design, dataInput = dataInput, treatmentArm = treatmentArm, stage = k, directionUpper = FALSE, thetaLow = thetaLow, thetaUp = thetaUp, intersectionTest = intersectionTest, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance ) # adjustment for binding futility bounds if (k > 1 && !is.na(bounds[k - 1]) && conditionFunction(bounds[k - 1], border) && design$bindingFutility) { parameterName <- ifelse(.isTrialDesignFisher(design), "singleStepAdjustedPValues", firstParameterName ) # Calculate new lower and upper bounds if (directionUpper) { thetaLow <- tolerance } else { thetaUp <- .getUpperLowerThetaSurvivalMultiArm( design = design, dataInput = dataInput, theta = 1, treatmentArm = treatmentArm, stage = k - 1, directionUpper = FALSE, conditionFunction = conditionFunction, intersectionTest = intersectionTest, firstParameterName = parameterName, secondValue = bounds[k - 1] ) } futilityCorr[k] <- .getRootThetaSurvivalMultiArm( design = design, dataInput = dataInput, treatmentArm = treatmentArm, stage = k - 1, directionUpper = directionUpper, thetaLow = thetaLow, thetaUp = thetaUp, intersectionTest = intersectionTest, firstParameterName = parameterName, secondValue = bounds[k - 1], tolerance = tolerance ) if (directionUpper) { repeatedConfidenceIntervals[treatmentArm, 1, k] <- min( min(futilityCorr[2:k]), repeatedConfidenceIntervals[treatmentArm, 1, k] ) } else { repeatedConfidenceIntervals[treatmentArm, 2, k] <- max( max(futilityCorr[2:k]), repeatedConfidenceIntervals[treatmentArm, 2, k] ) } } if 
(!is.na(repeatedConfidenceIntervals[treatmentArm, 1, k]) && !is.na(repeatedConfidenceIntervals[treatmentArm, 2, k]) && repeatedConfidenceIntervals[treatmentArm, 1, k] > repeatedConfidenceIntervals[treatmentArm, 2, k]) { repeatedConfidenceIntervals[treatmentArm, , k] <- rep(NA_real_, 2) } } } .logProgress("Repeated confidence intervals for stage %s calculated", startTime = startTime, k) } } return(repeatedConfidenceIntervals) } #' #' RCIs based on inverse normal combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsSurvivalMultiArmInverseNormal <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsSurvivalMultiArmInverseNormal", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) return(.getRepeatedConfidenceIntervalsSurvivalMultiArmAll( design = design, dataInput = dataInput, directionUpper = directionUpper, intersectionTest = intersectionTest, tolerance = tolerance, firstParameterName = "combInverseNormal", ... )) } #' #' RCIs based on Fisher's combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsSurvivalMultiArmFisher <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsSurvivalMultiArmFisher", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) return(.getRepeatedConfidenceIntervalsSurvivalMultiArmAll( design = design, dataInput = dataInput, directionUpper = directionUpper, intersectionTest = intersectionTest, tolerance = tolerance, firstParameterName = "combFisher", ... 
))
}

#'
#' CIs based on conditional Dunnett test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsSurvivalMultiArmConditionalDunnett <- function(...,
        design, dataInput,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsSurvivalMultiArmConditionalDunnett",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"), ...
    )
    # delegate to the generic worker with the conditional Dunnett statistic
    return(.getRepeatedConfidenceIntervalsSurvivalMultiArmAll(
        design = design, dataInput = dataInput, directionUpper = directionUpper,
        intersectionTest = intersectionTest, tolerance = tolerance,
        firstParameterName = "condDunnett", ...
    ))
}

#'
#' Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Survival
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsSurvivalMultiArm <- function(..., design) {
    # dispatch on the design class; any other design type is an error
    if (.isTrialDesignInverseNormal(design)) {
        return(.getRepeatedConfidenceIntervalsSurvivalMultiArmInverseNormal(design = design, ...))
    }

    if (.isTrialDesignFisher(design)) {
        return(.getRepeatedConfidenceIntervalsSurvivalMultiArmFisher(design = design, ...))
    }

    if (.isTrialDesignConditionalDunnett(design)) {
        return(.getRepeatedConfidenceIntervalsSurvivalMultiArmConditionalDunnett(design = design, ...))
    }

    .stopWithWrongDesignMessage(design)
}

#'
#' Calculation of conditional power for Survival
#'
#' @noRd
#'
.getConditionalPowerSurvivalMultiArm <- function(..., stageResults,
        stage = stageResults$stage, nPlanned,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        thetaH1 = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    design <- stageResults$.design
    gMax <- stageResults$getGMax()
    kMax <- design$kMax
    results <- ConditionalPowerResultsMultiArmSurvival(
        .design = design,
        .stageResults = stageResults,
        thetaH1 = thetaH1,
        nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned
    )
    # no planned events for future stages: return the empty result object
    if (any(is.na(nPlanned))) {
        return(results)
    }

    .assertIsValidStage(stage, kMax)
    if (stage == kMax) {
        .logDebug(
            "Conditional power will be calculated only for subsequent stages ",
            "(stage = ", stage, ", kMax = ", kMax, ")"
        )
        return(results)
    }

    if (!.isValidNPlanned(nPlanned = nPlanned, kMax = kMax, stage = stage)) {
        return(results)
    }

    .assertIsValidNPlanned(nPlanned, kMax, stage)
    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)
    .setValueAndParameterType(results, "allocationRatioPlanned",
        allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT)

    thetaH1 <- .assertIsValidThetaH1ForMultiArm(thetaH1, stageResults, stage, results = results)
    results$.setParameterType("nPlanned", C_PARAM_USER_DEFINED)

    # hazard ratios under H1 must be strictly positive
    if (any(thetaH1 <= 0, na.rm = TRUE)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaH1' (", thetaH1, ") must be > 0")
    }

    if ((length(thetaH1) != 1) && (length(thetaH1) != gMax)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            sprintf(paste0(
                "length of 'thetaH1' (%s) must be ",
                "equal to 'gMax' (%s) or 1"
            ), .arrayToString(thetaH1), gMax)
        )
    }

    # dispatch to the design-specific conditional power calculation
    if (.isTrialDesignInverseNormal(design)) {
        return(.getConditionalPowerSurvivalMultiArmInverseNormal(
            results = results, design = design, stageResults = stageResults,
            stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned,
            thetaH1 = thetaH1, ...
        ))
    } else if (.isTrialDesignFisher(design)) {
        return(.getConditionalPowerSurvivalMultiArmFisher(
            results = results, design = design, stageResults = stageResults,
            stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned,
            thetaH1 = thetaH1,
            iterations = iterations, seed = seed, ...
))
} else if (.isTrialDesignConditionalDunnett(design)) {
    return(.getConditionalPowerSurvivalMultiArmConditionalDunnett(
        results = results, design = design, stageResults = stageResults,
        stage = stage, nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned,
        thetaH1 = thetaH1, ...
    ))
}

stop(
    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
    "'design' must be an instance of TrialDesignInverseNormal, TrialDesignFisher, ",
    "or TrialDesignConditionalDunnett"
)
}

#'
#' Calculation of conditional power based on inverse normal method
#'
#' @noRd
#'
.getConditionalPowerSurvivalMultiArmInverseNormal <- function(..., results,
        design, stageResults, stage, allocationRatioPlanned, nPlanned, thetaH1) {
    .assertIsTrialDesignInverseNormal(design)
    .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerSurvivalMultiArmInverseNormal", ...)
    kMax <- design$kMax
    gMax <- stageResults$getGMax()
    weights <- .getWeightsInverseNormal(design)
    informationRates <- design$informationRates

    # prepend NA for already observed stages; rescale planned events by the
    # allocation-ratio factor r / (1 + r)^2 (undone again before reporting)
    nPlanned <- c(rep(NA_real_, stage), nPlanned)
    nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned

    if (length(thetaH1) == 1) {
        thetaH1 <- rep(thetaH1, gMax)
        results$.setParameterType("thetaH1", C_PARAM_GENERATED)
    } else {
        results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE)
    }

    # standardized effect on the log hazard ratio scale, sign-adjusted for direction
    if (stageResults$directionUpper) {
        standardizedEffect <- log(thetaH1 / stageResults$thetaH0)
    } else {
        standardizedEffect <- -log(thetaH1 / stageResults$thetaH0)
    }

    ctr <- .performClosedCombinationTest(stageResults = stageResults)
    criticalValues <- design$criticalValues

    for (treatmentArm in 1:gMax) {
        if (!is.na(ctr$separatePValues[treatmentArm, stage])) {
            # shifted decision region for use in getGroupSeqProbs
            # Inverse Normal Method
            shiftedDecisionRegionUpper <- criticalValues[(stage + 1):kMax] *
                sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) /
                sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
                min(ctr$overallAdjustedTestStatistics[ctr$indices[, treatmentArm] == 1, stage], na.rm = TRUE) *
                sqrt(sum(weights[1:stage]^2)) /
                sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
                standardizedEffect[treatmentArm] *
                cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) /
                sqrt(cumsum(weights[(stage + 1):kMax]^2))
            if (stage == kMax - 1) {
                shiftedFutilityBounds <- c()
            } else {
                shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] *
                    sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) /
                    sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
                    min(ctr$overallAdjustedTestStatistics[ctr$indices[, treatmentArm] == 1, stage], na.rm = TRUE) *
                    sqrt(sum(weights[1:stage]^2)) /
                    sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
                    standardizedEffect[treatmentArm] *
                    cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) /
                    sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2))
            }
            # scaled information for use in getGroupSeqProbs
            scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) /
                (1 - informationRates[stage])
            decisionMatrix <- matrix(c(
                shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                shiftedDecisionRegionUpper
            ), nrow = 2, byrow = TRUE)
            probs <- .getGroupSequentialProbabilities(
                decisionMatrix = decisionMatrix,
                informationRates = scaledInformation
            )
            # conditional power per future stage: cumulative rejection probability
            results$conditionalPower[treatmentArm, (stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ])
        }
    }

    # undo the allocation-ratio rescaling before reporting nPlanned
    nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
    results$nPlanned <- nPlanned
    results$.setParameterType("nPlanned", C_PARAM_GENERATED)
    results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
    results$thetaH1 <- thetaH1
    return(results)
}

#'
#' Calculation of conditional power based on Fisher's combination test
#'
#' @noRd
#'
.getConditionalPowerSurvivalMultiArmFisher <- function(..., results, design,
        stageResults, stage, allocationRatioPlanned, nPlanned, thetaH1,
        iterations, seed) {
    .assertIsTrialDesignFisher(design)
    .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE)
.warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerSurvivalMultiArmFisher", ...)
    kMax <- design$kMax
    gMax <- stageResults$getGMax()
    criticalValues <- design$criticalValues
    weightsFisher <- .getWeightsFisher(design)
    results$iterations <- as.integer(iterations)
    results$.setParameterType("iterations", C_PARAM_USER_DEFINED)
    results$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED))
    results$seed <- .setSeed(seed)
    results$simulated <- FALSE
    results$.setParameterType("simulated", C_PARAM_DEFAULT_VALUE)

    if (length(thetaH1) == 1) {
        thetaH1 <- rep(thetaH1, gMax)
        results$.setParameterType("thetaH1", C_PARAM_GENERATED)
    } else {
        results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE)
    }

    # standardized effect on the log hazard ratio scale, sign-adjusted for direction
    if (stageResults$directionUpper) {
        standardizedEffect <- log(thetaH1 / stageResults$thetaH0)
    } else {
        standardizedEffect <- -log(thetaH1 / stageResults$thetaH0)
    }

    # prepend NA for already observed stages; rescale planned events by the
    # allocation-ratio factor r / (1 + r)^2 (undone again before reporting)
    nPlanned <- c(rep(NA_real_, stage), nPlanned)
    nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned

    ctr <- .performClosedCombinationTest(stageResults = stageResults)
    for (treatmentArm in 1:gMax) {
        if (!is.na(ctr$separatePValues[treatmentArm, stage])) {
            # stage-wise p-values of the intersection hypothesis with the
            # largest overall adjusted test statistic at the current stage
            if (gMax == 1) {
                pValues <- ctr$adjustedStageWisePValues[ctr$indices[, treatmentArm] == 1, ][1:stage]
            } else {
                pValues <- ctr$adjustedStageWisePValues[ctr$indices[, treatmentArm] == 1, ][which.max(
                    ctr$overallAdjustedTestStatistics[ctr$indices[, treatmentArm] == 1, stage]
                ), 1:stage]
            }
            if (stage < kMax - 1) {
                # more than one future stage: estimate conditional power by simulation
                for (k in (stage + 1):kMax) {
                    reject <- 0
                    for (i in 1:iterations) {
                        reject <- reject + .getRejectValueConditionalPowerFisher(
                            kMax = kMax, alpha0Vec = design$alpha0Vec,
                            criticalValues = criticalValues, weightsFisher = weightsFisher,
                            pValues = pValues, currentKMax = k,
                            thetaH1 = standardizedEffect[treatmentArm],
                            stage = stage, nPlanned = nPlanned
                        )
                    }
                    results$conditionalPower[treatmentArm, k] <- reject / iterations
                }
                results$simulated <- TRUE
                results$.setParameterType("simulated", C_PARAM_GENERATED)
            } else if (stage == kMax - 1) {
                # exactly one future stage: conditional power can be computed analytically
                divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)])
                result <- 1 - (criticalValues[kMax] / divisor)^(1 / weightsFisher[kMax])
                if (result <= 0 || result >= 1) {
                    warning("Calculation not possible: ",
                        "could not calculate conditional power for stage ", kMax,
                        call. = FALSE
                    )
                    results$conditionalPower[treatmentArm, kMax] <- NA_real_
                } else {
                    results$conditionalPower[treatmentArm, kMax] <- 1 - stats::pnorm(.getQNorm(result) -
                        standardizedEffect[treatmentArm] * sqrt(nPlanned[kMax]))
                }
            }
        }
    }

    # undo the allocation-ratio rescaling before reporting nPlanned
    nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
    results$nPlanned <- nPlanned
    results$.setParameterType("nPlanned", C_PARAM_GENERATED)
    results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
    results$thetaH1 <- thetaH1
    # if the analytic branch was used throughout, iterations/seed are not applicable
    if (!results$simulated) {
        results$iterations <- NA_integer_
        results$seed <- NA_real_
        results$.setParameterType("iterations", C_PARAM_NOT_APPLICABLE)
        results$.setParameterType("seed", C_PARAM_NOT_APPLICABLE)
    }
    return(results)
}

#'
#' Calculation of conditional power based on conditional Dunnett test
#'
#' @noRd
#'
.getConditionalPowerSurvivalMultiArmConditionalDunnett <- function(..., results,
        design, stageResults, stage, allocationRatioPlanned, nPlanned, thetaH1) {
    .assertIsTrialDesignConditionalDunnett(design)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalPowerSurvivalMultiArmConditionalDunnett",
        ignore = c("intersectionTest"), ...
    )
    if (stage > 1) {
        warning("Conditional power is only calculated for the first (interim) stage", call.
= FALSE)
    }
    gMax <- stageResults$getGMax()

    # prepend NA for already observed stages; rescale planned events by the
    # allocation-ratio factor r / (1 + r)^2 (undone again before reporting)
    nPlanned <- c(rep(NA_real_, stage), nPlanned)
    nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned

    if (length(thetaH1) == 1) {
        thetaH1 <- rep(thetaH1, gMax)
        results$.setParameterType("thetaH1", C_PARAM_GENERATED)
    } else {
        results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE)
    }

    # standardized effect on the log hazard ratio scale, sign-adjusted for direction
    if (stageResults$directionUpper) {
        standardizedEffect <- log(thetaH1 / stageResults$thetaH0)
    } else {
        standardizedEffect <- -log(thetaH1 / stageResults$thetaH0)
    }

    ctr <- .getClosedConditionalDunnettTestResults(stageResults = stageResults, design = design, stage = stage)

    for (treatmentArm in 1:gMax) {
        if (!is.na(ctr$separatePValues[treatmentArm, stage])) {
            # conditional power for the second stage, derived from the smallest
            # conditional error rate over the relevant intersection hypotheses
            results$conditionalPower[treatmentArm, 2] <- 1 - stats::pnorm(.getOneMinusQNorm(min(ctr$conditionalErrorRate[
                ctr$indices[, treatmentArm] == 1, stage
            ], na.rm = TRUE)) - standardizedEffect[treatmentArm] * sqrt(nPlanned[2]))
        }
    }

    # undo the allocation-ratio rescaling before reporting nPlanned
    nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
    results$nPlanned <- nPlanned
    results$.setParameterType("nPlanned", C_PARAM_GENERATED)
    results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
    results$thetaH1 <- thetaH1
    return(results)
}

#'
#' Calculation of conditional power and likelihood values for plotting the graph
#'
#' @noRd
#'
.getConditionalPowerLikelihoodSurvivalMultiArm <- function(..., stageResults,
        stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        thetaRange, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)
    .associatedArgumentsAreDefined(nPlanned = nPlanned, thetaRange = thetaRange)

    design <- stageResults$.design
    kMax <- design$kMax
    gMax <- stageResults$getGMax()
    intersectionTest <- stageResults$intersectionTest

    thetaRange <- .assertIsValidThetaH1ForMultiArm(thetaH1 = thetaRange)
    if (length(thetaRange) == 1) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "length of 'thetaRange' (", .arrayToString(thetaRange), ") must be at least 2"
        )
    }

    treatmentArms <- numeric(gMax * length(thetaRange))
    effectValues <- numeric(gMax * length(thetaRange))
    condPowerValues <- numeric(gMax * length(thetaRange))
    likelihoodValues <- numeric(gMax * length(thetaRange))

    # standard error of the log hazard ratio estimate per treatment arm
    stdErr <- 2 / sqrt(stageResults$.dataInput$getOverallEvents(stage = stage, group = (1:gMax)))

    results <- ConditionalPowerResultsMultiArmSurvival(
        .design = design,
        .stageResults = stageResults,
        nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned
    )

    # evaluate conditional power and likelihood on the grid thetaRange x treatment arms
    j <- 1
    for (i in seq(along = thetaRange)) {
        for (treatmentArm in (1:gMax)) {
            treatmentArms[j] <- treatmentArm
            effectValues[j] <- thetaRange[i]

            if (.isTrialDesignInverseNormal(design)) {
                condPowerValues[j] <- .getConditionalPowerSurvivalMultiArmInverseNormal(
                    results = results, design = design, stageResults = stageResults,
                    stage = stage, nPlanned = nPlanned,
                    allocationRatioPlanned = allocationRatioPlanned,
                    thetaH1 = thetaRange[i], ...
                )$conditionalPower[treatmentArm, kMax]
            } else if (.isTrialDesignFisher(design)) {
                condPowerValues[j] <- .getConditionalPowerSurvivalMultiArmFisher(
                    results = results, design = design, stageResults = stageResults,
                    stage = stage, nPlanned = nPlanned,
                    allocationRatioPlanned = allocationRatioPlanned,
                    thetaH1 = thetaRange[i],
                    iterations = iterations, seed = seed, ...
                )$conditionalPower[treatmentArm, kMax]
            } else if (.isTrialDesignConditionalDunnett(design)) {
                condPowerValues[j] <- .getConditionalPowerSurvivalMultiArmConditionalDunnett(
                    results = results, design = design, stageResults = stageResults,
                    stage = stage, nPlanned = nPlanned,
                    allocationRatioPlanned = allocationRatioPlanned,
                    thetaH1 = thetaRange[i], ...
                )$conditionalPower[treatmentArm, 2]
            }

            # likelihood of thetaRange[i] given the observed effect, normalized to 1 at the mode
            likelihoodValues[j] <- stats::dnorm(
                log(thetaRange[i]),
                log(stageResults$effectSizes[treatmentArm, stage]), stdErr[treatmentArm]
            ) / stats::dnorm(0, 0, stdErr[treatmentArm])

            j <- j + 1
        }
    }

    subtitle <- paste0(
        "Intersection test = ", intersectionTest,
        ", Stage = ", stage, ", # of remaining events = ", sum(nPlanned),
        ", allocation ratio = ", .formatSubTitleValue(allocationRatioPlanned, "allocationRatioPlanned")
    )

    return(list(
        treatmentArms = treatmentArms,
        xValues = effectValues,
        condPowerValues = condPowerValues,
        likelihoodValues = likelihoodValues,
        main = C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD,
        xlab = "Hazard ratio",
        ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD,
        sub = subtitle
    ))
}
rpact/R/f_simulation_enrichment_survival.R0000644000176200001440000011111214445307576020563 0ustar liggesusers## |
## | *Simulation of enrichment design with time to event data*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_simulation_enrichment.R
NULL

.getSimulationSurvivalEnrichmentStageEvents <- function(..., stage,
        directionUpper, conditionalPower, conditionalCriticalValue,
        plannedEvents, allocationRatioPlanned,
        selectedPopulations, thetaH1, overallEffects,
        minNumberOfEventsPerStage, maxNumberOfEventsPerStage) {
    stage <- stage - 1 # to be consistent with non-enrichment situation
    gMax <- nrow(overallEffects)

    if
(!is.na(conditionalPower)) {
        if (any(selectedPopulations[1:gMax, stage + 1], na.rm = TRUE)) {
            # Effect assumed for the sample size recalculation: observed effect
            # of the selected populations, or the fixed thetaH1 if provided;
            # kept away from 1 by +/- 1e-07 to avoid division by zero below.
            if (is.na(thetaH1)) {
                if (directionUpper) {
                    thetaStandardized <- log(max(min(
                        overallEffects[selectedPopulations[1:gMax, stage + 1], stage],
                        na.rm = TRUE
                    ), 1 + 1e-07))
                } else {
                    thetaStandardized <- log(min(max(
                        overallEffects[selectedPopulations[1:gMax, stage + 1], stage],
                        na.rm = TRUE
                    ), 1 - 1e-07))
                }
            } else {
                if (directionUpper) {
                    thetaStandardized <- log(max(thetaH1, 1 + 1e-07))
                } else {
                    thetaStandardized <- log(min(thetaH1, 1 - 1e-07))
                }
            }

            if (conditionalCriticalValue[stage] > 8) {
                # conditional critical value too extreme: use the stage maximum
                newEvents <- maxNumberOfEventsPerStage[stage + 1]
            } else {
                # events needed to reach the target conditional power, then
                # clipped to the [min, max] bounds for the next stage
                newEvents <- (1 + allocationRatioPlanned[stage])^2 / allocationRatioPlanned[stage] *
                    (max(0, conditionalCriticalValue[stage] +
                        .getQNorm(conditionalPower), na.rm = TRUE))^2 / thetaStandardized^2
                newEvents <- min(
                    max(minNumberOfEventsPerStage[stage + 1], newEvents),
                    maxNumberOfEventsPerStage[stage + 1]
                )
            }
        } else {
            newEvents <- 0
        }
    } else {
        # no conditional power target: use the pre-planned stage increment
        newEvents <- plannedEvents[stage + 1] - plannedEvents[stage]
    }
    return(newEvents)
}

.getSimulatedStageSurvivalEnrichment <- function(..., design, subsets, prevalences,
        piControls, hazardRatios, directionUpper, stratifiedAnalysis, plannedEvents,
        typeOfSelection, effectMeasure, adaptations, epsilonValue, rValue,
        threshold, allocationRatioPlanned, minNumberOfEventsPerStage,
        maxNumberOfEventsPerStage, conditionalPower, thetaH1,
        calcEventsFunction, calcEventsFunctionIsUserDefined, selectPopulationsFunction) {
    kMax <- length(plannedEvents)
    pMax <- length(hazardRatios)
    # number of populations; hazardRatios has one entry per subset, pMax = 2^(gMax - 1)
    gMax <- log(length(hazardRatios), 2) + 1

    simLogRanks <- matrix(NA_real_, nrow = pMax, ncol = kMax)
    eventsPerStage <- matrix(NA_real_, nrow = pMax, ncol = kMax)
    populationEventsPerStage <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallEffects <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    logRankStatistics <- matrix(NA_real_, nrow = pMax, ncol = kMax)
    overallTestStatistics <-
matrix(NA_real_, nrow = gMax, ncol = kMax)
    separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    conditionalCriticalValue <- rep(NA_real_, kMax - 1)
    conditionalPowerPerStage <- rep(NA_real_, kMax)
    selectedPopulations <- matrix(FALSE, nrow = gMax, ncol = kMax)
    selectedSubsets <- matrix(FALSE, nrow = pMax, ncol = kMax)
    selectedPopulations[, 1] <- TRUE
    selectedSubsets[, 1] <- TRUE
    adjustedPValues <- rep(NA_real_, kMax)
    populationHazardRatios <- rep(NA_real_, gMax)

    if (.isTrialDesignFisher(design)) {
        weights <- .getWeightsFisher(design)
    } else if (.isTrialDesignInverseNormal(design)) {
        weights <- .getWeightsInverseNormal(design)
    }

    for (k in 1:kMax) {
        # allocation-ratio factor r / (1 + r)^2 used throughout for event rescaling
        const <- allocationRatioPlanned[k] / (1 + allocationRatioPlanned[k])^2
        selectedSubsets[, k] <- .createSelectedSubsets(k, selectedPopulations)

        # distribute the planned events of stage k over the subsets, proportional
        # to prevalence and an approximate relative event probability
        if (is.null(piControls) || length(piControls) == 0) {
            if (k == 1) {
                eventsPerStage[, k] <- prevalences * (1 + allocationRatioPlanned[k] * hazardRatios) /
                    sum(prevalences * (1 + allocationRatioPlanned[k] * hazardRatios), na.rm = TRUE) * plannedEvents[k]
            } else {
                prevSelected <- prevalences / sum(prevalences[selectedSubsets[, k]])
                prevSelected[!selectedSubsets[, k]] <- 0
                if (sum(prevSelected, na.rm = TRUE) > 0) {
                    eventsPerStage[, k] <- prevSelected * (1 + allocationRatioPlanned[k] * hazardRatios) /
                        sum(prevSelected * (1 + allocationRatioPlanned[k] * hazardRatios), na.rm = TRUE) *
                        (plannedEvents[k] - plannedEvents[k - 1])
                } else {
                    break
                }
            }
        } else {
            # control event rates specified: weight event distribution accordingly
            rho <- (allocationRatioPlanned[k] * (1 - (1 - piControls)^hazardRatios) + piControls) /
                (1 + allocationRatioPlanned[k])
            if (k == 1) {
                eventsPerStage[, k] <- prevalences * rho / sum(prevalences * rho, na.rm = TRUE) * plannedEvents[k]
            } else {
                prevSelected <- prevalences / sum(prevalences[selectedSubsets[, k]])
                prevSelected[!selectedSubsets[, k]] <- 0
                if (sum(prevSelected, na.rm = TRUE) > 0) {
                    eventsPerStage[, k] <- prevSelected * rho / sum(prevSelected * rho, na.rm = TRUE) *
                        (plannedEvents[k] - plannedEvents[k - 1])
                } else {
                    break
                }
            }
        }

        # simulate normally distributed logrank statistics per subset
        logRankStatistics[, k] <- (2 * directionUpper - 1) *
            stats::rnorm(pMax, log(hazardRatios) * sqrt(const * eventsPerStage[, k]), 1)

        # combine subset statistics into population statistics; subset-to-population
        # index sets depend on the number of populations gMax (stratified layout)
        if (gMax == 1) {
            testStatistics[1, k] <- logRankStatistics[1, k]
            populationEventsPerStage[1, k] <- eventsPerStage[1, k]
            overallTestStatistics[1, k] <- sum(sqrt(eventsPerStage[1, 1:k]) * testStatistics[1, 1:k], na.rm = TRUE) /
                sqrt(sum(eventsPerStage[1, 1:k], na.rm = TRUE))
            overallEffects[1, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[1, k] /
                sqrt(const) / sqrt(sum(eventsPerStage[1, 1:k], na.rm = TRUE)))
        } else if (gMax == 2) {
            # Population S1
            testStatistics[1, k] <- logRankStatistics[1, k]
            populationEventsPerStage[1, k] <- eventsPerStage[1, k]
            overallTestStatistics[1, k] <- sum(sqrt(eventsPerStage[1, 1:k]) * testStatistics[1, 1:k], na.rm = TRUE) /
                sqrt(sum(eventsPerStage[1, 1:k], na.rm = TRUE))
            overallEffects[1, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[1, k] /
                sqrt(const) / sqrt(sum(eventsPerStage[1, 1:k], na.rm = TRUE)))
            # Full population
            testStatistics[2, k] <- sum(sqrt(eventsPerStage[1:2, k]) * logRankStatistics[1:2, k], na.rm = TRUE) /
                sqrt(sum(eventsPerStage[1:2, k], na.rm = TRUE))
            populationEventsPerStage[2, k] <- sum(eventsPerStage[1:2, k], na.rm = TRUE)
            overallTestStatistics[2, k] <- sum(sqrt(populationEventsPerStage[2, 1:k]) * testStatistics[2, 1:k], na.rm = TRUE) /
                sqrt(sum(populationEventsPerStage[2, 1:k], na.rm = TRUE))
            overallEffects[2, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[2, k] /
                sqrt(const) / sqrt(sum(populationEventsPerStage[2, 1:k], na.rm = TRUE)))
        } else if (gMax == 3) {
            # Population S1
            testStatistics[1, k] <- sum(sqrt(eventsPerStage[c(1, 3), k]) * logRankStatistics[c(1, 3), k], na.rm = TRUE) /
                sqrt(sum(eventsPerStage[c(1, 3), k], na.rm = TRUE))
            populationEventsPerStage[1, k] <- sum(eventsPerStage[c(1, 3), k], na.rm = TRUE)
            overallTestStatistics[1, k] <- sum(sqrt(populationEventsPerStage[1, 1:k]) * testStatistics[1, 1:k], na.rm = TRUE) /
                sqrt(sum(populationEventsPerStage[1, 1:k], na.rm = TRUE))
            overallEffects[1, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[1, k] /
                sqrt(const) / sqrt(sum(populationEventsPerStage[1, 1:k], na.rm = TRUE)))
            # Population S2
            testStatistics[2, k] <- sum(sqrt(eventsPerStage[c(2, 3), k]) * logRankStatistics[c(2, 3), k], na.rm = TRUE) /
                sqrt(sum(eventsPerStage[c(2, 3), k], na.rm = TRUE))
            populationEventsPerStage[2, k] <- sum(eventsPerStage[c(2, 3), k], na.rm = TRUE)
            overallTestStatistics[2, k] <- sum(sqrt(populationEventsPerStage[2, 1:k]) * testStatistics[2, 1:k], na.rm = TRUE) /
                sqrt(sum(populationEventsPerStage[2, 1:k], na.rm = TRUE))
            overallEffects[2, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[2, k] /
                sqrt(const) / sqrt(sum(populationEventsPerStage[2, 1:k], na.rm = TRUE)))
            # Full population
            testStatistics[3, k] <- sum(sqrt(eventsPerStage[1:4, k]) * logRankStatistics[1:4, k], na.rm = TRUE) /
                sqrt(sum(eventsPerStage[1:4, k], na.rm = TRUE))
            populationEventsPerStage[3, k] <- sum(eventsPerStage[1:4, k], na.rm = TRUE)
            overallTestStatistics[3, k] <- sum(sqrt(populationEventsPerStage[3, 1:k]) * testStatistics[3, 1:k], na.rm = TRUE) /
                sqrt(sum(populationEventsPerStage[3, 1:k], na.rm = TRUE))
            overallEffects[3, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[3, k] /
                sqrt(const) / sqrt(sum(populationEventsPerStage[3, 1:k], na.rm = TRUE)))
        } else if (gMax == 4) {
            # Population S1
            testStatistics[1, k] <- sum(sqrt(eventsPerStage[c(1, 4, 5, 7), k]) * logRankStatistics[c(1, 4, 5, 7), k], na.rm = TRUE) /
                sqrt(sum(eventsPerStage[c(1, 4, 5, 7), k], na.rm = TRUE))
            populationEventsPerStage[1, k] <- sum(eventsPerStage[c(1, 4, 5, 7), k], na.rm = TRUE)
            overallTestStatistics[1, k] <- sum(sqrt(populationEventsPerStage[1, 1:k]) * testStatistics[1, 1:k], na.rm = TRUE) /
                sqrt(sum(populationEventsPerStage[1, 1:k], na.rm = TRUE))
            overallEffects[1, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[1, k] /
                sqrt(const) / sqrt(sum(populationEventsPerStage[1,
1:k], na.rm = TRUE))) # Population S2 testStatistics[2, k] <- sum(sqrt(eventsPerStage[c(2, 4, 6, 7), k]) * logRankStatistics[c(2, 4, 6, 7), k], na.rm = TRUE) / sqrt(sum(eventsPerStage[c(2, 4, 6, 7), k], na.rm = TRUE)) populationEventsPerStage[2, k] <- sum(eventsPerStage[c(2, 4, 6, 7), k], na.rm = TRUE) overallTestStatistics[2, k] <- sum(sqrt(populationEventsPerStage[2, 1:k]) * testStatistics[2, 1:k], na.rm = TRUE) / sqrt(sum(populationEventsPerStage[2, 1:k], na.rm = TRUE)) overallEffects[2, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[2, k] / sqrt(const) / sqrt(sum(populationEventsPerStage[2, 1:k], na.rm = TRUE))) # Population S3 testStatistics[3, k] <- sum(sqrt(eventsPerStage[c(3, 5, 6, 7), k]) * logRankStatistics[c(3, 5, 6, 7), k], na.rm = TRUE) / sqrt(sum(eventsPerStage[c(3, 5, 6, 7), k], na.rm = TRUE)) populationEventsPerStage[3, k] <- sum(eventsPerStage[c(3, 5, 6, 7), k], na.rm = TRUE) overallTestStatistics[3, k] <- sum(sqrt(populationEventsPerStage[3, 1:k]) * testStatistics[3, 1:k], na.rm = TRUE) / sqrt(sum(populationEventsPerStage[3, 1:k], na.rm = TRUE)) overallEffects[3, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[3, k] / sqrt(const) / sqrt(sum(populationEventsPerStage[3, 1:k], na.rm = TRUE))) # Full population testStatistics[4, k] <- sum(sqrt(eventsPerStage[1:8, k]) * logRankStatistics[1:8, k], na.rm = TRUE) / sqrt(sum(eventsPerStage[1:8, k], na.rm = TRUE)) populationEventsPerStage[4, k] <- sum(eventsPerStage[1:8, k], na.rm = TRUE) overallTestStatistics[4, k] <- sum(sqrt(populationEventsPerStage[4, 1:k]) * testStatistics[4, 1:k], na.rm = TRUE) / sqrt(sum(populationEventsPerStage[4, 1:k], na.rm = TRUE)) overallEffects[4, k] <- exp((2 * directionUpper - 1) * overallTestStatistics[4, k] / sqrt(const) / sqrt(sum(populationEventsPerStage[4, 1:k], na.rm = TRUE))) } testStatistics[!selectedPopulations[, k], k] <- NA_real_ overallEffects[!selectedPopulations[, k], k] <- NA_real_ overallTestStatistics[!selectedPopulations[, k], k] <- 
NA_real_ separatePValues[, k] <- 1 - stats::pnorm(testStatistics[, k]) if (k < kMax) { if (colSums(selectedPopulations)[k] == 0) { break } # Bonferroni adjustment adjustedPValues[k] <- min(min(separatePValues[, k], na.rm = TRUE) * (colSums(selectedPopulations)[k]), 1 - 1e-7) # conditional critical value to reject the null hypotheses at the next stage of the trial if (.isTrialDesignFisher(design)) { conditionalCriticalValue[k] <- .getOneMinusQNorm(min((design$criticalValues[k + 1] / prod(adjustedPValues[1:k]^weights[1:k]))^(1 / weights[k + 1]), 1 - 1e-7)) } else { conditionalCriticalValue[k] <- (design$criticalValues[k + 1] * sqrt(design$informationRates[k + 1]) - .getOneMinusQNorm(adjustedPValues[1:k]) %*% weights[1:k]) / sqrt(design$informationRates[k + 1] - design$informationRates[k]) } if (adaptations[k]) { if (effectMeasure == "testStatistic") { selectedPopulations[, k + 1] <- (selectedPopulations[, k] & .selectPopulations( k, overallTestStatistics[, k], typeOfSelection, epsilonValue, rValue, threshold, selectPopulationsFunction )) } else if (effectMeasure == "effectEstimate") { if (directionUpper) { selectedPopulations[, k + 1] <- (selectedPopulations[, k] & .selectPopulations( k, overallEffects[, k], typeOfSelection, epsilonValue, rValue, threshold, selectPopulationsFunction )) } else { selectedPopulations[, k + 1] <- (selectedPopulations[, k] & .selectPopulations( k, 1 / overallEffects[, k], typeOfSelection, epsilonValue, rValue, 1 / threshold, selectPopulationsFunction )) } } newEvents <- calcEventsFunction( stage = k + 1, # to be consistent with non-enrichment situation, cf. 
line 38 directionUpper = directionUpper, conditionalPower = conditionalPower, conditionalCriticalValue = conditionalCriticalValue, plannedEvents = plannedEvents, allocationRatioPlanned = allocationRatioPlanned, selectedPopulations = selectedPopulations, thetaH1 = thetaH1, overallEffects = overallEffects, minNumberOfEventsPerStage = minNumberOfEventsPerStage, maxNumberOfEventsPerStage = maxNumberOfEventsPerStage ) if (is.null(newEvents) || length(newEvents) != 1 || !is.numeric(newEvents) || is.na(newEvents)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'calcEventsFunction' returned an illegal or undefined result (", newEvents, "); ", "the output must be a single numeric value" ) } if (!is.na(conditionalPower) || calcEventsFunctionIsUserDefined) { plannedEvents[(k + 1):kMax] <- plannedEvents[k] + cumsum(rep(newEvents, kMax - k)) } } else { selectedPopulations[, k + 1] <- selectedPopulations[, k] } if (is.na(thetaH1)) { if (directionUpper) { thetaStandardized <- log(min(overallEffects[selectedPopulations[1:gMax, k], k], na.rm = TRUE)) } else { thetaStandardized <- log(max(overallEffects[selectedPopulations[1:gMax, k], k], na.rm = TRUE)) } } else { thetaStandardized <- log(thetaH1) } thetaStandardized <- (2 * directionUpper - 1) * thetaStandardized conditionalPowerPerStage[k] <- 1 - stats::pnorm(conditionalCriticalValue[k] - thetaStandardized * sqrt(plannedEvents[k + 1] - plannedEvents[k]) * sqrt(const)) } } return(list( eventsPerStage = eventsPerStage, plannedEvents = plannedEvents, allocationRatioPlanned = allocationRatioPlanned, overallEffects = overallEffects, testStatistics = testStatistics, overallTestStatistics = overallTestStatistics, separatePValues = separatePValues, conditionalCriticalValue = conditionalCriticalValue, conditionalPowerPerStage = conditionalPowerPerStage, selectedPopulations = selectedPopulations )) } #' #' @title #' Get Simulation Enrichment Survival #' #' @description #' Returns the simulated power, stopping and selection probabilities, 
conditional power,
#' and expected sample size for testing hazard ratios in an enrichment design testing situation.
#' In contrast to \code{getSimulationSurvival()} (where survival times are simulated), normally
#' distributed logrank test statistics are simulated.
#'
#' @inheritParams param_intersectionTest_Enrichment
#' @inheritParams param_typeOfSelection
#' @inheritParams param_effectMeasure
#' @inheritParams param_adaptations
#' @inheritParams param_threshold
#' @inheritParams param_effectList
#' @inheritParams param_successCriterion
#' @inheritParams param_design_with_default
#' @inheritParams param_directionUpper
#' @inheritParams param_allocationRatioPlanned
#' @inheritParams param_minNumberOfEventsPerStage
#' @inheritParams param_maxNumberOfEventsPerStage
#' @inheritParams param_conditionalPowerSimulation
#' @inheritParams param_thetaH1
#' @inheritParams param_plannedEvents
#' @inheritParams param_maxNumberOfIterations
#' @inheritParams param_calcEventsFunction
#' @inheritParams param_selectPopulationsFunction
#' @inheritParams param_rValue
#' @inheritParams param_epsilonValue
#' @inheritParams param_seed
#' @inheritParams param_three_dots
#' @inheritParams param_showStatistics
#' @inheritParams param_stratifiedAnalysis
#'
#' @details
#' At given design the function simulates the power, stopping probabilities,
#' selection probabilities, and expected event number at given number of events,
#' parameter configuration, and population selection rule in the enrichment situation.
#' An allocation ratio can be specified referring to the ratio of number of subjects
#' in the active treatment group as compared to the control group.
#'
#' The definition of \code{thetaH1} only makes sense if \code{kMax} > 1
#' and if \code{conditionalPower}, \code{minNumberOfEventsPerStage}, and
#' \code{maxNumberOfEventsPerStage} (or \code{calcEventsFunction}) are defined.
#'
#' \code{calcEventsFunction}\cr
#' This function returns the number of events at given conditional power
#' and conditional critical value for specified testing situation.
#' The function might depend on the variables
#' \code{stage},
#' \code{selectedPopulations},
#' \code{plannedEvents},
#' \code{directionUpper},
#' \code{allocationRatioPlanned},
#' \code{minNumberOfEventsPerStage},
#' \code{maxNumberOfEventsPerStage},
#' \code{conditionalPower},
#' \code{conditionalCriticalValue}, and
#' \code{overallEffects}.
#' The function has to contain the three-dots argument '...' (see examples).
#'
#' @template return_object_simulation_results
#' @template how_to_get_help_for_generics
#'
#' @template examples_get_simulation_enrichment_survival
#'
#' @export
#'
getSimulationEnrichmentSurvival <- function(design = NULL, ...,
        effectList = NULL,
        intersectionTest = c("Simes", "SpiessensDebois", "Bonferroni", "Sidak"), # C_INTERSECTION_TEST_ENRICHMENT_DEFAULT
        stratifiedAnalysis = TRUE, # C_STRATIFIED_ANALYSIS_DEFAULT
        directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT
        adaptations = NA,
        typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), # C_TYPE_OF_SELECTION_DEFAULT
        effectMeasure = c("effectEstimate", "testStatistic"), # C_EFFECT_MEASURE_DEFAULT
        successCriterion = c("all", "atLeastOne"), # C_SUCCESS_CRITERION_DEFAULT
        epsilonValue = NA_real_,
        rValue = NA_real_,
        threshold = -Inf,
        plannedEvents = NA_real_,
        allocationRatioPlanned = NA_real_,
        minNumberOfEventsPerStage = NA_real_,
        maxNumberOfEventsPerStage = NA_real_,
        conditionalPower = NA_real_,
        thetaH1 = NA_real_,
        maxNumberOfIterations = 1000L, # C_MAX_SIMULATION_ITERATIONS_DEFAULT
        seed = NA_real_,
        calcEventsFunction = NULL,
        selectPopulationsFunction = NULL,
        showStatistics = FALSE) {
    # Validate the design argument: either derive a default design from '...'
    # or assert that a one-sided inverse normal / Fisher design was provided.
    if (is.null(design)) {
        design <- .getDefaultDesign(..., type = "simulation")
        .warnInCaseOfUnknownArguments(
            functionName = "getSimulationEnrichmentSurvival",
            ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
                design,
                powerCalculationEnabled = TRUE
            ), "showStatistics"), ...
        )
    } else {
        .assertIsTrialDesignInverseNormalOrFisher(design)
        .warnInCaseOfUnknownArguments(
            functionName = "getSimulationEnrichmentSurvival",
            ignore = "showStatistics", ...
        )
        .warnInCaseOfTwoSidedPowerArgument(...)
    }

    .assertIsOneSidedDesign(design, designType = "enrichment", engineType = "simulation")

    calcEventsFunctionIsUserDefined <- !is.null(calcEventsFunction)

    # Build the result object; this also normalizes/validates all user arguments,
    # so the effective values are read back from the object below.
    simulationResults <- .createSimulationResultsEnrichmentObject(
        design = design,
        effectList = effectList,
        intersectionTest = intersectionTest,
        stratifiedAnalysis = stratifiedAnalysis,
        directionUpper = directionUpper, # rates + survival only
        adaptations = adaptations,
        typeOfSelection = typeOfSelection,
        effectMeasure = effectMeasure,
        successCriterion = successCriterion,
        epsilonValue = epsilonValue,
        rValue = rValue,
        threshold = threshold,
        plannedEvents = plannedEvents, # survival only
        allocationRatioPlanned = allocationRatioPlanned,
        minNumberOfEventsPerStage = minNumberOfEventsPerStage, # survival only
        maxNumberOfEventsPerStage = maxNumberOfEventsPerStage, # survival only
        conditionalPower = conditionalPower,
        thetaH1 = thetaH1, # means + survival only
        maxNumberOfIterations = maxNumberOfIterations,
        seed = seed,
        calcEventsFunction = calcEventsFunction, # survival only
        selectPopulationsFunction = selectPopulationsFunction,
        showStatistics = showStatistics,
        endpoint = "survival"
    )

    design <- simulationResults$.design
    successCriterion <- simulationResults$successCriterion
    effectMeasure <- simulationResults$effectMeasure
    adaptations <- simulationResults$adaptations
    gMax <- simulationResults$populations
    kMax <- simulationResults$.design$kMax
    intersectionTest <- simulationResults$intersectionTest
    typeOfSelection <- simulationResults$typeOfSelection
    effectList <- simulationResults$effectList
    thetaH1 <- simulationResults$thetaH1 # means + survival only
    plannedEvents <- simulationResults$plannedEvents # survival only
    conditionalPower <- simulationResults$conditionalPower
    minNumberOfEventsPerStage <- simulationResults$minNumberOfEventsPerStage # survival only
    maxNumberOfEventsPerStage <- simulationResults$maxNumberOfEventsPerStage # survival only
    allocationRatioPlanned <- simulationResults$allocationRatioPlanned
    calcEventsFunction <- simulationResults$calcEventsFunction

    # A scalar allocation ratio applies to all stages
    if (length(allocationRatioPlanned) == 1) {
        allocationRatioPlanned <- rep(allocationRatioPlanned, kMax)
    }

    indices <- .getIndicesOfClosedHypothesesSystemForSimulation(gMax = gMax)

    # One column per effect scenario (row of the hazard ratio matrix)
    cols <- nrow(effectList$hazardRatios)

    # Aggregation containers: stage x scenario (x population) counts,
    # averaged over iterations at the end of each scenario loop
    simulatedSelections <- array(0, dim = c(kMax, cols, gMax))
    simulatedRejections <- array(0, dim = c(kMax, cols, gMax))
    simulatedNumberOfPopulations <- matrix(0, nrow = kMax, ncol = cols)
    simulatedSingleEventsPerStage <- array(0, dim = c(kMax, cols, 2^(gMax - 1)))
    simulatedOverallEventsPerStage <- matrix(0, nrow = kMax, ncol = cols)
    simulatedSuccessStopping <- matrix(0, nrow = kMax, ncol = cols)
    simulatedFutilityStopping <- matrix(0, nrow = kMax - 1, ncol = cols)
    simulatedConditionalPower <- matrix(0, nrow = kMax, ncol = cols)
    simulatedRejectAtLeastOne <- rep(0, cols)
    expectedNumberOfEvents <- rep(0, cols)
    iterations <- matrix(0, nrow = kMax, ncol = cols)

    # Raw per-iteration data collected for the .data data.frame
    len <- maxNumberOfIterations * kMax * gMax * cols
    dataIterationNumber <- rep(NA_real_, len)
    dataStageNumber <- rep(NA_real_, len)
    dataArmNumber <- rep(NA_real_, len)
    dataAlternative <- rep(NA_real_, len)
    dataEffect <- rep(NA_real_, len)
    dataNumberOfEvents <- rep(NA_real_, len)
    dataRejectPerStage <- rep(NA, len)
    dataSuccessStop <- rep(NA, len)
    # Fix: the original code initialized dataFutilityStop twice
    # (first as rep(NA_real_, len), then as rep(NA, len)); the first
    # assignment was a dead store and has been removed.
    dataFutilityStop <- rep(NA, len)
    dataTestStatistics <- rep(NA_real_, len)
    dataConditionalCriticalValue <- rep(NA_real_, len)
    dataConditionalPowerAchieved <- rep(NA_real_, len)
    dataEffectEstimate <- rep(NA_real_, len)
    dataPValuesSeparate <- rep(NA_real_, len)

    index <- 1
    for (i in 1:cols) {
        for (j in 1:maxNumberOfIterations) {
            # Simulate one trial (all stages) for scenario i
            stageResults <- .getSimulatedStageSurvivalEnrichment(
                design = design,
                subsets = effectList$subsets,
                prevalences = effectList$prevalences,
                piControls = effectList$piControls,
                hazardRatios = effectList$hazardRatios[i, ],
                directionUpper = directionUpper,
                stratifiedAnalysis = stratifiedAnalysis,
                plannedEvents = plannedEvents,
                typeOfSelection = typeOfSelection,
                effectMeasure = effectMeasure,
                adaptations = adaptations,
                epsilonValue = epsilonValue,
                rValue = rValue,
                threshold = threshold,
                allocationRatioPlanned = allocationRatioPlanned,
                minNumberOfEventsPerStage = minNumberOfEventsPerStage,
                maxNumberOfEventsPerStage = maxNumberOfEventsPerStage,
                conditionalPower = conditionalPower,
                thetaH1 = thetaH1,
                calcEventsFunction = calcEventsFunction,
                calcEventsFunctionIsUserDefined = calcEventsFunctionIsUserDefined,
                selectPopulationsFunction = selectPopulationsFunction
            )

            closedTest <- .performClosedCombinationTestForSimulationEnrichment(
                stageResults = stageResults,
                design = design, indices = indices,
                intersectionTest = intersectionTest, successCriterion = successCriterion
            )

            rejectAtSomeStage <- FALSE
            rejectedPopulationsBefore <- rep(FALSE, gMax)

            for (k in 1:kMax) {
                # Populations rejected so far (rejections carry over between stages)
                simulatedRejections[k, i, ] <- simulatedRejections[k, i, ] +
                    (closedTest$rejected[, k] & closedTest$selectedPopulations[1:gMax, k] |
                        rejectedPopulationsBefore)
                simulatedSelections[k, i, ] <- simulatedSelections[k, i, ] +
                    closedTest$selectedPopulations[, k]

                simulatedSingleEventsPerStage[k, i, ] <- simulatedSingleEventsPerStage[k, i, ] +
                    stageResults$eventsPerStage[, k]

                simulatedNumberOfPopulations[k, i] <- simulatedNumberOfPopulations[k, i] +
                    sum(closedTest$selectedPopulations[, k])

                if (!any(is.na(closedTest$successStop))) {
                    simulatedSuccessStopping[k, i] <- simulatedSuccessStopping[k, i] +
                        closedTest$successStop[k]
                }

                if ((kMax > 1) && (k < kMax)) {
                    if (!any(is.na(closedTest$futilityStop))) {
                        simulatedFutilityStopping[k, i] <- simulatedFutilityStopping[k, i] +
                            (closedTest$futilityStop[k] && !closedTest$successStop[k])
                    }
                    # Conditional power only contributes if the trial continues
                    if (!closedTest$successStop[k] && !closedTest$futilityStop[k]) {
                        simulatedConditionalPower[k + 1, i] <-
                            simulatedConditionalPower[k + 1, i] +
                            stageResults$conditionalPowerPerStage[k]
                    }
                }

                iterations[k, i] <- iterations[k, i] + 1

                # Stage-wise (incremental) overall events
                if (k == 1) {
                    simulatedOverallEventsPerStage[k, i] <- simulatedOverallEventsPerStage[k, i] +
                        stageResults$plannedEvents[k]
                } else {
                    simulatedOverallEventsPerStage[k, i] <- simulatedOverallEventsPerStage[k, i] +
                        stageResults$plannedEvents[k] - stageResults$plannedEvents[k - 1]
                }

                for (g in 1:gMax) {
                    dataIterationNumber[index] <- j
                    dataStageNumber[index] <- k
                    dataArmNumber[index] <- g
                    dataAlternative[index] <- i
                    dataEffect[index] <- effectList$hazardRatios[i, g]
                    dataNumberOfEvents[index] <- round(stageResults$eventsPerStage[g, k], 1)
                    dataRejectPerStage[index] <- closedTest$rejected[g, k]
                    dataTestStatistics[index] <- stageResults$testStatistics[g, k]
                    dataSuccessStop[index] <- closedTest$successStop[k]
                    if (k < kMax) {
                        dataFutilityStop[index] <- closedTest$futilityStop[k]
                        dataConditionalCriticalValue[index] <- stageResults$conditionalCriticalValue[k]
                        # NOTE(review): writes at index + 1 (the next row) instead of index;
                        # behavior kept as in the original source — confirm that the intended
                        # alignment is "conditional power achieved at the following row/stage".
                        dataConditionalPowerAchieved[index + 1] <-
                            stageResults$conditionalPowerPerStage[k]
                    }
                    dataEffectEstimate[index] <- stageResults$overallEffects[g, k]
                    dataPValuesSeparate[index] <- closedTest$separatePValues[g, k]
                    index <- index + 1
                }

                if (!rejectAtSomeStage && any(closedTest$rejected[, k] &
                        closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore)) {
                    simulatedRejectAtLeastOne[i] <- simulatedRejectAtLeastOne[i] + 1
                    rejectAtSomeStage <- TRUE
                }

                if ((k < kMax) && (closedTest$successStop[k] || closedTest$futilityStop[k])) {
                    # rejected hypotheses remain rejected also in case of early stopping
                    simulatedRejections[(k + 1):kMax, i, ] <- simulatedRejections[(k + 1):kMax, i, ] +
                        matrix((closedTest$rejected[, k] &
                            closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore),
                        kMax - k, gMax,
                        byrow = TRUE
                        )
                    break
                }

                rejectedPopulationsBefore <- closedTest$rejected[, k] &
                    closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore
            }
        }

        # Average event numbers over the iterations that reached each stage
        simulatedSingleEventsPerStage[, i, ] <- simulatedSingleEventsPerStage[, i, ] / iterations[, i]
        simulatedOverallEventsPerStage[, i] <- simulatedOverallEventsPerStage[, i] / iterations[, i]

        if (kMax > 1) {
            # Convert cumulative rejection counts to per-stage counts
            simulatedRejections[2:kMax, i, ] <- simulatedRejections[2:kMax, i, ] -
                simulatedRejections[1:(kMax - 1), i, ]
            stopping <- cumsum(simulatedSuccessStopping[1:(kMax - 1), i] +
                simulatedFutilityStopping[, i]) / maxNumberOfIterations
            expectedNumberOfEvents[i] <- simulatedOverallEventsPerStage[1, i] +
                t(1 - stopping) %*% simulatedOverallEventsPerStage[2:kMax, i]
        } else {
            expectedNumberOfEvents[i] <- simulatedOverallEventsPerStage[1, i]
        }
    }

    # No conditional power exists before the first interim
    simulatedConditionalPower[1, ] <- NA_real_
    if (kMax > 1) {
        simulatedConditionalPower[2:kMax, ] <- simulatedConditionalPower[2:kMax, ] /
            iterations[2:kMax, ]
    }

    simulationResults$rejectAtLeastOne <- simulatedRejectAtLeastOne / maxNumberOfIterations
    simulationResults$numberOfPopulations <- simulatedNumberOfPopulations / iterations
    simulationResults$selectedPopulations <- simulatedSelections / maxNumberOfIterations
    simulationResults$rejectedPopulationsPerStage <- simulatedRejections / maxNumberOfIterations
    simulationResults$successPerStage <- simulatedSuccessStopping / maxNumberOfIterations
    simulationResults$futilityPerStage <- simulatedFutilityStopping / maxNumberOfIterations
    simulationResults$futilityStop <- base::colSums(simulatedFutilityStopping / maxNumberOfIterations)
    if (kMax > 1) {
        simulationResults$earlyStop <- simulationResults$futilityPerStage +
            simulationResults$successPerStage[1:(kMax - 1), ]
        simulationResults$conditionalPowerAchieved <- simulatedConditionalPower
    }
    simulationResults$singleNumberOfEventsPerStage <- simulatedSingleEventsPerStage
    simulationResults$.setParameterType("singleNumberOfEventsPerStage", C_PARAM_GENERATED)
    simulationResults$expectedNumberOfEvents <- expectedNumberOfEvents
    simulationResults$iterations <- iterations

    if (!all(is.na(simulationResults$conditionalPowerAchieved))) {
        simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED)
    }

    if (any(simulationResults$rejectedPopulationsPerStage < 0)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "internal error, simulation not possible due to numerical overflow")
    }

    data <- data.frame(
        iterationNumber = dataIterationNumber,
        stageNumber = dataStageNumber,
        populationNumber = dataArmNumber,
        omegaMax = dataAlternative,
        effect = dataEffect,
        numberOfEvents = dataNumberOfEvents,
        effectEstimate = dataEffectEstimate,
        testStatistics = dataTestStatistics,
        pValue = dataPValuesSeparate,
        conditionalCriticalValue = round(dataConditionalCriticalValue, 6),
        conditionalPowerAchieved = round(dataConditionalPowerAchieved, 6),
        rejectPerStage = dataRejectPerStage,
        successStop = dataSuccessStop,
        futilityPerStage = dataFutilityStop
    )
    data <- data[!is.na(data$effectEstimate), ]
    simulationResults$.data <- data

    return(simulationResults)
}
rpact/R/class_analysis_stage_results.R0000644000176200001440000016670114445307575017704 0ustar liggesusers## |
## | *Stage results classes*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#'
#' @name StageResults
#'
#' @title
#' Basic Stage Results
#'
#' @description
#' Basic class for stage results.
#'
#' @template field_stages
#' @template field_testStatistics
#' @template field_pValues
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_effectSizes
#' @template field_testActions
#' @template field_weightsFisher
#' @template field_weightsInverseNormal
#'
#' @details
#' \code{StageResults} is the basic class for
#' \itemize{
#' \item \code{\link{StageResultsMeans}},
#' \item \code{\link{StageResultsRates}},
#' \item \code{\link{StageResultsSurvival}},
#' \item \code{\link{StageResultsMultiArmMeans}},
#' \item \code{\link{StageResultsMultiArmRates}},
#' \item \code{\link{StageResultsMultiArmSurvival}},
#' \item \code{\link{StageResultsEnrichmentMeans}},
#' \item \code{\link{StageResultsEnrichmentRates}}, and
#' \item \code{\link{StageResultsEnrichmentSurvival}}.
#' }
#'
#' @include f_core_utilities.R
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_analysis_dataset.R
#' @include f_core_constants.R
#' @include class_core_plot_settings.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
StageResults <- setRefClass("StageResults",
    contains = "ParameterSet",
    fields = list(
        .plotSettings = "PlotSettings",
        .design = "TrialDesign",
        .dataInput = "Dataset",
        stage = "integer",
        stages = "integer",
        pValues = "numeric",
        weightsFisher = "numeric",
        weightsInverseNormal = "numeric",
        thetaH0 = "numeric",
        direction = "character"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
        },
        # Two-step construction: subclasses call init() after callSuper() to
        # attach the design/data input and register parameter types for display.
        init = function(design, dataInput) {
            .design <<- design
            .dataInput <<- dataInput
            .plotSettings <<- PlotSettings()
            if (!missing(design)) {
                stages <<- c(1:design$kMax)
                # 'stages' counts as user defined only for a non-default kMax
                if (design$kMax == C_KMAX_DEFAULT) {
                    .setParameterType("stages", C_PARAM_DEFAULT_VALUE)
                } else {
                    .setParameterType("stages", C_PARAM_USER_DEFINED)
                }
                .parameterNames <<- .getParameterNames(design = design)
            }
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS

            .setParameterType("stage", C_PARAM_NOT_APPLICABLE)
            # multi-arm subclasses report per-arm 'separatePValues' instead
            .setParameterType("pValues", ifelse(
                .isMultiArm(), C_PARAM_NOT_APPLICABLE, C_PARAM_GENERATED
            ))
            .setParameterType("thetaH0", ifelse(
                identical(thetaH0, C_THETA_H0_MEANS_DEFAULT),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
            .setParameterType("direction", ifelse(
                identical(direction, C_DIRECTION_UPPER),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
        },
        getPlotSettings = function() {
            return(.plotSettings)
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing stage results"
            .resetCat()
            if (showType == 2) {
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                # Grouped parameter output: user defined, defaults, then generated values
                .cat(.toString(startWithUpperCase = TRUE), ":\n\n",
                    heading = 1,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getGeneratedParameters(), "Output",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
                # Legend depends on the concrete subclass (enrichment / multi-arm / groups)
                if (grepl("Enrichment", .getClassName(.self))) {
                    .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                    .cat(paste0("  S[i]: population i\n"), consoleOutputEnabled = consoleOutputEnabled)
                    .cat(paste0("  F: full population\n"), consoleOutputEnabled = consoleOutputEnabled)
                } else if (grepl("MultiArm", .getClassName(.self))) {
                    .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                    .cat(
                        paste0(
                            "  (i): results of treatment arm i vs. control group ",
                            .dataInput$getNumberOfGroups(), "\n"
                        ),
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                } else if (.dataInput$getNumberOfGroups(survivalCorrectionEnabled = FALSE) >= 2) {
                    .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                    .cat("  (i): values of treatment arm i\n", consoleOutputEnabled = consoleOutputEnabled)
                }
            }
        },
        isDirectionUpper = function() {
            return(direction == C_DIRECTION_UPPER)
        },
        # Class-name based dispatch helpers used by init() and .toString()
        .isMultiArm = function() {
            return(grepl("multi", tolower(.getClassName(.self))))
        },
        .isEnrichment = function() {
            return(grepl("enrichment", tolower(.getClassName(.self))))
        },
        # Number of comparisons/populations: rows of the test statistic matrix,
        # falling back to 1 for the single-comparison (vector) case
        getGMax = function() {
            if (!is.matrix(testStatistics)) {
                return(1L)
            }
            gMax <- nrow(testStatistics)
            if (is.null(gMax) || gMax == 0) {
                gMax <- 1L
            }
            return(gMax)
        },
        .getParametersToShow = function() {
            return(c("stages"))
        },
        # Human readable description assembled from the concrete class name
        .toString = function(startWithUpperCase = FALSE) {
            s <- "stage results of"

            if (grepl("MultiArm", .getClassName(.self))) {
                s <- paste(s, "multi-arm")
            } else if (grepl("Enrichment", .getClassName(.self))) {
                s <- paste(s, "enrichment")
            }

            if (grepl("Means", .getClassName(.self))) {
                s <- paste(s, "means")
            }
            if (grepl("Rates", .getClassName(.self))) {
                s <- paste(s, "rates")
            }
            if (grepl("Survival", .getClassName(.self))) {
                s <- paste(s, "survival data")
            }
            if (startWithUpperCase) {
                s <- .firstCharacterToUpperCase(s)
            }
            return(s)
        },
        getDataInput = function() {
            return(.dataInput)
        },
        getNumberOfGroups = function() {
            return(.dataInput$getNumberOfGroups())
        },
        isOneSampleDataset = function() {
            return(getNumberOfGroups() == 1)
        },
        isTwoSampleDataset = function() {
            return(getNumberOfGroups() == 2)
        },
        isDatasetMeans = function() {
            return(.dataInput$isDatasetMeans())
        },
        isDatasetRates = function() {
            return(.dataInput$isDatasetRates())
        },
        isDatasetSurvival = function() {
            return(.dataInput$isDatasetSurvival())
        },
        # Number of stages with available results, i.e., the largest non-NA
        # column (matrix case) or element (vector case) count
        getNumberOfStages = function() {
            if (.isMultiArm()) {
                # StageResultsMultiArmRates has no 'effectSizes'-based count
                if (inherits(.self, "StageResultsMultiArmRates")) {
                    return(max(
                        ncol(stats::na.omit(testStatistics)),
                        ncol(stats::na.omit(separatePValues))
                    ))
                }
                return(max(
                    ncol(stats::na.omit(effectSizes)),
                    ncol(stats::na.omit(separatePValues))
                ))
            }
            return(max(
                length(stats::na.omit(effectSizes)),
                length(stats::na.omit(pValues))
            ))
        }
    )
)

#'
#' @name StageResultsMeans
#'
#' @title
#' Stage Results of Means
#'
#' @description
#' Class for stage results of means.
#'
#' @template field_stages
#' @template field_testStatistics
#' @template field_overallTestStatistics
#' @template field_pValues
#' @template field_overallPValues
#' @template field_effectSizes
#' @template field_testActions
#' @template field_direction
#' @template field_normalApproximation
#' @template field_equalVariances
#' @template field_combFisher
#' @template field_weightsFisher
#' @template field_combInverseNormal
#' @template field_weightsInverseNormal
#' @field ... Names of \code{dataInput}.
#'
#' @details
#' This object cannot be created directly; use \code{getStageResults}
#' with suitable arguments to create the stage results of a dataset of means.
#'
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_analysis_dataset.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
StageResultsMeans <- setRefClass("StageResultsMeans",
    contains = "StageResults",
    fields = list(
        combInverseNormal = "numeric",
        combFisher = "numeric",
        overallTestStatistics = "numeric",
        overallPValues = "numeric",
        effectSizes = "numeric",
        testStatistics = "numeric",
        overallMeans = "numeric",
        overallMeans1 = "numeric",
        overallMeans2 = "numeric",
        overallStDevs = "numeric",
        overallStDevs1 = "numeric",
        overallStDevs2 = "numeric",
        overallSampleSizes = "numeric",
        overallSampleSizes1 = "numeric",
        overallSampleSizes2 = "numeric",
        equalVariances = "logical",
        normalApproximation = "logical"
    ),
    methods = list(
        initialize = function(design, dataInput, ...,
                equalVariances = TRUE,
                normalApproximation = FALSE) {
            callSuper(
                .design = design, .dataInput = dataInput, ...,
                equalVariances = equalVariances,
                normalApproximation = normalApproximation
            )
            init(design = design, dataInput = dataInput)
            # Combination test weights/statistics do not apply to this class
            for (param in c(
                "weightsFisher", "weightsInverseNormal",
                "combFisher", "combInverseNormal"
            )) {
                .setParameterType(param, C_PARAM_NOT_APPLICABLE)
            }
            # Everything shown but not yet typed is a generated result
            for (param in .getParametersToShow()) {
                if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) {
                    .setParameterType(param, C_PARAM_GENERATED)
                }
            }
            .setParameterType("equalVariances", ifelse(
                identical(equalVariances, TRUE),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
            .setParameterType("normalApproximation", ifelse(
                identical(normalApproximation, FALSE),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
        },
        # Display order of parameters, depending on number of groups and design type
        .getParametersToShow = function() {
            parametersToShow <- c(
                "stages",
                "overallTestStatistics",
                "overallPValues"
            )
            if (.dataInput$getNumberOfGroups() == 1) {
                parametersToShow <- c(
                    parametersToShow,
                    "overallMeans",
                    "overallStDevs",
                    "overallSampleSizes"
                )
            } else if (.dataInput$getNumberOfGroups() == 2) {
                parametersToShow <- c(
                    parametersToShow,
                    "overallMeans1",
                    "overallMeans2",
                    "overallStDevs1",
                    "overallStDevs2",
                    "overallSampleSizes1",
                    "overallSampleSizes2"
                )
            }
            parametersToShow <- c(
                parametersToShow,
                "testStatistics",
                "pValues",
                "effectSizes"
            )
            if (.isTrialDesignInverseNormal(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combInverseNormal",
                    "weightsInverseNormal"
                )
            } else if (.isTrialDesignFisher(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combFisher",
                    "weightsFisher"
                )
            }
            parametersToShow <- c(
                parametersToShow,
                "thetaH0",
                "direction",
                "normalApproximation"
            )
            if (.dataInput$getNumberOfGroups() == 2) {
                parametersToShow <- c(
                    parametersToShow,
                    "equalVariances"
                )
            }
            return(parametersToShow)
        }
    )
)

#' @name StageResultsMultiArmMeans
#'
#' @title
#' Stage Results Multi Arm Means
#'
#' @description
#' Class for stage results of multi arm means data
#'
#' @template field_stages
#' @template field_testStatistics
#' @template field_pValues
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_effectSizes
#' @template field_testActions
#' @template field_weightsFisher
#' @template field_weightsInverseNormal
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_overallTestStatistics
#' @template field_overallStDevs
#' @template field_overallPooledStDevs
#' @template field_overallPValues
#' @template field_testStatistics
#' @template field_separatePValues
#' @template field_effectSizes
#' @template field_singleStepAdjustedPValues
#' @template field_intersectionTest
#' @template field_varianceOption
#' @template field_normalApproximation
#' @template field_directionUpper
#'
#' @details
#' This object cannot be created directly; use \code{getStageResults}
#' with suitable arguments to create the stage results of a dataset of multi arm means.
#'
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_analysis_dataset.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
StageResultsMultiArmMeans <- setRefClass("StageResultsMultiArmMeans",
    contains = "StageResults",
    fields = list(
        # matrices: one row per treatment arm, one column per stage
        stage = "integer",
        combInverseNormal = "matrix",
        combFisher = "matrix",
        overallTestStatistics = "matrix",
        overallStDevs = "matrix",
        overallPooledStDevs = "matrix",
        overallPValues = "matrix",
        testStatistics = "matrix",
        separatePValues = "matrix",
        effectSizes = "matrix",
        singleStepAdjustedPValues = "matrix",
        intersectionTest = "character",
        varianceOption = "character",
        normalApproximation = "logical",
        directionUpper = "logical"
    ),
    methods = list(
        initialize = function(design, dataInput, ...,
                varianceOption = C_VARIANCE_OPTION_MULTIARMED_DEFAULT,
                normalApproximation = FALSE) {
            callSuper(
                .design = design, .dataInput = dataInput, ...,
                varianceOption = varianceOption,
                normalApproximation = normalApproximation
            )
            init(design = design, dataInput = dataInput)
            # Combination-test fields are not applicable for this class
            for (param in c(
                "singleStepAdjustedPValues",
                "weightsFisher", "weightsInverseNormal",
                "combFisher", "combInverseNormal"
            )) {
                .setParameterType(param, C_PARAM_NOT_APPLICABLE)
            }
            # Everything shown but not yet typed is a generated result
            for (param in .getParametersToShow()) {
                if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) {
                    .setParameterType(param, C_PARAM_GENERATED)
                }
            }
            .setParameterType("varianceOption", ifelse(
                identical(varianceOption, C_VARIANCE_OPTION_MULTIARMED_DEFAULT),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
            .setParameterType("normalApproximation", ifelse(
                identical(normalApproximation, FALSE),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
            .setParameterType("directionUpper", ifelse(
                identical(directionUpper, C_DIRECTION_UPPER_DEFAULT),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
        },
        # Display order of parameters; combination statistics depend on the design type
        .getParametersToShow = function() {
            parametersToShow <- c(
                "stages",
                "thetaH0",
                "direction",
                "normalApproximation",
                "directionUpper",
                "varianceOption",
                "intersectionTest",
                "overallTestStatistics",
                "overallPValues",
                "overallStDevs",
                "overallPooledStDevs",
                "testStatistics",
                "separatePValues",
                "effectSizes",
                "singleStepAdjustedPValues"
            )
            if (.isTrialDesignInverseNormal(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combInverseNormal",
                    "weightsInverseNormal"
                )
            } else if (.isTrialDesignFisher(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combFisher",
                    "weightsFisher"
                )
            }
            return(parametersToShow)
        }
    )
)

#'
#' @name StageResultsRates
#'
#' @title
#' Stage Results of Rates
#'
#' @description
#' Class for stage results of rates.
#'
#' @template field_stages
#' @template field_testStatistics
#' @template field_overallTestStatistics
#' @template field_pValues
#' @template field_overallPValues
#' @template field_effectSizes
#' @template field_direction
#' @template field_testActions
#' @template field_thetaH0
#' @template field_normalApproximation
#' @template field_weightsFisher
#' @template field_weightsInverseNormal
#' @template field_combInverseNormal
#' @template field_combFisher
#' @field ... Names of \code{dataInput}.
#'
#' @details
#' This object cannot be created directly; use \code{getStageResults}
#' with suitable arguments to create the stage results of a dataset of rates.
#'
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_analysis_dataset.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
StageResultsRates <- setRefClass("StageResultsRates",
    contains = "StageResults",
    fields = list(
        combInverseNormal = "numeric",
        combFisher = "numeric",
        overallTestStatistics = "numeric",
        overallPValues = "numeric",
        effectSizes = "numeric",
        testStatistics = "numeric",
        overallPi1 = "numeric",
        overallPi2 = "numeric",
        overallEvents = "numeric",
        overallEvents1 = "numeric",
        overallEvents2 = "numeric",
        overallSampleSizes = "numeric",
        overallSampleSizes1 = "numeric",
        overallSampleSizes2 = "numeric",
        normalApproximation = "logical"
    ),
    methods = list(
        # Note: unlike the means class, normalApproximation defaults to TRUE here
        initialize = function(design, dataInput, ...,
                normalApproximation = TRUE) {
            callSuper(
                .design = design, .dataInput = dataInput, ...,
                normalApproximation = normalApproximation
            )
            init(design = design, dataInput = dataInput)
            # Combination test weights/statistics do not apply to this class
            for (param in c(
                "weightsFisher", "weightsInverseNormal",
                "combFisher", "combInverseNormal"
            )) {
                .setParameterType(param, C_PARAM_NOT_APPLICABLE)
            }
            # Everything shown but not yet typed is a generated result
            for (param in .getParametersToShow()) {
                if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) {
                    .setParameterType(param, C_PARAM_GENERATED)
                }
            }
            .setParameterType("normalApproximation", ifelse(
                identical(normalApproximation, TRUE),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
        },
        # Display order of parameters, depending on number of groups and design type
        .getParametersToShow = function() {
            parametersToShow <- c(
                "stages",
                "overallTestStatistics",
                "overallPValues"
            )
            if (.dataInput$getNumberOfGroups() == 1) {
                parametersToShow <- c(
                    parametersToShow,
                    "overallEvents",
                    "overallSampleSizes",
                    "overallPi1"
                )
            } else if (.dataInput$getNumberOfGroups() == 2) {
                parametersToShow <- c(
                    parametersToShow,
                    "overallEvents1",
                    "overallEvents2",
                    "overallSampleSizes1",
                    "overallSampleSizes2",
                    "overallPi1",
                    "overallPi2"
                )
            }
            parametersToShow <- c(
                parametersToShow,
                "testStatistics",
                "pValues"
            )
            # effect sizes exist only when there is a comparison group
            if (.dataInput$getNumberOfGroups() > 1) {
                parametersToShow <- c(parametersToShow, "effectSizes")
            }
            if (.isTrialDesignInverseNormal(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combInverseNormal",
                    "weightsInverseNormal"
                )
            } else if (.isTrialDesignFisher(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combFisher",
                    "weightsFisher"
                )
            }
            parametersToShow <- c(
                parametersToShow,
                "thetaH0",
                "direction",
                "normalApproximation"
            )
            return(parametersToShow)
        }
    )
)

#' @name StageResultsMultiArmRates
#'
#' @title
#' Stage Results Multi Arm Rates
#'
#' @description
#' Class for stage results of multi arm rates data
#'
#' @template field_stages
#' @template field_testStatistics
#' @template field_pValues
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_effectSizes
#' @template field_testActions
#' @template field_weightsFisher
#' @template field_weightsInverseNormal
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_overallTestStatistics
#' @template field_overallPValues
#' @template field_testStatistics
#' @template field_separatePValues
#' @template field_effectSizes
#' @template field_singleStepAdjustedPValues
#' @template field_intersectionTest
#' @template field_normalApproximation
#' @template field_directionUpper
#'
#' @details
#' This object cannot be created directly; use \code{getStageResults}
#' with suitable arguments to create the stage results of a dataset of multi arm rates.
#'
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_analysis_dataset.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
# Reference class for stage results of a multi-arm rates analysis. Matrix-valued
# fields carry one row per treatment-vs-control comparison. initialize() forwards
# 'normalApproximation' to the super class, marks combination-test fields as not
# applicable, and records whether 'normalApproximation' / 'directionUpper' were
# left at their defaults.
StageResultsMultiArmRates <- setRefClass("StageResultsMultiArmRates",
    contains = "StageResults",
    fields = list(
        stage = "integer",
        overallPiTreatments = "matrix",
        overallPiControl = "matrix",
        combInverseNormal = "matrix",
        combFisher = "matrix",
        overallTestStatistics = "matrix",
        overallPValues = "matrix",
        testStatistics = "matrix",
        separatePValues = "matrix",
        effectSizes = "matrix",
        singleStepAdjustedPValues = "matrix",
        intersectionTest = "character",
        normalApproximation = "logical",
        directionUpper = "logical"
    ),
    methods = list(
        initialize = function(design, dataInput, ..., normalApproximation = FALSE) {
            callSuper(
                .design = design,
                .dataInput = dataInput,
                ...,
                normalApproximation = normalApproximation
            )
            init(design = design, dataInput = dataInput)
            for (param in c(
                "singleStepAdjustedPValues",
                "weightsFisher",
                "weightsInverseNormal",
                "combFisher",
                "combInverseNormal"
            )) {
                .setParameterType(param, C_PARAM_NOT_APPLICABLE)
            }
            for (param in .getParametersToShow()) {
                if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) {
                    .setParameterType(param, C_PARAM_GENERATED)
                }
            }
            .setParameterType("normalApproximation", ifelse(
                identical(normalApproximation, FALSE),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
            # 'directionUpper' here refers to the field populated via callSuper(...).
            .setParameterType("directionUpper", ifelse(
                identical(directionUpper, C_DIRECTION_UPPER_DEFAULT),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
        },
        # Display order of parameters; the combination-test entries depend on the
        # design type (inverse normal vs Fisher).
        .getParametersToShow = function() {
            parametersToShow <- c(
                "stages",
                "thetaH0",
                "direction",
                "normalApproximation",
                "directionUpper",
                "overallPiControl",
                "overallPiTreatments",
                "intersectionTest",
                "overallTestStatistics",
                "overallPValues",
                "testStatistics",
                "separatePValues",
                "singleStepAdjustedPValues"
            )
            if (.isTrialDesignInverseNormal(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combInverseNormal",
                    "weightsInverseNormal"
                )
            } else if (.isTrialDesignFisher(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combFisher",
                    "weightsFisher"
                )
            }
            return(parametersToShow)
        }
    )
)

#'
#' @name StageResultsSurvival
#'
#' @title
#' Stage Results of Survival Data
#'
#' @description
#' Class for stage results survival data.
#'
#' @template field_stages
#' @template field_testStatistics
#' @template field_overallTestStatistics
#' @template field_separatePValues
#' @template field_singleStepAdjustedPValues
#' @template field_overallPValues
#' @template field_direction
#' @template field_directionUpper
#' @template field_intersectionTest
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_effectSizes
#' @template field_testActions
#' @template field_thetaH0
#' @template field_weightsFisher
#' @template field_weightsInverseNormal
#' @template field_normalApproximation
#' @field ... Names of \code{dataInput}.
#'
#' @details
#' This object cannot be created directly; use \code{getStageResults}
#' with suitable arguments to create the stage results of a dataset of survival data.
#'
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_analysis_dataset.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
# Reference class for stage results of a (one- or two-group) survival analysis.
StageResultsSurvival <- setRefClass("StageResultsSurvival",
    contains = "StageResults",
    fields = list(
        combInverseNormal = "numeric",
        combFisher = "numeric",
        overallPValues = "numeric",
        effectSizes = "numeric",
        overallTestStatistics = "numeric",
        overallEvents = "numeric",
        overallAllocationRatios = "numeric",
        events = "numeric",
        allocationRatios = "numeric",
        testStatistics = "numeric"
    ),
    methods = list(
        initialize = function(design, dataInput, ...) {
            callSuper(.design = design, .dataInput = dataInput, ...)
            init(design = design, dataInput = dataInput)
            for (param in c(
                "weightsFisher",
                "weightsInverseNormal",
                "combFisher",
                "combInverseNormal"
            )) {
                .setParameterType(param, C_PARAM_NOT_APPLICABLE)
            }
            for (param in .getParametersToShow()) {
                if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) {
                    .setParameterType(param, C_PARAM_GENERATED)
                }
            }
        },
        .getParametersToShow = function() {
            # NOTE(review): "overallPValues" is listed twice in this vector (3rd entry
            # and again after "pValues") — presumably unintended duplication; confirm
            # whether downstream display code de-duplicates.
            parametersToShow <- c(
                "stages",
                "overallTestStatistics",
                "overallPValues",
                "overallEvents",
                "overallAllocationRatios",
                "events",
                "allocationRatios",
                "testStatistics",
                "pValues",
                "overallPValues",
                "effectSizes"
            )
            if (.isTrialDesignInverseNormal(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combInverseNormal",
                    "weightsInverseNormal"
                )
            } else if (.isTrialDesignFisher(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combFisher",
                    "weightsFisher"
                )
            }
            parametersToShow <- c(
                parametersToShow,
                "thetaH0",
                "direction"
            )
            return(parametersToShow)
        }
    )
)

#' @name StageResultsMultiArmSurvival
#'
#' @title
#' Stage Results Multi Arm Survival
#'
#' @description
#' Class for stage results of multi arm survival data
#'
#' @template field_stages
#' @template field_testStatistics
#' @template field_pValues
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_effectSizes
#' @template field_testActions
#' @template field_weightsFisher
#' @template field_weightsInverseNormal
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_overallTestStatistics
#' @template field_overallPValues
#' @template field_testStatistics
#' @template field_separatePValues
#' @template field_effectSizes
#' @template field_singleStepAdjustedPValues
#' @template field_intersectionTest
#' @template field_directionUpper
#'
#' @details
#' This object cannot be created directly; use \code{getStageResults}
#' with suitable arguments to create the stage results of a dataset of multi arm survival.
#'
#' @include class_core_parameter_set.R
#' @include class_design.R
#' @include class_analysis_dataset.R
#' @include f_core_constants.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
# Reference class for stage results of a multi-arm survival analysis. Matrix-valued
# fields carry one row per treatment-vs-control comparison.
StageResultsMultiArmSurvival <- setRefClass("StageResultsMultiArmSurvival",
    contains = "StageResults",
    fields = list(
        stage = "integer",
        combInverseNormal = "matrix",
        combFisher = "matrix",
        overallTestStatistics = "matrix",
        overallPValues = "matrix",
        testStatistics = "matrix",
        separatePValues = "matrix",
        effectSizes = "matrix",
        singleStepAdjustedPValues = "matrix",
        intersectionTest = "character",
        directionUpper = "logical"
    ),
    methods = list(
        # NOTE(review): 'normalApproximation' is accepted here but neither forwarded to
        # callSuper() nor stored in a field (the sibling StageResultsMultiArmRates class
        # forwards it) — confirm whether this asymmetry is intended for survival data.
        initialize = function(design, dataInput, ..., normalApproximation = FALSE) {
            callSuper(.design = design, .dataInput = dataInput, ...)
            init(design = design, dataInput = dataInput)
            for (param in c(
                "singleStepAdjustedPValues",
                "weightsFisher",
                "weightsInverseNormal",
                "combFisher",
                "combInverseNormal"
            )) {
                .setParameterType(param, C_PARAM_NOT_APPLICABLE)
            }
            for (param in .getParametersToShow()) {
                if (.getParameterType(param) == C_PARAM_TYPE_UNKNOWN) {
                    .setParameterType(param, C_PARAM_GENERATED)
                }
            }
            # 'directionUpper' refers to the field populated via callSuper(...).
            .setParameterType("directionUpper", ifelse(
                identical(directionUpper, C_DIRECTION_UPPER_DEFAULT),
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            ))
        },
        # Display order of parameters; combination-test entries depend on design type.
        .getParametersToShow = function() {
            parametersToShow <- c(
                "stages",
                "thetaH0",
                "direction",
                "directionUpper",
                "intersectionTest",
                "overallTestStatistics",
                "overallPValues",
                "testStatistics",
                "separatePValues",
                "effectSizes",
                "singleStepAdjustedPValues"
            )
            if (.isTrialDesignInverseNormal(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combInverseNormal",
                    "weightsInverseNormal"
                )
            } else if (.isTrialDesignFisher(.design)) {
                parametersToShow <- c(
                    parametersToShow,
                    "combFisher",
                    "weightsFisher"
                )
            }
            return(parametersToShow)
        }
    )
)

#'
#' @name StageResultsEnrichmentMeans
#'
#' @title
#' Stage Results Enrichment Means
#'
#' @description
#' Class for stage results of enrichment means data
#'
#' @template field_stages
#' @template field_thetaH0
#' @template field_direction
#' @template field_normalApproximation
#' @template field_directionUpper
#' @template field_varianceOption
#' @template field_intersectionTest
#' @template field_testStatistics
#' @template field_overallTestStatistics
#' @template field_pValues
#' @template field_overallPValues
#' @template field_overallStDevs
#' @template field_overallPooledStDevs
#' @template field_separatePValues
#' @template field_effectSizes
#' @template field_singleStepAdjustedPValues
#' @template field_stratifiedAnalysis
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_weightsFisher
#' @template field_weightsInverseNormal
#'
#' @details
#' This object cannot be created directly; use \code{getStageResults}
#' with suitable arguments to create the stage results of a dataset of enrichment means.
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
# Enrichment means stage results: reuses the multi-arm means implementation and only
# appends the 'stratifiedAnalysis' flag to the displayed parameters.
StageResultsEnrichmentMeans <- setRefClass("StageResultsEnrichmentMeans",
    contains = "StageResultsMultiArmMeans",
    fields = list(
        .overallSampleSizes1 = "matrix",
        .overallSampleSizes2 = "matrix",
        stratifiedAnalysis = "logical"
    ),
    methods = list(
        .getParametersToShow = function() {
            return(c(callSuper(), "stratifiedAnalysis"))
        }
    )
)

#'
#' @name StageResultsEnrichmentRates
#'
#' @title
#' Stage Results Enrichment Rates
#'
#' @description
#' Class for stage results of enrichment rates data.
#'
#' @template field_stages
#' @template field_testStatistics
#' @template field_pValues
#' @template field_combInverseNormal
#' @template field_combFisher
#' @template field_effectSizes
#' @template field_testActions
#' @template field_weightsFisher
#' @template field_weightsInverseNormal
#'
#' @details
#' This object cannot be created directly; use \code{getStageResults}
#' with suitable arguments to create the stage results of a dataset of enrichment rates.
#' #' @keywords internal #' #' @importFrom methods new #' StageResultsEnrichmentRates <- setRefClass("StageResultsEnrichmentRates", contains = "StageResultsMultiArmRates", fields = list( .overallSampleSizes1 = "matrix", .overallSampleSizes2 = "matrix", overallPisTreatment = "matrix", overallPisControl = "matrix", stratifiedAnalysis = "logical" ), methods = list( .getParametersToShow = function() { parametersToShow <- callSuper() parametersToShow <- parametersToShow[!(parametersToShow %in% c("overallPiTreatments", "overallPiControl"))] return(c(parametersToShow, "stratifiedAnalysis", "overallPisTreatment", "overallPisControl")) } ) ) #' #' @name StageResultsEnrichmentSurvival #' #' @title #' Stage Results Enrichment Survival #' #' @description #' Class for stage results of enrichment survival data. #' #' @template field_stages #' @template field_testStatistics #' @template field_pValues #' @template field_combInverseNormal #' @template field_combFisher #' @template field_effectSizes #' @template field_testActions #' @template field_weightsFisher #' @template field_weightsInverseNormal #' #' @details #' This object cannot be created directly; use \code{getStageResults} #' with suitable arguments to create the stage results of a dataset of enrichment survival. #' #' @keywords internal #' #' @importFrom methods new #' StageResultsEnrichmentSurvival <- setRefClass("StageResultsEnrichmentSurvival", contains = "StageResultsMultiArmSurvival", fields = list( stratifiedAnalysis = "logical", .overallEvents = "matrix" ), methods = list( .getParametersToShow = function() { return(c(callSuper(), "stratifiedAnalysis")) } ) ) #' #' @title #' Names of a Stage Results Object #' #' @description #' Function to get the names of a \code{\link{StageResults}} object. #' #' @param x A \code{\link{StageResults}} object. #' #' @details #' Returns the names of stage results that can be accessed by the user. 
#'
#' @template return_names
#'
#' @export
#'
#' @keywords internal
#'
# S3 names() method: the user-visible names are exactly the parameters selected
# for display by the concrete stage results class.
names.StageResults <- function(x) {
    return(x$.getParametersToShow())
}

#'
#' @title
#' Coerce Stage Results to a Data Frame
#'
#' @description
#' Returns the \code{StageResults} as data frame.
#'
#' @param x A \code{\link{StageResults}} object.
#' @inheritParams param_niceColumnNamesEnabled
#' @inheritParams param_includeAllParameters
#' @inheritParams param_three_dots
#'
#' @details
#' Coerces the stage results to a data frame.
#'
#' @template return_dataframe
#'
#' @export
#'
#' @keywords internal
#'
# S3 as.data.frame() method. For type == 1 (the default) the generic parameter-set
# machinery builds the data frame; the fallback branch builds a two-group,
# stage-by-stage layout by hand. Note: 'optional' is accepted for S3 signature
# compatibility but not used.
as.data.frame.StageResults <- function(x, row.names = NULL,
        optional = FALSE, niceColumnNamesEnabled = FALSE, includeAllParameters = FALSE,
        type = 1, ...) {
    if (type == 1) {
        parametersToShow <- x$.getParametersToShow()
        return(.getAsDataFrame(
            parameterSet = x,
            parameterNames = parametersToShow,
            niceColumnNamesEnabled = niceColumnNamesEnabled,
            includeAllParameters = includeAllParameters,
            tableColumnNames = .getTableColumnNames(design = x$.design)
        ))
    }
    # Fallback layout: one row per (stage, group), group 2 columns padded with NA.
    # NOTE(review): this branch reads means-specific fields (overallMeans1,
    # overallStDevs1, ...) although the method is generic for all StageResults —
    # presumably only reached for means stage results; confirm against callers.
    kMax <- length(x$stages)
    group1 <- rep(1, kMax)
    group2 <- rep(2, kMax)
    empty <- rep(NA_real_, kMax)
    # NOTE(review): the column name "Cumulative stDev" appears twice below
    # (overallStDevs1/2 and overallStDevs); data.frame() will mangle the second
    # into a unique name — looks unintended, confirm.
    stageResults <- data.frame(
        Stage = c(x$stages, x$stages),
        Group = c(group1, group2),
        "Cumulative Mean" = c(x$overallMeans1, x$overallMeans2),
        "Cumulative stDev" = c(x$overallStDevs1, x$overallStDevs2),
        "Cumulative test statistics" = c(x$overallTestStatistics, empty),
        "Overall p-value" = c(x$overallPValues, empty),
        "Cumulative stDev" = c(x$overallStDevs, empty),
        "Stage-wise test statistic" = c(x$testStatistics, empty),
        "Stage-wise p-value" = c(x$pValues, empty),
        "Comb Inverse Normal" = c(x$combInverseNormal, empty),
        "Comb Fisher" = c(x$combFisher, empty),
        "Weights Fisher" = c(x$weightsFisher, empty),
        "Weights Inverse Normal" = c(x$weightsInverseNormal, empty),
        row.names = row.names,
        ...
    )
    stageResults <- stageResults[with(stageResults, order(Stage, Group)), ]
    return(stageResults)
}

# Resolves which treatment-vs-control comparisons to display. 'x' may be a Dataset
# or an object carrying '.dataInput'; an optional 'treatmentArms' argument in '...'
# restricts the selection (validated against 1..(numberOfGroups - 1)).
.getTreatmentArmsToShow <- function(x, ...) {
    dataInput <- x
    if (!inherits(dataInput, "Dataset")) {
        dataInput <- x[[".dataInput"]]
    }
    if (is.null(dataInput) || !inherits(dataInput, "Dataset")) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to get 'dataInput' from ", .getClassName(x))
    }
    numberOfTreatments <- dataInput$getNumberOfGroups()
    if (numberOfTreatments > 1) {
        # One comparison per treatment arm (last group is the control).
        validComparisons <- 1L:as.integer(numberOfTreatments - 1)
    } else {
        validComparisons <- 1L
    }
    treatmentArmsToShow <- .getOptionalArgument("treatmentArms", ...)
    if (!is.null(treatmentArmsToShow)) {
        treatmentArmsToShow <- as.integer(na.omit(treatmentArmsToShow))
    }
    if (is.null(treatmentArmsToShow) || length(treatmentArmsToShow) == 0 ||
            all(is.na(treatmentArmsToShow)) || !is.numeric(treatmentArmsToShow)) {
        treatmentArmsToShow <- validComparisons
    } else if (!all(treatmentArmsToShow %in% validComparisons)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'treatmentArms' (", .arrayToString(treatmentArmsToShow), ") must be a vector ",
            "containing one or more values of ", .arrayToString(validComparisons)
        )
    }
    treatmentArmsToShow <- sort(unique(treatmentArmsToShow))
    return(treatmentArmsToShow)
}

# Enrichment analogue of .getTreatmentArmsToShow(): resolves which populations
# (1..gMax) to display, honoring an optional 'populations' argument in '...'.
.getPopulationsToShow <- function(x, ..., gMax) {
    dataInput <- x
    if (!inherits(dataInput, "Dataset")) {
        dataInput <- x[[".dataInput"]]
    }
    if (is.null(dataInput) || !inherits(dataInput, "Dataset")) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to get 'dataInput' from ", .getClassName(x))
    }
    numberOfPopulations <- gMax
    if (numberOfPopulations > 1) {
        validComparisons <- 1L:as.integer(numberOfPopulations)
    } else {
        validComparisons <- 1L
    }
    populationsToShow <- .getOptionalArgument("populations", ...)
    if (!is.null(populationsToShow)) {
        populationsToShow <- as.integer(na.omit(populationsToShow))
    }
    if (is.null(populationsToShow) || length(populationsToShow) == 0 ||
            all(is.na(populationsToShow)) || !is.numeric(populationsToShow)) {
        populationsToShow <- validComparisons
    } else if (!all(populationsToShow %in% validComparisons)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'populations' (", .arrayToString(populationsToShow), ") must be a vector ",
            "containing one or more values of ", .arrayToString(validComparisons)
        )
    }
    populationsToShow <- sort(unique(populationsToShow))
    return(populationsToShow)
}

#'
#' @title
#' Stage Results Plotting
#'
#' @description
#' Plots the conditional power together with the likelihood function.
#'
#' @param x The stage results at given stage, obtained from \code{\link[=getStageResults]{getStageResults()}} or
#' \code{\link[=getAnalysisResults]{getAnalysisResults()}}.
#' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function).
#' @inheritParams param_stage
#' @inheritParams param_nPlanned
#' @inheritParams param_allocationRatioPlanned
#' @param main The main title.
#' @param xlab The x-axis label.
#' @param ylab The y-axis label.
#' @param legendTitle The legend title.
#' @inheritParams param_palette
#' @inheritParams param_showSource
#' @inheritParams param_plotSettings
#' @inheritParams param_legendPosition
#' @param type The plot type (default = 1). Note that at the moment only one type
#' (the conditional power plot) is available.
#' @param ... Optional \link[=param_three_dots_plot]{plot arguments}. Furthermore the following arguments can be defined:
#' \itemize{
#' \item \code{thetaRange}: A range of assumed effect sizes if testing means or a survival design was specified.
#' Additionally, if testing means was selected, an assumed standard deviation can be specified (default is 1).
#' \item \code{piTreatmentRange}: A range of assumed rates pi1 to calculate the conditional power.
#' Additionally, if a two-sample comparison was selected, pi2 can be specified (default is the value from
#' \code{\link[=getAnalysisResults]{getAnalysisResults()}}).
#' \item \code{directionUpper}: Specifies the direction of the alternative,
#' only applicable for one-sided testing; default is \code{TRUE}
#' which means that larger values of the test statistics yield smaller p-values.
#' \item \code{\link[=param_thetaH0]{thetaH0}}: The null hypothesis value, default is 0 for the normal and the binary case,
#' it is 1 for the survival case.
#' For testing a rate in one sample, a value thetaH0 in (0,1) has to be specified for
#' defining the null hypothesis H0: pi = thetaH0.
#' }
#'
#' @details
#' Generic function to plot all kinds of stage results.
#' The conditional power is calculated only if effect size and sample size is specified.
#'
#' @template return_object_ggplot
#'
#' @examples
#' design <- getDesignGroupSequential(
#'     kMax = 4, alpha = 0.025,
#'     informationRates = c(0.2, 0.5, 0.8, 1),
#'     typeOfDesign = "WT", deltaWT = 0.25
#' )
#'
#' dataExample <- getDataset(
#'     n = c(20, 30, 30),
#'     means = c(50, 51, 55),
#'     stDevs = c(130, 140, 120)
#' )
#'
#' stageResults <- getStageResults(design, dataExample, thetaH0 = 20)
#'
#' \dontrun{
#' if (require(ggplot2)) plot(stageResults, nPlanned = c(30), thetaRange = c(0, 100))
#' }
#'
#' @export
#'
# S3 plot() method for stage results: builds a long-format data frame of conditional
# power and likelihood values (per treatment arm for multi-arm, per population for
# enrichment, or plain otherwise) and delegates rendering to
# .createAnalysisResultsPlotObject().
plot.StageResults <- function(x, y, ..., type = 1L,
        nPlanned, allocationRatioPlanned = 1, # C_ALLOCATION_RATIO_DEFAULT
        main = NA_character_, xlab = NA_character_, ylab = NA_character_,
        legendTitle = NA_character_, palette = "Set1",
        legendPosition = NA_integer_, showSource = FALSE, plotSettings = NULL) {
    fCall <- match.call(expand.dots = FALSE)
    .assertGgplotIsInstalled()
    .assertIsStageResults(x)
    .assertIsValidLegendPosition(legendPosition)
    if (.isConditionalPowerEnabled(nPlanned)) {
        .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, x$.dataInput$getNumberOfGroups())
    }
    .stopInCaseOfIllegalStageDefinition2(...)
    # Fixed designs (kMax == 1) have no stage results to plot.
    if (x$.design$kMax == 1) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "cannot plot stage results of a fixed design")
    }
    # Optionally echo a reproducible command that regenerates the plot data.
    if (!is.logical(showSource) || isTRUE(showSource)) {
        stageResultsName <- .getOptionalArgument("stageResultsName", ...)
        if (is.null(stageResultsName)) {
            stageResultsName <- deparse(fCall$x)
        }
        cat("Source data of the plot:\n")
        cat(" Use getConditionalPower(..., addPlotData = TRUE) to create the data.\n", sep = "")
        cat("Simple plot command example:\n", sep = "")
        cmd <- paste0(
            "condPow <- getConditionalPower(", stageResultsName,
            ", nPlanned = ", .arrayToString(nPlanned, vectorLookAndFeelEnabled = TRUE)
        )
        if (.isConditionalPowerEnabled(nPlanned) &&
                allocationRatioPlanned != C_ALLOCATION_RATIO_DEFAULT) {
            cmd <- paste0(cmd, ", allocationRatioPlanned = ", allocationRatioPlanned)
        }
        if (grepl("Means|Survival", .getClassName(x))) {
            cmd <- paste0(cmd, ", thetaRange = seq(0, 1, 0.1)")
        } else if (grepl("Rates", .getClassName(x))) {
            cmd <- paste0(cmd, ", piTreatmentRange = seq(0, 1, 0.1)")
        }
        cmd <- paste0(cmd, ", addPlotData = TRUE)")
        cat(" ", cmd, "\n", sep = "")
        cat(" plotData <- condPow$.plotData # get plot data list\n", sep = "")
        cat(" plotData # show plot data list\n", sep = "")
        cat(" plot(plotData$xValues, plotData$condPowerValues)\n", sep = "")
        cat(" plot(plotData$xValues, plotData$likelihoodValues)\n", sep = "")
    }
    plotData <- .getConditionalPowerPlot(
        stageResults = x, nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned, ...
    )
    yParameterName1 <- "Conditional power"
    yParameterName2 <- "Likelihood"
    if (.isMultiArmStageResults(x)) {
        # One pair of series (conditional power + likelihood) per treatment arm;
        # arms with all-NA conditional power contribute only the likelihood series.
        treatmentArmsToShow <- .getTreatmentArmsToShow(x, ...)
        data <- data.frame(
            xValues = numeric(0),
            yValues = numeric(0),
            categories = character(0),
            treatmentArms = numeric(0)
        )
        for (treatmentArm in treatmentArmsToShow) {
            legend1 <- ifelse(length(treatmentArmsToShow) == 1,
                yParameterName1,
                paste0(yParameterName1, " (", treatmentArm, " vs control)")
            )
            legend2 <- ifelse(length(treatmentArmsToShow) == 1,
                yParameterName2,
                paste0(yParameterName2, " (", treatmentArm, " vs control)")
            )
            treatmentArmIndices <- which(plotData$treatmentArms == treatmentArm)
            if (all(is.na(plotData$condPowerValues[treatmentArmIndices]))) {
                if (!all(is.na(plotData$likelihoodValues[treatmentArmIndices]))) {
                    data <- rbind(data, data.frame(
                        xValues = plotData$xValues[treatmentArmIndices],
                        yValues = plotData$likelihoodValues[treatmentArmIndices],
                        categories = rep(legend2, length(plotData$xValues[treatmentArmIndices])),
                        treatmentArms = rep(treatmentArm, length(plotData$xValues[treatmentArmIndices]))
                    ))
                }
            } else {
                data <- rbind(data, data.frame(
                    xValues = c(
                        plotData$xValues[treatmentArmIndices],
                        plotData$xValues[treatmentArmIndices]
                    ),
                    yValues = c(
                        plotData$condPowerValues[treatmentArmIndices],
                        plotData$likelihoodValues[treatmentArmIndices]
                    ),
                    categories = c(
                        rep(legend1, length(plotData$xValues[treatmentArmIndices])),
                        rep(legend2, length(plotData$xValues[treatmentArmIndices]))
                    ),
                    treatmentArms = c(
                        rep(treatmentArm, length(plotData$xValues[treatmentArmIndices])),
                        rep(treatmentArm, length(plotData$xValues[treatmentArmIndices]))
                    )
                ))
            }
        }
    } else if (.isEnrichmentStageResults(x)) {
        # Same layout as the multi-arm case, keyed by population; the full
        # population (gMax) is labeled "F", subsets "S1", "S2", ...
        gMax <- max(na.omit(plotData$populations))
        populationsToShow <- .getPopulationsToShow(x, ..., gMax = gMax)
        data <- data.frame(
            xValues = numeric(0),
            yValues = numeric(0),
            categories = character(0),
            populations = numeric(0)
        )
        for (population in populationsToShow) {
            populationName <- ifelse(population == gMax, "F", paste0("S", population))
            legend1 <- ifelse(length(populationsToShow) == 1,
                yParameterName1,
                paste0(yParameterName1, " (", populationName, ")")
            )
            legend2 <- ifelse(length(populationsToShow) == 1,
                yParameterName2,
                paste0(yParameterName2, " (", populationName, ")")
            )
            populationIndices <- which(plotData$populations == population)
            if (all(is.na(plotData$condPowerValues[populationIndices]))) {
                if (!all(is.na(plotData$likelihoodValues[populationIndices]))) {
                    data <- rbind(data, data.frame(
                        xValues = plotData$xValues[populationIndices],
                        yValues = plotData$likelihoodValues[populationIndices],
                        categories = rep(legend2, length(plotData$xValues[populationIndices])),
                        populations = rep(population, length(plotData$xValues[populationIndices]))
                    ))
                }
            } else {
                data <- rbind(data, data.frame(
                    xValues = c(
                        plotData$xValues[populationIndices],
                        plotData$xValues[populationIndices]
                    ),
                    yValues = c(
                        plotData$condPowerValues[populationIndices],
                        plotData$likelihoodValues[populationIndices]
                    ),
                    categories = c(
                        rep(legend1, length(plotData$xValues[populationIndices])),
                        rep(legend2, length(plotData$xValues[populationIndices]))
                    ),
                    populations = c(
                        rep(population, length(plotData$xValues[populationIndices])),
                        rep(population, length(plotData$xValues[populationIndices]))
                    )
                ))
            }
        }
    } else {
        # Single-comparison case: one conditional-power series plus one
        # likelihood series (legend hidden if only the likelihood is available).
        if (all(is.na(plotData$condPowerValues))) {
            legendPosition <- -1
            data <- data.frame(
                xValues = plotData$xValues,
                yValues = plotData$likelihoodValues,
                categories = rep(yParameterName2, length(plotData$xValues))
            )
        } else {
            data <- data.frame(
                xValues = c(plotData$xValues, plotData$xValues),
                yValues = c(plotData$condPowerValues, plotData$likelihoodValues),
                categories = c(
                    rep(yParameterName1, length(plotData$xValues)),
                    rep(yParameterName2, length(plotData$xValues))
                )
            )
        }
    }
    data$categories <- factor(data$categories, levels = unique(data$categories))
    main <- ifelse(is.na(main), C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, main)
    ylab <- ifelse(is.na(ylab), C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD, ylab)
    if (is.na(legendTitle)) {
        legendTitle <- "Parameter"
    }
    return(.createAnalysisResultsPlotObject(x,
        data = data, plotData = plotData, main = main, xlab = xlab, ylab = ylab,
        legendTitle = legendTitle, palette = palette, legendPosition = legendPosition,
        numberOfPairedLines = 2L, plotSettings = plotSettings
    ))
}

# Renders the ggplot object for analysis-results/stage-results plots. Two modes:
# line mode (paired conditional power / likelihood series) and CI mode (activated
# when 'data' carries 'lower' and 'upper' columns), which draws error bars with
# dodged lines and points.
.createAnalysisResultsPlotObject <- function(x, ..., data, plotData,
        main = NA_character_, xlab = NA_character_, ylab = NA_character_,
        legendTitle = NA_character_, palette = "Set1", legendPosition = NA_integer_,
        numberOfPairedLines = NA_integer_, plotSettings = NULL) {
    ciModeEnabled <- !is.null(data[["lower"]]) && !is.null(data[["upper"]])
    if (!ciModeEnabled) {
        p <- ggplot2::ggplot(data, ggplot2::aes(
            x = .data[["xValues"]], y = .data[["yValues"]],
            colour = factor(.data[["categories"]]),
            linetype = factor(.data[["categories"]])
        ))
    } else {
        p <- ggplot2::ggplot(data, ggplot2::aes(
            x = .data[["xValues"]], y = .data[["yValues"]],
            colour = factor(.data[["categories"]])
        ))
    }
    if (is.null(plotSettings)) {
        plotSettings <- x$getPlotSettings()
    }
    p <- plotSettings$setTheme(p)
    p <- plotSettings$hideGridLines(p)
    # set main title
    mainTitle <- ifelse(!is.call(main) && !isS4(main) && is.na(main), plotData$main, main)
    p <- plotSettings$setMainTitle(p, mainTitle, subtitle = plotData$sub)
    # set legend
    if (is.na(legendPosition)) {
        legendPosition <- C_POSITION_LEFT_TOP
    }
    p <- plotSettings$setLegendPosition(p, legendPosition = legendPosition)
    p <- plotSettings$setLegendBorder(p)
    p <- plotSettings$setLegendTitle(p, legendTitle)
    p <- plotSettings$setLegendLabelSize(p)
    # set axes labels
    p <- plotSettings$setAxesLabels(p,
        xAxisLabel = plotData$xlab, yAxisLabel1 = plotData$ylab,
        xlab = xlab, ylab = ylab
    )
    # plot lines and points
    if (!ciModeEnabled) {
        if (is.na(numberOfPairedLines)) {
            numberOfPairedLines <- 2
            if (x$.isMultiArm()) {
                numberOfPairedLines <- length(unique(data$treatmentArms)) - 1
            } else if (x$.isEnrichment()) {
                numberOfPairedLines <- length(unique(data$populations)) - 1
            }
        }
        p <- plotSettings$plotValues(p, plotPointsEnabled = FALSE, pointBorder = 1)
        n <- length(unique(data$categories)) / numberOfPairedLines
        if (n > 1) {
            # Paired series share a color; line type distinguishes the members of
            # a pair. Color indices skip 1 and 3 (shifted below).
            lineTypeValues <- rep(1:numberOfPairedLines, n)
            colorTypes <- sort(rep(1:n, numberOfPairedLines))
            for (i in c(1, 3)) {
                colorTypes[colorTypes >= i] <- colorTypes[colorTypes >= i] + 1
            }
            p <- p + ggplot2::scale_color_manual(name = legendTitle, values = colorTypes)
            p <- p + ggplot2::scale_linetype_manual(name = legendTitle, values = lineTypeValues)
        } else {
            colorValues <- c(2, 4)
            if (!x$.isMultiArm()) {
                colorValues <- c(2, 2) # use only one color
            }
            p <- p + ggplot2::scale_color_manual(name = legendTitle, values = colorValues)
            p <- p + ggplot2::scale_linetype_manual(name = legendTitle, values = c(1, 2))
        }
    }
    # plot confidence intervall
    else {
        pd <- ggplot2::position_dodge(0.15)
        p <- p + ggplot2::geom_errorbar(
            data = data,
            ggplot2::aes(ymin = .data[["lower"]], ymax = .data[["upper"]]),
            width = 0.15, position = pd, size = 0.8
        )
        p <- p + ggplot2::geom_line(position = pd, linetype = "longdash")
        p <- p + ggplot2::geom_point(position = pd, size = 2.0)
        # Use integer axis breaks for stage numbers where possible.
        stage <- unique(data$xValues)
        kMax <- list(...)[["kMax"]]
        if (length(stage) == 1 && !is.null(kMax)) {
            stages <- 1:kMax
            p <- p + ggplot2::scale_x_continuous(breaks = stages)
        } else if (length(stage) > 1 && all(stage %in% 1:10)) {
            p <- p + ggplot2::scale_x_continuous(breaks = stage)
        }
    }
    p <- plotSettings$setAxesAppearance(p)
    p <- plotSettings$enlargeAxisTicks(p)
    companyAnnotationEnabled <- .getOptionalArgument("companyAnnotationEnabled", ...)
    if (is.null(companyAnnotationEnabled) || !is.logical(companyAnnotationEnabled)) {
        companyAnnotationEnabled <- FALSE
    }
    p <- plotSettings$addCompanyAnnotation(p, enabled = companyAnnotationEnabled)
    # start plot generation
    return(p)
}
rpact/R/f_analysis_multiarm_rates.R0000644000176200001440000017517014445307575017170 0ustar liggesusers## |
## |  *Analysis of rates in multi-arm designs with adaptive test*
## |
## |  This file is part of the R package rpact:
## |  Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## |  Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## |  Licensed under "GNU Lesser General Public License" version 3
## |  License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## |  RPACT company website: https://www.rpact.com
## |  rpact package website: https://www.rpact.org
## |
## |  Contact us for information about our services: info@rpact.com
## |
## |  File version: $Revision: 7126 $
## |  Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## |  Last changed by: $Author: pahlke $
## |

#' @include f_logger.R
NULL

#'
#' @title
#' Get Analysis Results Rates
#'
#' @description
#' Returns an analysis result object.
#'
#' @param design The trial design.
#'
#' @return Returns a \code{AnalysisResultsRates} object.
#'
#' @keywords internal
#'
#' @noRd
#'
# Dispatcher: routes the multi-arm rates analysis to the implementation matching
# the design type (inverse normal, Fisher, or conditional Dunnett).
.getAnalysisResultsRatesMultiArm <- function(..., design, dataInput) {
    if (.isTrialDesignInverseNormal(design)) {
        return(.getAnalysisResultsRatesInverseNormalMultiArm(
            design = design, dataInput = dataInput, ...
))
    }
    .stopWithWrongDesignMessage(design)
}

#'
#' Analysis of rates in multi-arm designs based on the inverse normal combination test.
#'
#' @noRd
#'
.getAnalysisResultsRatesInverseNormalMultiArm <- function(..., design, dataInput,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        thetaH0 = C_THETA_H0_RATES_DEFAULT,
        piTreatments = NA_real_,
        piControl = NA_real_,
        nPlanned = NA_real_,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .assertIsTrialDesignInverseNormal(design)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    .warnInCaseOfUnknownArguments(
        functionName = ".getAnalysisResultsRatesInverseNormalMultiArm",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"), ...
    )
    results <- AnalysisResultsMultiArmInverseNormal(design = design, dataInput = dataInput)
    results <- .getAnalysisResultsRatesMultiArmAll(
        results = results, design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        directionUpper = directionUpper,
        normalApproximation = normalApproximation, thetaH0 = thetaH0,
        piTreatments = piTreatments, piControl = piControl,
        nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance
    )
    return(results)
}

#'
#' Analysis of rates in multi-arm designs based on Fisher's combination test.
#' 'iterations' and 'seed' control the simulation of the conditional power.
#'
#' @noRd
#'
.getAnalysisResultsRatesFisherMultiArm <- function(..., design, dataInput,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        thetaH0 = C_THETA_H0_RATES_DEFAULT,
        piTreatments = NA_real_,
        piControl = NA_real_,
        nPlanned = NA_real_,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT,
        iterations = C_ITERATIONS_DEFAULT,
        seed = NA_real_) {
    .assertIsTrialDesignFisher(design)
    .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    .warnInCaseOfUnknownArguments(
        functionName = ".getAnalysisResultsRatesFisherMultiArm",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"), ...
    )
    results <- AnalysisResultsMultiArmFisher(design = design, dataInput = dataInput)
    results <- .getAnalysisResultsRatesMultiArmAll(
        results = results, design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        directionUpper = directionUpper,
        normalApproximation = normalApproximation, thetaH0 = thetaH0,
        piTreatments = piTreatments, piControl = piControl,
        nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance,
        iterations = iterations, seed = seed
    )
    return(results)
}

#'
#' Analysis of rates in multi-arm designs based on the conditional Dunnett test.
#'
#' @noRd
#'
.getAnalysisResultsRatesConditionalDunnettMultiArm <- function(..., design, dataInput,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        thetaH0 = C_THETA_H0_RATES_DEFAULT,
        piTreatments = NA_real_,
        piControl = NA_real_,
        nPlanned = NA_real_,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .assertIsTrialDesignConditionalDunnett(design)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    .warnInCaseOfUnknownArguments(
        functionName = ".getAnalysisResultsRatesConditionalDunnettMultiArm",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"), ...
    )
    results <- AnalysisResultsConditionalDunnett(design = design, dataInput = dataInput)
    # Bug fix: the original call also passed 'iterations = iterations, seed = seed',
    # but neither is a formal argument of this function. The promises were only never
    # forced because the non-Fisher conditional power path does not evaluate them
    # (lazy evaluation). Omit them - consistent with the inverse normal variant - so
    # the call can never fail with "object 'iterations' not found".
    results <- .getAnalysisResultsRatesMultiArmAll(
        results = results, design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        directionUpper = directionUpper,
        normalApproximation = normalApproximation, thetaH0 = thetaH0,
        piTreatments = piTreatments, piControl = piControl,
        nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance
    )
    return(results)
}

#'
#' Common analysis-of-rates implementation for all multi-arm designs
#' (inverse normal, Fisher combination, conditional Dunnett).
#' Note: 'iterations' and 'seed' are only evaluated for Fisher designs
#' (lazy evaluation), so callers for other designs may omit them.
#'
#' @noRd
#'
.getAnalysisResultsRatesMultiArmAll <- function(..., results, design, dataInput,
        intersectionTest, stage, directionUpper, normalApproximation, thetaH0,
        piTreatments, piControl, nPlanned, allocationRatioPlanned,
        tolerance, iterations, seed) {
    startTime <- Sys.time()
    intersectionTest <- .getCorrectedIntersectionTestMultiArmIfNecessary(design, intersectionTest)
    stageResults <- .getStageResultsRatesMultiArm(
        design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        thetaH0 = thetaH0, directionUpper = directionUpper,
        normalApproximation = normalApproximation
    )
    results$.setStageResults(stageResults)
    .logProgress("Stage results calculated", startTime = startTime)
    gMax <- stageResults$getGMax()
    piControl <- .assertIsValidPiControlForMultiArm(piControl, stageResults, stage, results = results)
    piTreatments <- .assertIsValidPiTreatmentsForMultiArm(piTreatments, stageResults, stage, results = results)
    .setValueAndParameterType(
        results, "intersectionTest",
        intersectionTest, C_INTERSECTION_TEST_MULTIARMED_DEFAULT
    )
    .setValueAndParameterType(results, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT)
    .setValueAndParameterType(
        results, "normalApproximation",
        normalApproximation, C_NORMAL_APPROXIMATION_RATES_DEFAULT
    )
    # Consistency fix: use the rates default constant (was C_THETA_H0_MEANS_DEFAULT,
    # a copy-paste from the means implementation); both constants equal 0, so the
    # reported parameter type is unchanged.
    .setValueAndParameterType(results, "thetaH0", thetaH0, C_THETA_H0_RATES_DEFAULT)
    .setConditionalPowerArguments(results, dataInput, nPlanned, allocationRatioPlanned)
.setNPlannedAndPi(results, nPlanned, "piControl", piControl, piTreatments)
    # piControl / piTreatments: store as one-column matrices; only overwrite the
    # parameter type when it has not already been classified by the setters above
    if (results$.getParameterType("piControl") %in% c(C_PARAM_TYPE_UNKNOWN, C_PARAM_NOT_APPLICABLE)) {
        .setValueAndParameterType(
            results, "piControl",
            matrix(piControl, ncol = 1),
            matrix(rep(NA_real_, gMax), ncol = 1)
        )
    } else {
        results$piControl <- matrix(piControl, ncol = 1)
    }
    if (results$.getParameterType("piTreatments") %in% c(C_PARAM_TYPE_UNKNOWN, C_PARAM_NOT_APPLICABLE)) {
        .setValueAndParameterType(
            results, "piTreatments",
            matrix(piTreatments, ncol = 1),
            matrix(rep(NA_real_, gMax), ncol = 1)
        )
    } else {
        results$piTreatments <- matrix(piTreatments, ncol = 1)
    }
    startTime <- Sys.time()
    # closed test: combination-test based for inverse normal / Fisher designs,
    # conditional Dunnett otherwise
    if (!.isTrialDesignConditionalDunnett(design)) {
        results$.closedTestResults <- getClosedCombinationTestResults(stageResults = stageResults)
    } else {
        results$.closedTestResults <- getClosedConditionalDunnettTestResults(
            stageResults = stageResults, design = design, stage = stage
        )
    }
    .logProgress("Closed test calculated", startTime = startTime)
    if (design$kMax > 1) {
        # conditional power (simulated for Fisher designs, analytic otherwise)
        startTime <- Sys.time()
        if (.isTrialDesignFisher(design)) {
            results$.conditionalPowerResults <- .getConditionalPowerRatesMultiArm(
                stageResults = stageResults, stage = stage,
                nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                piTreatments = piTreatments, piControl = piControl,
                iterations = iterations, seed = seed
            )
            .synchronizeIterationsAndSeed(results)
        } else {
            results$.conditionalPowerResults <- .getConditionalPowerRatesMultiArm(
                stageResults = stageResults, stage = stage,
                nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                piTreatments = piTreatments, piControl = piControl
            )
            results$conditionalPower <- results$.conditionalPowerResults$conditionalPower
            results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
        }
        .logProgress("Conditional power calculated", startTime = startTime)
        # CRP - conditional rejection probabilities
        startTime <- Sys.time()
        results$conditionalRejectionProbabilities <- .getConditionalRejectionProbabilitiesMultiArm(
            stageResults = stageResults, stage = stage
        )
        results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_GENERATED)
        .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime)
    } else {
        # single-stage design: conditional quantities are not applicable
        results$.setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE)
        results$.setParameterType("conditionalPowerSimulated", C_PARAM_NOT_APPLICABLE)
        results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_NOT_APPLICABLE)
    }
    # RCI - repeated confidence interval
    repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsRatesMultiArm(
        design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        normalApproximation = normalApproximation, tolerance = tolerance
    )
    # unpack the gMax x 2 x kMax array into lower/upper bound matrices
    results$repeatedConfidenceIntervalLowerBounds <-
        matrix(rep(NA_real_, gMax * design$kMax), nrow = gMax, ncol = design$kMax)
    results$repeatedConfidenceIntervalUpperBounds <- results$repeatedConfidenceIntervalLowerBounds
    for (k in 1:design$kMax) {
        for (treatmentArm in 1:gMax) {
            results$repeatedConfidenceIntervalLowerBounds[treatmentArm, k] <-
                repeatedConfidenceIntervals[treatmentArm, 1, k]
            results$repeatedConfidenceIntervalUpperBounds[treatmentArm, k] <-
                repeatedConfidenceIntervals[treatmentArm, 2, k]
        }
    }
    results$.setParameterType("repeatedConfidenceIntervalLowerBounds", C_PARAM_GENERATED)
    results$.setParameterType("repeatedConfidenceIntervalUpperBounds", C_PARAM_GENERATED)
    # repeated p-value
    results$repeatedPValues <- .getRepeatedPValuesMultiArm(stageResults = stageResults, tolerance = tolerance)
    results$.setParameterType("repeatedPValues", C_PARAM_GENERATED)
    return(results)
}

#'
#' Calculates the stage results (test statistics and p-values, stagewise and
#' overall) for rates in multi-arm designs; optionally also the single-step
#' adjusted p-values and combination test statistics needed for the RCIs.
#'
#' @noRd
#'
.getStageResultsRatesMultiArm <- function(..., design, dataInput,
        thetaH0 = C_THETA_H0_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        calculateSingleStepAdjusted = FALSE,
        userFunctionCallEnabled = FALSE) {
    .assertIsTrialDesign(design)
    .assertIsDatasetRates(dataInput)
    .assertIsValidThetaH0DataInput(thetaH0, dataInput)
    .assertIsValidDirectionUpper(directionUpper, design$sided)
    .assertIsSingleLogical(normalApproximation, "normalApproximation")
    .assertIsSingleLogical(calculateSingleStepAdjusted, "calculateSingleStepAdjusted")
    .warnInCaseOfUnknownArguments(
        functionName = ".getStageResultsRatesMultiArm",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ...
    )
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    # last data group is the control; gMax = number of treatment arms
    gMax <- dataInput$getNumberOfGroups() - 1
    kMax <- design$kMax
    # conditional Dunnett requires the normal approximation; coerce (warn only on user call)
    if (.isTrialDesignConditionalDunnett(design)) {
        if (!normalApproximation) {
            if (userFunctionCallEnabled) {
                warning("'normalApproximation' was set to TRUE ",
                    "because conditional Dunnett test was specified as design",
                    call. = FALSE
                )
            }
            normalApproximation <- TRUE
        }
    }
    intersectionTest <- .getCorrectedIntersectionTestMultiArmIfNecessary(
        design, intersectionTest, userFunctionCallEnabled
    )
    .assertIsValidIntersectionTestMultiArm(design, intersectionTest)
    if (intersectionTest == "Dunnett" && !normalApproximation) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "Dunnett test cannot be used with Fisher's exact test (normalApproximation = FALSE)",
            call. = FALSE
        )
    }
    stageResults <- StageResultsMultiArmRates(
        design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, thetaH0 = thetaH0,
        direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER),
        normalApproximation = normalApproximation,
        directionUpper = directionUpper, stage = stage
    )
    # result containers: rows = treatment arms, columns = stages
    piControl <- matrix(rep(NA_real_, kMax), 1, kMax)
    piTreatments <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    dimnames(testStatistics) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = ""))
    dimnames(overallTestStatistics) <- list(
        paste("arm ", 1:gMax, sep = ""),
        paste("stage ", (1:kMax), sep = "")
    )
    dimnames(separatePValues) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = ""))
    dimnames(overallPValues) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = ""))
    for (k in 1:stage) {
        # cumulative (overall) event rates per stage
        piControl[1, k] <- dataInput$getOverallEvents(stage = k, group = gMax + 1) /
            dataInput$getOverallSampleSizes(stage = k, group = gMax + 1)
        for (treatmentArm in 1:gMax) {
            piTreatments[treatmentArm, k] <- dataInput$getOverallEvents(stage = k, group = treatmentArm) /
                dataInput$getOverallSampleSizes(stage = k, group = treatmentArm)
            # stagewise counts: active arm vs. control
            actEv <- dataInput$getEvents(stage = k, group = treatmentArm)
            ctrEv <- dataInput$getEvents(stage = k, group = gMax + 1)
            actN <- dataInput$getSampleSize(stage = k, group = treatmentArm)
            ctrN <- dataInput$getSampleSize(stage = k, group = gMax + 1)
            if (normalApproximation) {
                if (thetaH0 == 0) {
                    if (!is.na(actEv)) {
                        # degenerate case: all or no events -> statistic defined as 0
                        if ((actEv + ctrEv == 0) || (actEv + ctrEv == actN + ctrN)) {
                            testStatistics[treatmentArm, k] <- 0
                        } else {
                            # pooled-variance z statistic for rate difference under H0: pi1 - pi2 = 0
                            rateH0 <- (actEv + ctrEv) / (actN + ctrN)
                            testStatistics[treatmentArm, k] <- (actEv / actN - ctrEv / ctrN - thetaH0) /
                                sqrt(rateH0 * (1 - rateH0) * (1 / actN + 1 / ctrN))
                        }
                    }
                } else {
                    # shifted hypothesis: use Farrington-Manning restricted ML rates
                    y <- .getFarringtonManningValues(
                        rate1 = actEv / actN, rate2 = ctrEv / ctrN,
                        theta = thetaH0, allocation = actN / ctrN, method = "diff"
                    )
                    testStatistics[treatmentArm, k] <- (actEv / actN - ctrEv / ctrN - thetaH0) /
                        sqrt(y$ml1 * (1 - y$ml1) / actN + y$ml2 * (1 - y$ml2) / ctrN)
                }
                if (directionUpper) {
                    separatePValues[treatmentArm, k] <- 1 - stats::pnorm(testStatistics[treatmentArm, k])
                } else {
                    separatePValues[treatmentArm, k] <- stats::pnorm(testStatistics[treatmentArm, k])
                }
            } else {
                # exact test (hypergeometric tail) requires thetaH0 == 0
                if (thetaH0 != 0) {
                    stop(
                        C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                        "'thetaH0' (", thetaH0, ") must be 0 to perform Fisher's exact test"
                    )
                }
                if (directionUpper) {
                    separatePValues[treatmentArm, k] <- stats::phyper(actEv - 1,
                        actEv + ctrEv, actN + ctrN - actEv - ctrEv, actN,
                        lower.tail = FALSE
                    )
                } else {
                    separatePValues[treatmentArm, k] <- stats::phyper(actEv,
                        actEv + ctrEv, actN + ctrN - actEv - ctrEv, actN,
                        lower.tail = TRUE
                    )
                }
                # derive z-scale statistics from the exact p-values (whole matrix at once)
                if (directionUpper) {
                    testStatistics <- .getOneMinusQNorm(separatePValues)
                } else {
                    testStatistics <- -.getOneMinusQNorm(separatePValues)
                }
            }
            # overall test statistics (based on cumulative counts)
            actEv <- dataInput$getOverallEvents(stage = k, group = treatmentArm)
            ctrEv <- dataInput$getOverallEvents(stage = k, group = gMax + 1)
            actN <- dataInput$getOverallSampleSize(stage = k, group = treatmentArm)
            ctrN <- dataInput$getOverallSampleSize(stage = k, group = gMax + 1)
            if (normalApproximation) {
                if (thetaH0 == 0) {
                    if (!is.na(actEv)) {
                        if ((actEv + ctrEv == 0) || (actEv + ctrEv == actN + ctrN)) {
                            overallTestStatistics[treatmentArm, k] <- 0
                        } else {
                            overallRateH0 <- (actEv + ctrEv) / (actN + ctrN)
                            overallTestStatistics[treatmentArm, k] <- (actEv / actN - ctrEv / ctrN - thetaH0) /
                                sqrt(overallRateH0 * (1 - overallRateH0) * (1 / actN + 1 / ctrN))
                        }
                    }
                } else {
                    y <- .getFarringtonManningValues(
                        rate1 = actEv / actN, rate2 = ctrEv / ctrN,
                        theta = thetaH0, allocation = actN / ctrN, method = "diff"
                    )
                    overallTestStatistics[treatmentArm, k] <- (actEv / actN - ctrEv / ctrN - thetaH0) /
                        sqrt(y$ml1 * (1 - y$ml1) / actN + y$ml2 * (1 - y$ml2) / ctrN)
                }
                if (directionUpper) {
                    overallPValues[treatmentArm, k] <- 1 - stats::pnorm(overallTestStatistics[treatmentArm, k])
                } else {
                    overallPValues[treatmentArm, k] <- stats::pnorm(overallTestStatistics[treatmentArm, k])
                }
            } else {
                if (thetaH0 != 0) {
                    stop(
                        C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                        "'thetaH0' (", thetaH0, ") must be 0 to perform Fisher's exact test"
                    )
                }
                if (directionUpper) {
                    overallPValues[treatmentArm, k] <- stats::phyper(actEv - 1,
                        actEv + ctrEv, actN + ctrN - actEv - ctrEv, actN,
                        lower.tail = FALSE
                    )
                } else {
                    overallPValues[treatmentArm, k] <- stats::phyper(actEv,
                        actEv + ctrEv, actN + ctrN - actEv - ctrEv, actN,
                        lower.tail = TRUE
                    )
                }
                if (directionUpper) {
                    overallTestStatistics <- .getOneMinusQNorm(overallPValues)
                } else {
                    overallTestStatistics <- -.getOneMinusQNorm(overallPValues)
                }
            }
        }
    }
    stageResults$overallPiControl <- piControl
    stageResults$overallPiTreatments <- piTreatments
    stageResults$overallTestStatistics <- overallTestStatistics
    stageResults$overallPValues <- overallPValues
    stageResults$testStatistics <- testStatistics
    stageResults$separatePValues <- separatePValues
    # effect sizes: cumulative rate difference of each treatment arm vs. control
    effectSizes <- matrix(numeric(0), ncol = ncol(piControl))
    for (treatmentArm in 1:gMax) {
        effectSizes <- rbind(effectSizes, piTreatments[treatmentArm, ] - piControl)
    }
    stageResults$effectSizes <- effectSizes
    stageResults$.setParameterType("effectSizes", C_PARAM_GENERATED)
    .setWeightsToStageResults(design, stageResults)
    if (!calculateSingleStepAdjusted) {
        return(stageResults)
    }
    # Calculation of single stage adjusted p-Values and overall test statistics
    # for determination of RCIs
    singleStepAdjustedPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    combInverseNormal <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    combFisher <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    if (.isTrialDesignInverseNormal(design)) {
        weightsInverseNormal <- stageResults$weightsInverseNormal
    } else if (.isTrialDesignFisher(design)) {
        weightsFisher <- stageResults$weightsFisher
    }
    for (k in 1:stage) {
        # number of arms still selected (non-missing p-value) at stage k
        selected <- sum(!is.na(separatePValues[, k]))
        sampleSizesSelected <- as.numeric(na.omit(
            dataInput$getSampleSizes(stage = k, group = -(gMax + 1))
        ))
        # correlation matrix of the arm-vs-control statistics (shared control group)
        sigma <- sqrt(sampleSizesSelected / (sampleSizesSelected + dataInput$getSampleSize(k, gMax + 1))) %*%
            sqrt(t(sampleSizesSelected / (sampleSizesSelected + dataInput$getSampleSize(k, gMax + 1))))
        diag(sigma) <- 1
        for (treatmentArm in 1:gMax) {
            if ((intersectionTest == "Bonferroni") || (intersectionTest == "Simes")) {
                if (.isTrialDesignGroupSequential(design)) {
                    overallPValues[treatmentArm, k] <- min(1, overallPValues[treatmentArm, k] * selected)
                } else {
                    singleStepAdjustedPValues[treatmentArm, k] <- min(1, separatePValues[treatmentArm, k] * selected)
                }
            } else if (intersectionTest == "Sidak") {
                if (.isTrialDesignGroupSequential(design)) {
                    overallPValues[treatmentArm, k] <- 1 - (1 - overallPValues[treatmentArm, k])^selected
                } else {
                    singleStepAdjustedPValues[treatmentArm, k] <- 1 - (1 - separatePValues[treatmentArm, k])^selected
                }
            } else if (intersectionTest == "Dunnett") {
                if (!is.na(testStatistics[treatmentArm, k])) {
                    df <- NA_real_
                    singleStepAdjustedPValues[treatmentArm, k] <- 1 - .getMultivariateDistribution(
                        type = "normal",
                        upper = ifelse(directionUpper, testStatistics[treatmentArm, k], -testStatistics[treatmentArm, k]),
                        sigma = sigma, df = df
                    )
                }
            }
            # combine the adjusted stagewise p-values across stages 1..k
            if (.isTrialDesignInverseNormal(design)) {
                combInverseNormal[treatmentArm, k] <- (weightsInverseNormal[1:k] %*%
                    .getOneMinusQNorm(singleStepAdjustedPValues[treatmentArm, 1:k])) /
                    sqrt(sum(weightsInverseNormal[1:k]^2))
            } else if (.isTrialDesignFisher(design)) {
                combFisher[treatmentArm, k] <- prod(singleStepAdjustedPValues[treatmentArm, 1:k]^weightsFisher[1:k])
            }
        }
    }
    stageResults$singleStepAdjustedPValues <- singleStepAdjustedPValues
    stageResults$.setParameterType("singleStepAdjustedPValues", C_PARAM_GENERATED)
    if (.isTrialDesignFisher(design)) {
        stageResults$combFisher <- combFisher
        stageResults$.setParameterType("combFisher", C_PARAM_GENERATED)
    } else if (.isTrialDesignInverseNormal(design)) {
        stageResults$combInverseNormal <- combInverseNormal
        stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED)
    }
    return(stageResults)
}

#'
#' Finds the value of thetaH0 at which the combination statistic (or adjusted
#' p-value, see 'firstParameterName') equals 'secondValue'; used to invert the
#' test into repeated confidence interval limits.
#'
#' @noRd
#'
.getRootThetaRatesMultiArm <- function(..., design, dataInput, treatmentArm, stage,
        directionUpper, normalApproximation, intersectionTest,
        thetaLow, thetaUp, firstParameterName, secondValue, tolerance) {
    result <- .getOneDimensionalRoot(
        function(theta) {
            stageResults <- .getStageResultsRatesMultiArm(
                design = design, dataInput = dataInput,
                stage = stage, thetaH0 = theta, directionUpper = directionUpper,
                intersectionTest = intersectionTest,
                normalApproximation = normalApproximation,
                calculateSingleStepAdjusted = TRUE
            )
            firstValue <- stageResults[[firstParameterName]][treatmentArm, stage]
            # group sequential designs work on p-values: map to the z-scale
            if (.isTrialDesignGroupSequential(design)) {
                firstValue <- .getOneMinusQNorm(firstValue)
            }
            return(firstValue - secondValue)
        },
        lower = thetaLow, upper = thetaUp, tolerance = tolerance,
        callingFunctionInformation = ".getRootThetaRatesMultiArm"
    )
    return(result)
}

#'
#' Repeated confidence intervals for rates in multi-arm designs, common part
#' for all design types; returns a gMax x 2 x kMax array (lower/upper bounds).
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRatesMultiArmAll <- function(..., design, dataInput,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) {
    .assertIsValidIntersectionTestMultiArm(design, intersectionTest)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    stageResults <- .getStageResultsRatesMultiArm(
        design = design, dataInput = dataInput,
        stage = stage, thetaH0 = 0, directionUpper = directionUpper,
        intersectionTest = intersectionTest,
        normalApproximation = normalApproximation,
        calculateSingleStepAdjusted = FALSE
    )
    gMax <- dataInput$getNumberOfGroups() - 1
    repeatedConfidenceIntervals <- array(NA_real_, dim = c(gMax, 2, design$kMax))
    # Confidence interval for second stage when using conditional Dunnett test
    if (.isTrialDesignConditionalDunnett(design)) {
        startTime <- Sys.time()
        for (treatmentArm in 1:gMax) {
            if (!is.na(stageResults$testStatistics[treatmentArm, 2])) {
                # bisection over theta in [-1, 1] for the lower bound (at most 50 iterations)
                thetaLow <- -1
                thetaUp <- 1
                iteration <- 50
                prec <- 1
                while (prec > tolerance) {
                    theta <- (thetaLow + thetaUp) / 2
                    stageResults <- .getStageResultsRatesMultiArm(
                        design = design, dataInput = dataInput,
                        stage = stage, thetaH0 = theta, directionUpper = TRUE,
                        intersectionTest = intersectionTest,
                        normalApproximation = TRUE, calculateSingleStepAdjusted = FALSE
                    )
                    conditionalDunnettSingleStepRejected <- .getConditionalDunnettTestForCI(
                        design = design, stageResults = stageResults, treatmentArm = treatmentArm
                    )
                    ifelse(conditionalDunnettSingleStepRejected, thetaLow <- theta, thetaUp <- theta)
                    ifelse(iteration > 0, prec <- thetaUp - thetaLow, prec <- 0)
                    iteration <- iteration - 1
                }
                repeatedConfidenceIntervals[treatmentArm, 1, 2] <- theta
                # analogous bisection for the upper bound (directionUpper = FALSE)
                thetaLow <- -1
                thetaUp <- 1
                iteration <- 50
                prec <- 1
                while (prec > tolerance) {
                    theta <- (thetaLow + thetaUp) / 2
                    stageResults <- .getStageResultsRatesMultiArm(
                        design = design, dataInput = dataInput,
                        stage = stage, thetaH0 = theta, directionUpper = FALSE,
                        intersectionTest = intersectionTest,
                        normalApproximation = TRUE, calculateSingleStepAdjusted = FALSE
                    )
                    conditionalDunnettSingleStepRejected <- .getConditionalDunnettTestForCI(
                        design = design, stageResults = stageResults, treatmentArm = treatmentArm
                    )
                    ifelse(conditionalDunnettSingleStepRejected, thetaUp <- theta, thetaLow <- theta)
                    ifelse(iteration > 0, prec <- thetaUp - thetaLow, prec <- 0)
                    iteration <- iteration - 1
                }
                repeatedConfidenceIntervals[treatmentArm, 2, 2] <- theta
                # invalid (crossed) interval -> report NA bounds
                if (!is.na(repeatedConfidenceIntervals[treatmentArm, 1, 2]) &&
                        !is.na(repeatedConfidenceIntervals[treatmentArm, 2, 2]) &&
                        repeatedConfidenceIntervals[treatmentArm, 1, 2] > repeatedConfidenceIntervals[treatmentArm, 2, 2]) {
                    repeatedConfidenceIntervals[treatmentArm, , 2] <- rep(NA_real_, 2)
                }
            }
        }
.logProgress("Confidence intervals for final stage calculated", startTime = startTime)
    } else {
        # Repeated confidence intervals when using combination tests
        if (intersectionTest == "Hierarchical") {
            warning("Repeated confidence intervals not available for ",
                "'intersectionTest' = \"Hierarchical\"",
                call. = FALSE
            )
            return(repeatedConfidenceIntervals)
        }
        # design-specific futility bounds and critical values used below
        if (.isTrialDesignFisher(design)) {
            bounds <- design$alpha0Vec
            border <- C_ALPHA_0_VEC_DEFAULT
            criticalValues <- design$criticalValues
            conditionFunction <- .isFirstValueSmallerThanSecondValue
        } else if (.isTrialDesignInverseNormal(design)) {
            bounds <- design$futilityBounds
            border <- C_FUTILITY_BOUNDS_DEFAULT
            criticalValues <- design$criticalValues
            # clamp infinite critical values so the root search stays numeric
            criticalValues[is.infinite(criticalValues) & criticalValues > 0] <- C_QNORM_MAXIMUM
            criticalValues[is.infinite(criticalValues) & criticalValues < 0] <- C_QNORM_MINIMUM
            conditionFunction <- .isFirstValueGreaterThanSecondValue
        }
        # necessary for adjustment for binding futility boundaries
        futilityCorr <- rep(NA_real_, design$kMax)
        stages <- (1:stage)
        for (k in stages) {
            startTime <- Sys.time()
            for (treatmentArm in 1:gMax) {
                if (!is.na(stageResults$testStatistics[treatmentArm, k]) &&
                        criticalValues[k] < C_QNORM_MAXIMUM) {
                    thetaLow <- -1 + tolerance
                    thetaUp <- 1 - tolerance
                    # finding upper and lower RCI limits through root function
                    repeatedConfidenceIntervals[treatmentArm, 1, k] <- .getRootThetaRatesMultiArm(
                        design = design, dataInput = dataInput,
                        treatmentArm = treatmentArm, stage = k,
                        directionUpper = TRUE,
                        normalApproximation = normalApproximation,
                        thetaLow = thetaLow, thetaUp = thetaUp,
                        intersectionTest = intersectionTest,
                        firstParameterName = firstParameterName,
                        secondValue = criticalValues[k], tolerance = tolerance
                    )
                    repeatedConfidenceIntervals[treatmentArm, 2, k] <- .getRootThetaRatesMultiArm(
                        design = design, dataInput = dataInput,
                        treatmentArm = treatmentArm, stage = k,
                        directionUpper = FALSE,
                        normalApproximation = normalApproximation,
                        thetaLow = thetaLow, thetaUp = thetaUp,
                        intersectionTest = intersectionTest,
                        firstParameterName = firstParameterName,
                        secondValue = criticalValues[k], tolerance = tolerance
                    )
                    # adjustment for binding futility bounds
                    if (k > 1 && !is.na(bounds[k - 1]) &&
                            conditionFunction(bounds[k - 1], border) && design$bindingFutility) {
                        parameterName <- ifelse(.isTrialDesignFisher(design),
                            "singleStepAdjustedPValues",
                            firstParameterName
                        )
                        futilityCorr[k] <- .getRootThetaRatesMultiArm(
                            design = design, dataInput = dataInput,
                            treatmentArm = treatmentArm, stage = k - 1,
                            directionUpper = directionUpper,
                            normalApproximation = normalApproximation,
                            thetaLow = thetaLow, thetaUp = thetaUp,
                            intersectionTest = intersectionTest,
                            firstParameterName = parameterName,
                            secondValue = bounds[k - 1], tolerance = tolerance
                        )
                        # widen the affected limit by the futility correction
                        if (directionUpper) {
                            repeatedConfidenceIntervals[treatmentArm, 1, k] <- min(
                                min(futilityCorr[2:k]),
                                repeatedConfidenceIntervals[treatmentArm, 1, k]
                            )
                        } else {
                            repeatedConfidenceIntervals[treatmentArm, 2, k] <- max(
                                max(futilityCorr[2:k]),
                                repeatedConfidenceIntervals[treatmentArm, 2, k]
                            )
                        }
                    }
                    # invalid (crossed) interval -> report NA bounds
                    if (!is.na(repeatedConfidenceIntervals[treatmentArm, 1, k]) &&
                            !is.na(repeatedConfidenceIntervals[treatmentArm, 2, k]) &&
                            repeatedConfidenceIntervals[treatmentArm, 1, k] > repeatedConfidenceIntervals[treatmentArm, 2, k]) {
                        repeatedConfidenceIntervals[treatmentArm, , k] <- rep(NA_real_, 2)
                    }
                }
            }
            .logProgress("Repeated confidence intervals for stage %s calculated", startTime = startTime, k)
        }
    }
    return(repeatedConfidenceIntervals)
}

#'
#' RCIs based on inverse normal combination test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRatesMultiArmInverseNormal <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    # RCIs are only defined under the normal approximation; coerce with a message
    if (!normalApproximation) {
        message("Repeated confidence intervals will be calculated under the normal approximation")
        normalApproximation <- TRUE
    }
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsRatesMultiArmInverseNormal",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ...
    )
    return(.getRepeatedConfidenceIntervalsRatesMultiArmAll(
        design = design, dataInput = dataInput,
        normalApproximation = normalApproximation, directionUpper = directionUpper,
        intersectionTest = intersectionTest, tolerance = tolerance,
        firstParameterName = "combInverseNormal", ...
    ))
}

#'
#' RCIs based on Fisher's combination test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRatesMultiArmFisher <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    # RCIs are only defined under the normal approximation; coerce with a message
    if (!normalApproximation) {
        message("Repeated confidence intervals will be calculated under the normal approximation")
        normalApproximation <- TRUE
    }
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsRatesMultiArmFisher",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"), ...
    )
    return(.getRepeatedConfidenceIntervalsRatesMultiArmAll(
        design = design, dataInput = dataInput,
        normalApproximation = normalApproximation, directionUpper = directionUpper,
        intersectionTest = intersectionTest, tolerance = tolerance,
        firstParameterName = "combFisher", ...
    ))
}

#'
#' CIs based on conditional Dunnett test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRatesMultiArmConditionalDunnett <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsRatesMultiArmConditionalDunnett",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ...
    )
    return(.getRepeatedConfidenceIntervalsRatesMultiArmAll(
        design = design, dataInput = dataInput,
        normalApproximation = normalApproximation, directionUpper = directionUpper,
        intersectionTest = intersectionTest, tolerance = tolerance,
        firstParameterName = "condDunnett", ...
    ))
}

#'
#' Calculation of repeated confidence intervals (RCIs) for Rates
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRatesMultiArm <- function(..., design) {
    # dispatch on the design type; fail for unsupported designs
    if (.isTrialDesignInverseNormal(design)) {
        return(.getRepeatedConfidenceIntervalsRatesMultiArmInverseNormal(design = design, ...))
    }
    if (.isTrialDesignFisher(design)) {
        return(.getRepeatedConfidenceIntervalsRatesMultiArmFisher(design = design, ...))
    }
    if (.isTrialDesignConditionalDunnett(design)) {
        return(.getRepeatedConfidenceIntervalsRatesMultiArmConditionalDunnett(design = design, ...))
    }
    .stopWithWrongDesignMessage(design)
}

#'
#' Calculation of conditional power for Rates
#'
#' @noRd
#'
.getConditionalPowerRatesMultiArm <- function(..., stageResults, stage = stageResults$stage,
        nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        piTreatments = NA_real_, piControl = NA_real_,
        useAdjustment = TRUE, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    design <- stageResults$.design
    gMax <- stageResults$getGMax()
    # conditional Dunnett is a two-stage procedure by definition
    if (.isTrialDesignConditionalDunnett(design)) {
        kMax <- 2
    } else {
        kMax <- design$kMax
    }
    # optional H1 arguments override the corresponding pi arguments
    piTreatmentsH1 <-
.getOptionalArgument("piTreatmentsH1", ...)
    if (!is.null(piTreatmentsH1) && !is.na(piTreatmentsH1)) {
        if (!is.na(piTreatments)) {
            warning(sQuote("piTreatments"), " will be ignored because ",
                sQuote("piTreatmentsH1"), " is defined",
                call. = FALSE
            )
        }
        piTreatments <- piTreatmentsH1
    }
    piControlH1 <- .getOptionalArgument("piControlH1", ...)
    if (!is.null(piControlH1) && !is.na(piControlH1)) {
        if (!is.na(piControl)) {
            warning(sQuote("piControl"), " will be ignored because ",
                sQuote("piControlH1"), " is defined",
                call. = FALSE
            )
        }
        piControl <- piControlH1
    }
    results <- ConditionalPowerResultsMultiArmRates(
        .design = design,
        .stageResults = stageResults,
        piControl = piControl,
        piTreatments = piTreatments,
        nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned
    )
    # without planned future sample sizes there is nothing to compute
    if (any(is.na(nPlanned))) {
        return(results)
    }
    .assertIsValidStage(stage, kMax)
    # conditional power is only defined for subsequent stages
    if (stage == kMax) {
        .logDebug(
            "Conditional power will be calculated only for subsequent stages ",
            "(stage = ", stage, ", kMax = ", kMax, ")"
        )
        return(results)
    }
    if (!.isValidNPlanned(nPlanned = nPlanned, kMax = kMax, stage = stage)) {
        return(results)
    }
    .assertIsValidNPlanned(nPlanned, kMax, stage)
    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)
    .setValueAndParameterType(results, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT)
    piControl <- .assertIsValidPiControlForMultiArm(piControl, stageResults, stage, results = results)
    piTreatments <- .assertIsValidPiTreatmentsForMultiArm(piTreatments, stageResults, stage, results = results)
    results$.setParameterType("nPlanned", C_PARAM_USER_DEFINED)
    # 'piTreatments' must be scalar (recycled) or one value per treatment arm
    if ((length(piTreatments) != 1) && (length(piTreatments) != gMax)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            sprintf(paste0(
                "length of 'piTreatments' (%s) ",
                "must be equal to 'gMax' (%s) or 1"
            ), .arrayToString(piTreatments), gMax)
        )
    }
    if (length(piControl) != 1) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            sprintf(paste0("length of 'piControl' (%s) must be equal to 1"), .arrayToString(piControl))
        )
    }
    # dispatch on the design type
    if (.isTrialDesignInverseNormal(design)) {
        return(.getConditionalPowerRatesMultiArmInverseNormal(
            results = results, design = design, stageResults = stageResults,
            stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned,
            piControl = piControl, piTreatments = piTreatments, ...
        ))
    } else if (.isTrialDesignFisher(design)) {
        return(.getConditionalPowerRatesMultiArmFisher(
            results = results, design = design, stageResults = stageResults,
            stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned,
            useAdjustment = useAdjustment,
            piControl = piControl, piTreatments = piTreatments,
            iterations = iterations, seed = seed, ...
        ))
    } else if (.isTrialDesignConditionalDunnett(design)) {
        return(.getConditionalPowerRatesMultiArmConditionalDunnett(
            results = results, design = design, stageResults = stageResults,
            stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned,
            piControl = piControl, piTreatments = piTreatments, ...
        ))
    }
    stop(
        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
        "'design' must be an instance of TrialDesignInverseNormal, TrialDesignFisher, ",
        "or TrialDesignConditionalDunnett"
    )
}

#'
#' Calculation of conditional power based on inverse normal method
#'
#' @noRd
#'
.getConditionalPowerRatesMultiArmInverseNormal <- function(..., results, design, stageResults,
        stage, allocationRatioPlanned, nPlanned, piTreatments, piControl) {
    .assertIsTrialDesignInverseNormal(design)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalPowerRatesMultiArmInverseNormal",
        ignore = c("piTreatmentsH1", "piControlH1"), ...
    )
    kMax <- design$kMax
    gMax <- stageResults$getGMax()
    weights <- .getWeightsInverseNormal(design)
    informationRates <- design$informationRates
    # pad nPlanned so index positions correspond to stages
    nPlanned <- c(rep(NA_real_, stage), nPlanned)
    condError <- .getConditionalRejectionProbabilitiesMultiArm(design = design, stageResults = stageResults)[, stage]
    # variance adjustment reflecting the difference between H0-pooled and
    # H1-based variances of the rate difference estimate
    ml <- (allocationRatioPlanned * piTreatments + piControl) / (1 + allocationRatioPlanned)
    adjustment <- .getOneMinusQNorm(condError) *
        (1 - sqrt(ml * (1 - ml) * (1 + allocationRatioPlanned)) /
            sqrt(piTreatments * (1 - piTreatments) +
                allocationRatioPlanned * piControl * (1 - piControl))) *
        (1 + allocationRatioPlanned) /
        sqrt(allocationRatioPlanned * sum(nPlanned[(stage + 1):kMax]))
    # avoid spurious adjustment when the conditional error is numerically zero
    adjustment[condError < 1e-12] <- 0
    results$.setParameterType("piControl", C_PARAM_DEFAULT_VALUE)
    # recycle a scalar piTreatments to all arms
    if (length(piTreatments) == 1) {
        piTreatments <- rep(piTreatments, gMax)
        results$.setParameterType("piTreatments", C_PARAM_GENERATED)
    } else {
        results$.setParameterType("piTreatments", C_PARAM_DEFAULT_VALUE)
    }
    # standardized effect per arm (sign flipped for lower-directed alternatives)
    if (stageResults$directionUpper) {
        standardizedEffect <- (piTreatments - piControl - stageResults$thetaH0) /
            sqrt(piTreatments * (1 - piTreatments) +
                allocationRatioPlanned * piControl * (1 - piControl)) *
            sqrt(1 + allocationRatioPlanned) + adjustment
    } else {
        standardizedEffect <- -(piTreatments - piControl - stageResults$thetaH0) /
            sqrt(piTreatments * (1 - piTreatments) +
                allocationRatioPlanned * piControl * (1 - piControl)) *
            sqrt(1 + allocationRatioPlanned) + adjustment
    }
    # convert total planned sizes to effective per-comparison information
    nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned
    ctr <- .performClosedCombinationTest(stageResults = stageResults)
    criticalValues <- design$criticalValues
    for (treatmentArm in 1:gMax) {
        if (!is.na(ctr$separatePValues[treatmentArm, stage])) {
            # shifted decision region for use in getGroupSeqProbs
            # Inverse Normal Method
            shiftedDecisionRegionUpper <- criticalValues[(stage + 1):kMax] *
                sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) /
                sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
                min(ctr$overallAdjustedTestStatistics[ctr$indices[, treatmentArm] == 1, stage], na.rm = TRUE) *
                    sqrt(sum(weights[1:stage]^2)) /
                    sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
                standardizedEffect[treatmentArm] *
                    cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) /
                    sqrt(cumsum(weights[(stage + 1):kMax]^2))
            if (stage == kMax - 1) {
                shiftedFutilityBounds <- c()
            } else {
                shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] *
                    sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) /
                    sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
                    min(ctr$overallAdjustedTestStatistics[ctr$indices[, treatmentArm] == 1, stage], na.rm = TRUE) *
                        sqrt(sum(weights[1:stage]^2)) /
                        sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
                    standardizedEffect[treatmentArm] *
                        cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) /
                        sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2))
            }
            # scaled information for use in getGroupSeqProbs
            scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) /
                (1 - informationRates[stage])
            decisionMatrix <- matrix(c(
                shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                shiftedDecisionRegionUpper
            ), nrow = 2, byrow = TRUE)
            probs <- .getGroupSequentialProbabilities(
                decisionMatrix = decisionMatrix,
                informationRates = scaledInformation
            )
            # conditional power = cumulative rejection probability over future stages
            results$conditionalPower[treatmentArm, (stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ])
        }
    }
    # restore nPlanned to the total planned sample size scale
    nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
    results$nPlanned <- nPlanned
    results$.setParameterType("nPlanned", C_PARAM_GENERATED)
    results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
    results$piTreatments <- piTreatments
    results$piControl <- piControl
    return(results)
}

#'
#' Calculation of conditional power based on Fisher's combination test
#'
#' @noRd
#'
.getConditionalPowerRatesMultiArmFisher <- function(..., results, design, stageResults,
        stage, allocationRatioPlanned, nPlanned, piTreatments, piControl,
useAdjustment = TRUE, iterations, seed) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerRatesMultiArmFisher", ignore = c("piTreatmentsH1", "piControlH1"), ... ) kMax <- design$kMax gMax <- stageResults$getGMax() criticalValues <- design$criticalValues weightsFisher <- .getWeightsFisher(design) results$iterations <- as.integer(iterations) results$.setParameterType("iterations", C_PARAM_USER_DEFINED) results$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) results$seed <- .setSeed(seed) results$simulated <- FALSE results$.setParameterType("simulated", C_PARAM_DEFAULT_VALUE) nPlanned <- c(rep(NA_real_, stage), nPlanned) if (useAdjustment) { condError <- .getConditionalRejectionProbabilitiesMultiArm( design = design, stageResults = stageResults, iterations = iterations, seed = seed )[, stage] ml <- (allocationRatioPlanned * piTreatments + piControl) / (1 + allocationRatioPlanned) adjustment <- .getOneMinusQNorm(condError) * (1 - sqrt(ml * (1 - ml) * (1 + allocationRatioPlanned)) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControl * (1 - piControl))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * sum(nPlanned[(stage + 1):kMax])) adjustment[condError < 1e-12] <- 0 } else { adjustment <- 0 } if (length(piTreatments) == 1) { piTreatments <- rep(piTreatments, gMax) results$.setParameterType("piTreatments", C_PARAM_GENERATED) } else { results$.setParameterType("piTreatments", C_PARAM_DEFAULT_VALUE) } if (stageResults$directionUpper) { standardizedEffect <- (piTreatments - piControl) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControl * (1 - piControl)) * sqrt(1 + allocationRatioPlanned) + adjustment } else { standardizedEffect <- -(piTreatments - piControl - stageResults$thetaH0) / sqrt(piTreatments * (1 - piTreatments) + 
allocationRatioPlanned * piControl * (1 - piControl)) * sqrt(1 + allocationRatioPlanned) + adjustment } nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned ctr <- .performClosedCombinationTest(stageResults = stageResults) for (treatmentArm in 1:gMax) { if (!is.na(ctr$separatePValues[treatmentArm, stage])) { if (gMax == 1) { pValues <- ctr$adjustedStageWisePValues[ctr$indices[, treatmentArm] == 1, ][1:stage] } else { pValues <- ctr$adjustedStageWisePValues[ctr$indices[, treatmentArm] == 1, ][which.max( ctr$overallAdjustedTestStatistics[ctr$indices[, treatmentArm] == 1, stage] ), 1:stage] } if (stage < kMax - 1) { for (k in (stage + 1):kMax) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueConditionalPowerFisher( kMax = kMax, alpha0Vec = design$alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, pValues = pValues, currentKMax = k, thetaH1 = standardizedEffect[treatmentArm], stage = stage, nPlanned = nPlanned ) } results$conditionalPower[treatmentArm, k] <- reject / iterations } results$simulated <- TRUE results$.setParameterType("simulated", C_PARAM_GENERATED) } else if (stage == kMax - 1) { divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)]) result <- 1 - (criticalValues[kMax] / divisor)^(1 / weightsFisher[kMax]) if (result <= 0 || result >= 1) { warning("Calculation not possible: could not calculate conditional power for stage ", kMax, call. 
= FALSE) results$conditionalPower[treatmentArm, kMax] <- NA_real_ } else { results$conditionalPower[treatmentArm, kMax] <- 1 - stats::pnorm(.getQNorm(result) - standardizedEffect[treatmentArm] * sqrt(nPlanned[kMax])) } } } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned results$nPlanned <- nPlanned results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$piTreatments <- piTreatments results$piControl <- piControl if (!results$simulated) { results$iterations <- NA_integer_ results$seed <- NA_real_ results$.setParameterType("iterations", C_PARAM_NOT_APPLICABLE) results$.setParameterType("seed", C_PARAM_NOT_APPLICABLE) } return(results) } #' #' Calculation of conditional power based on conditional Dunnett test #' #' @noRd #' .getConditionalPowerRatesMultiArmConditionalDunnett <- function(..., results, design, stageResults, stage, allocationRatioPlanned, nPlanned, piTreatments, piControl) { .assertIsTrialDesignConditionalDunnett(design) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerRatesMultiArmConditionalDunnett", ignore = c("intersectionTest", "piTreatmentsH1", "piControlH1"), ... ) if (stage > 1) { warning("Conditional power is only calculated for the first (interim) stage", call. 
= FALSE) } gMax <- stageResults$getGMax() nPlanned <- c(rep(NA_real_, stage), nPlanned) condError <- .getConditionalRejectionProbabilitiesMultiArm(design = design, stageResults = stageResults)[, 2] ml <- (allocationRatioPlanned * piTreatments + piControl) / (1 + allocationRatioPlanned) adjustment <- .getOneMinusQNorm(condError) * (1 - sqrt(ml * (1 - ml) * (1 + allocationRatioPlanned)) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControl * (1 - piControl))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * sum(nPlanned[(stage + 1):2])) adjustment[condError < 1e-12] <- 0 if (length(piTreatments) == 1) { piTreatments <- rep(piTreatments, gMax) results$.setParameterType("piTreatments", C_PARAM_GENERATED) } else { results$.setParameterType("piTreatments", C_PARAM_DEFAULT_VALUE) } nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned if (stageResults$directionUpper) { standardizedEffect <- (piTreatments - piControl - stageResults$thetaH0) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControl * (1 - piControl)) * sqrt(1 + allocationRatioPlanned) + adjustment } else { standardizedEffect <- -(piTreatments - piControl - stageResults$thetaH0) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControl * (1 - piControl)) * sqrt(1 + allocationRatioPlanned) + adjustment } ctr <- .getClosedConditionalDunnettTestResults(stageResults = stageResults, design = design, stage = stage) for (treatmentArm in 1:gMax) { if (!is.na(ctr$separatePValues[treatmentArm, stage])) { results$conditionalPower[treatmentArm, 2] <- 1 - stats::pnorm(.getOneMinusQNorm(min(ctr$conditionalErrorRate[ ctr$indices[, treatmentArm] == 1, stage ], na.rm = TRUE)) - standardizedEffect[treatmentArm] * sqrt(nPlanned[2])) } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned results$nPlanned <- nPlanned results$.setParameterType("nPlanned", C_PARAM_GENERATED) 
results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$piTreatments <- piTreatments results$piControl <- piControl return(results) } #' #' Calculation of conditional power and likelihood values for plotting the graph #' #' @noRd #' .getConditionalPowerLikelihoodRatesMultiArm <- function(..., stageResults, stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, piTreatmentRange, piControl = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .associatedArgumentsAreDefined(nPlanned = nPlanned, piTreatmentRange = piTreatmentRange) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) design <- stageResults$.design kMax <- design$kMax gMax <- stageResults$getGMax() intersectionTest <- stageResults$intersectionTest piControl <- .assertIsValidPiControlForMultiArm(piControl, stageResults, stage) if (length(piControl) != 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'piControl' (", .arrayToString(piControl), ") must be equal to 1" ) } piTreatmentRange <- .assertIsValidPiTreatmentRange(piTreatmentRange = piTreatmentRange) treatmentArms <- numeric(gMax * length(piTreatmentRange)) effectValues <- numeric(gMax * length(piTreatmentRange)) condPowerValues <- numeric(gMax * length(piTreatmentRange)) likelihoodValues <- numeric(gMax * length(piTreatmentRange)) stdErr <- sqrt(stageResults$overallPiTreatments[, stage] * (1 - stageResults$overallPiTreatments[, stage])) / sqrt(stageResults$.dataInput$getOverallSampleSizes(stage = stage, group = (1:gMax))) results <- ConditionalPowerResultsMultiArmRates( .design = design, .stageResults = stageResults, piControl = piControl, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned ) j <- 1 for (i in seq(along = piTreatmentRange)) { for (treatmentArm in (1:gMax)) { treatmentArms[j] <- treatmentArm effectValues[j] <- piTreatmentRange[i] if 
(.isTrialDesignInverseNormal(design)) { condPowerValues[j] <- .getConditionalPowerRatesMultiArmInverseNormal( results = results, design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, piControl = piControl, piTreatments = piTreatmentRange[i] )$conditionalPower[treatmentArm, kMax] } else if (.isTrialDesignFisher(design)) { condPowerValues[j] <- .getConditionalPowerRatesMultiArmFisher( results = results, design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, useAdjustment = FALSE, piControl = piControl, piTreatments = piTreatmentRange[i], iterations = iterations, seed = seed )$conditionalPower[treatmentArm, kMax] } else if (.isTrialDesignConditionalDunnett(design)) { condPowerValues[j] <- .getConditionalPowerRatesMultiArmConditionalDunnett( results = results, design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, piControl = piControl, piTreatments = piTreatmentRange[i] )$conditionalPower[treatmentArm, 2] } likelihoodValues[j] <- stats::dnorm(piTreatmentRange[i], stageResults$overallPiTreatments[treatmentArm, stage], stdErr[treatmentArm]) / stats::dnorm(0, 0, stdErr[treatmentArm]) j <- j + 1 } } subtitle <- paste0( "Intersection test = ", intersectionTest, ", stage = ", stage, ", # of remaining subjects = ", sum(nPlanned), ", control rate = ", .formatSubTitleValue(piControl, "piControl"), ", allocation ratio = ", .formatSubTitleValue(allocationRatioPlanned, "allocationRatioPlanned") ) return(list( treatmentArms = treatmentArms, xValues = effectValues, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main = C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, xlab = "Treatment rate", ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD, sub = subtitle )) } 
rpact/R/class_analysis_results.R0000644000176200001440000022036514445307575016526 0ustar  liggesusers## |
## |  *Analysis result classes*
## |
## |  This file is part of the R package rpact:
## |  Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## |  Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## |  Licensed under "GNU Lesser General Public License" version 3
## |  License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## |  RPACT company website: https://www.rpact.com
## |  rpact package website: https://www.rpact.org
## |
## |  Contact us for information about our services: info@rpact.com
## |
## |  File version: $Revision: 7126 $
## |  Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## |  Last changed by: $Author: pahlke $
## |

#'
#' @name ConditionalPowerResults
#'
#' @title
#' Conditional Power Results
#'
#' @description
#' Class for conditional power calculations
#'
#' @template field_nPlanned
#' @template field_allocationRatioPlanned
#' @template field_iterations
#' @template field_seed
#' @template field_simulated
#' @template field_conditionalPower
#' @template field_thetaH1
#' @template field_assumedStDev
#'
#' @details
#' This object cannot be created directly; use \code{\link[=getConditionalPower]{getConditionalPower()}}
#' with suitable arguments to create the results of a group sequential or a combination test design.
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
ConditionalPowerResults <- setRefClass("ConditionalPowerResults",
    contains = "ParameterSet",
    fields = list(
        .plotSettings = "PlotSettings",
        .design = "TrialDesign",
        .stageResults = "StageResults",
        .plotData = "list",
        nPlanned = "numeric",
        allocationRatioPlanned = "numeric",
        iterations = "integer",
        seed = "numeric",
        simulated = "logical"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            .plotSettings <<- PlotSettings()
            .parameterNames <<- C_PARAMETER_NAMES
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS
            # fall back to the design stored in the stage results if none was given
            if (!is.null(.stageResults) && is.null(.design)) {
                .design <<- .stageResults$.design
            }
            if (is.null(simulated) || length(simulated) == 0 || is.na(simulated)) {
                .self$simulated <<- FALSE
            }
            # for a fixed design (kMax = 1) conditional power is not applicable
            if (!is.null(.design) && length(.design$kMax) == 1 && .design$kMax == 1L) {
                .setParameterType("nPlanned", C_PARAM_NOT_APPLICABLE)
                .setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE)
                .setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE)
            } else {
                .setParameterType("nPlanned", C_PARAM_GENERATED)
                .setParameterType("allocationRatioPlanned", C_PARAM_USER_DEFINED)
                .setParameterType("conditionalPower", C_PARAM_GENERATED)
            }
            .setParameterType("simulated", C_PARAM_NOT_APPLICABLE)
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing conditional power result objects"
            .resetCat()
            if (showType == 2) {
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                if (!is.null(.design) && length(.design$kMax) == 1 && .design$kMax == 1) {
                    .cat(.toString(), ": not applicable for fixed design (kMax = 1)\n",
                        heading = 1,
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                } else {
                    .cat(.toString(), ":\n\n",
                        heading = 1,
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                }
                .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getGeneratedParameters(), "Output",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
            }
        },
        .toString = function(startWithUpperCase = FALSE) {
            return("Conditional power results")
        }
    )
)

#'
#' @name ConditionalPowerResultsMeans
#'
#' @title
#' Conditional Power Results Means
#'
#' @description
#' Class for conditional power calculations of means data
#'
#' @template field_nPlanned
#' @template field_allocationRatioPlanned
#' @template field_iterations
#' @template field_seed
#' @template field_simulated
#' @template field_conditionalPower
#' @template field_thetaH1
#' @template field_assumedStDev
#'
#' @details
#' This object cannot be created directly; use \code{\link{getConditionalPower}}
#' with suitable arguments to create the results of a group sequential or a combination test design.
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
ConditionalPowerResultsMeans <- setRefClass("ConditionalPowerResultsMeans",
    contains = "ConditionalPowerResults",
    fields = list(
        conditionalPower = "numeric",
        thetaH1 = "numeric",
        assumedStDev = "numeric"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            # default unset fields to NA of the appropriate length
            if ((is.null(conditionalPower) || length(conditionalPower) == 0) &&
                    !is.null(.design) && !is.null(.design$kMax) && length(.design$kMax) > 0) {
                conditionalPower <<- rep(NA_real_, .design$kMax)
            }
            if (is.null(thetaH1) || length(thetaH1) == 0 || all(is.na(thetaH1))) {
                thetaH1 <<- NA_real_
            }
            if (is.null(assumedStDev) || length(assumedStDev) == 0 || all(is.na(assumedStDev))) {
                assumedStDev <<- NA_real_
            }
        },
        .toString = function(startWithUpperCase = FALSE) {
            return("Conditional power results means")
        }
    )
)

# Base class for conditional power results of multiple hypotheses
# (multi-arm and enrichment designs): conditionalPower is a
# (treatment arm/population) x (stage) matrix.
ConditionalPowerResultsMultiHypotheses <- setRefClass("ConditionalPowerResultsMultiHypotheses",
    contains = "ConditionalPowerResults",
    fields = list(
        conditionalPower = "matrix"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            if (.readyForInitialization()) {
                gMax <- getGMax()
                kMax <- .design$kMax
                if (is.null(conditionalPower) || (nrow(conditionalPower) == 0 && ncol(conditionalPower) == 0)) {
                    conditionalPower <<- matrix(rep(NA_real_, gMax * kMax), nrow = gMax, ncol = kMax)
                }
            }
        },
        .toString = function(startWithUpperCase = FALSE) {
            # derive the display name from the concrete class name
            s <- "Conditional power results"
            s <- paste0(s, " ", ifelse(grepl("Enrichment", .getClassName(.stageResults)), "enrichment", "multi-arm"))
            if (grepl("Means", .getClassName(.self))) {
                s <- paste0(s, " means")
            } else if (grepl("Rates", .getClassName(.self))) {
                s <- paste0(s, " rates")
            } else if (grepl("Survival", .getClassName(.self))) {
                s <- paste0(s, " survival")
            }
            return(s)
        },
        getGMax = function() {
            return(.stageResults$getGMax())
        },
        .readyForInitialization = function() {
            # TRUE only if design and stage results are complete enough
            # to size the conditionalPower matrix
            if (is.null(.design)) {
                return(FALSE)
            }
            if (length(.design$kMax) != 1) {
                return(FALSE)
            }
            if (is.null(.stageResults)) {
                return(FALSE)
            }
            if (is.null(.stageResults$testStatistics)) {
                return(FALSE)
            }
            return(TRUE)
        }
    )
)

# Conditional power results for multi-arm means designs.
ConditionalPowerResultsMultiArmMeans <- setRefClass("ConditionalPowerResultsMultiArmMeans",
    contains = "ConditionalPowerResultsMultiHypotheses",
    fields = list(
        thetaH1 = "numeric",
        assumedStDevs = "numeric"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            if (.readyForInitialization()) {
                gMax <- getGMax()
                if (is.null(thetaH1) || length(thetaH1) == 0 || all(is.na(thetaH1))) {
                    thetaH1 <<- rep(NA_real_, gMax)
                }
                if (is.null(assumedStDevs) || length(assumedStDevs) == 0 || all(is.na(assumedStDevs))) {
                    assumedStDevs <<- rep(NA_real_, gMax)
                }
            }
        }
    )
)

#'
#' @name ConditionalPowerResultsRates
#'
#' @title
#' Conditional Power Results Rates
#'
#' @description
#' Class for conditional power calculations of rates data
#'
#' @template field_nPlanned
#' @template field_allocationRatioPlanned
#' @template field_iterations
#' @template field_seed
#' @template field_simulated
#' @template field_conditionalPower
#' @template field_pi1
#' @template field_pi2
#'
#' @details
#' This object cannot be created directly; use \code{\link{getConditionalPower}}
#' with suitable arguments to create the results of a group sequential or a combination test design.
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
ConditionalPowerResultsRates <- setRefClass("ConditionalPowerResultsRates",
    contains = "ConditionalPowerResults",
    fields = list(
        conditionalPower = "numeric",
        pi1 = "numeric",
        pi2 = "numeric"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            if ((is.null(conditionalPower) || length(conditionalPower) == 0) &&
                    !is.null(.design) && !is.null(.design$kMax) && length(.design$kMax) > 0) {
                conditionalPower <<- rep(NA_real_, .design$kMax)
            }
            if (is.null(pi1) || length(pi1) == 0 || all(is.na(pi1))) {
                pi1 <<- NA_real_
            }
            if (is.null(pi2) || length(pi2) == 0 || all(is.na(pi2))) {
                pi2 <<- NA_real_
            }
        },
        .toString = function(startWithUpperCase = FALSE) {
            return("Conditional power results rates")
        }
    )
)

# Conditional power results for multi-arm rates designs.
ConditionalPowerResultsMultiArmRates <- setRefClass("ConditionalPowerResultsMultiArmRates",
    contains = "ConditionalPowerResultsMultiHypotheses",
    fields = list(
        piTreatments = "numeric",
        piControl = "numeric"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            if (.readyForInitialization()) {
                gMax <- getGMax()
                if (is.null(piControl) || length(piControl) == 0 || all(is.na(piControl))) {
                    piControl <<- NA_real_
                }
                if (is.null(piTreatments) || length(piTreatments) == 0 || all(is.na(piTreatments))) {
                    piTreatments <<- rep(NA_real_, gMax)
                }
            }
        }
    )
)

#'
#' @name ConditionalPowerResultsSurvival
#'
#' @title
#' Conditional Power Results Survival
#'
#' @description
#' Class for conditional power calculations of survival data
#'
#' @template field_nPlanned
#' @template field_allocationRatioPlanned
#' @template field_iterations
#' @template field_seed
#' @template field_simulated
#' @template field_conditionalPower
#' @template field_thetaH1_survival
#'
#' @details
#' This object cannot be created directly; use \code{\link{getConditionalPower}}
#' with suitable arguments to create the results of a group sequential or a combination test design.
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
ConditionalPowerResultsSurvival <- setRefClass("ConditionalPowerResultsSurvival",
    contains = "ConditionalPowerResults",
    fields = list(
        conditionalPower = "numeric",
        thetaH1 = "numeric"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            if ((is.null(conditionalPower) || length(conditionalPower) == 0) &&
                    !is.null(.design) && !is.null(.design$kMax) && length(.design$kMax) > 0) {
                conditionalPower <<- rep(NA_real_, .design$kMax)
            }
            if (is.null(thetaH1) || length(thetaH1) == 0 || all(is.na(thetaH1))) {
                thetaH1 <<- NA_real_
            }
        },
        .toString = function(startWithUpperCase = FALSE) {
            return("Conditional power results survival")
        }
    )
)

# Conditional power results for multi-arm survival designs.
ConditionalPowerResultsMultiArmSurvival <- setRefClass("ConditionalPowerResultsMultiArmSurvival",
    contains = "ConditionalPowerResultsMultiHypotheses",
    fields = list(
        thetaH1 = "numeric"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            if (.readyForInitialization()) {
                gMax <- getGMax()
                if (is.null(thetaH1) || length(thetaH1) == 0 || all(is.na(thetaH1))) {
                    thetaH1 <<- rep(NA_real_, gMax)
                }
            }
        }
    )
)

#'
#' @name ConditionalPowerResultsEnrichmentMeans
#'
#' @title
#' Conditional Power Results Enrichment Means
#'
#' @description
#' Class for conditional power calculations of enrichment means data
#'
#' @template field_nPlanned
#' @template field_allocationRatioPlanned
#' @template field_iterations
#' @template field_seed
#' @template field_simulated
#' @template field_conditionalPower
#' @template field_thetaH1
#' @template field_assumedStDevs
#'
#' @details
#' This object cannot be created directly; use \code{\link{getConditionalPower}}
#' with suitable arguments to create the results of a group sequential or a combination test design.
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
ConditionalPowerResultsEnrichmentMeans <- setRefClass("ConditionalPowerResultsEnrichmentMeans",
    contains = "ConditionalPowerResultsMultiArmMeans"
)

#'
#' @name ConditionalPowerResultsEnrichmentRates
#'
#' @title
#' Conditional Power Results Enrichment Rates
#'
#' @description
#' Class for conditional power calculations of enrichment rates data
#'
#' @template field_nPlanned
#' @template field_allocationRatioPlanned
#' @template field_iterations
#' @template field_seed
#' @template field_simulated
#' @template field_conditionalPower
#' @template field_piTreatments
#' @template field_piControls
#'
#' @details
#' This object cannot be created directly; use \code{\link{getConditionalPower}}
#' with suitable arguments to create the results of a group sequential or a combination test design.
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
ConditionalPowerResultsEnrichmentRates <- setRefClass("ConditionalPowerResultsEnrichmentRates",
    contains = "ConditionalPowerResultsMultiHypotheses",
    fields = list(
        piTreatments = "numeric",
        piControls = "numeric"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            if (.readyForInitialization()) {
                gMax <- getGMax()
                # note: unlike the multi-arm variant, there is one control rate per population
                if (is.null(piControls) || length(piControls) == 0 || all(is.na(piControls))) {
                    piControls <<- rep(NA_real_, gMax)
                }
                if (is.null(piTreatments) || length(piTreatments) == 0 || all(is.na(piTreatments))) {
                    piTreatments <<- rep(NA_real_, gMax)
                }
            }
        }
    )
)

# Conditional power results for enrichment survival designs
# (same structure as the multi-arm survival variant).
ConditionalPowerResultsEnrichmentSurvival <- setRefClass("ConditionalPowerResultsEnrichmentSurvival",
    contains = "ConditionalPowerResultsMultiArmSurvival"
)

#'
#' @name ClosedCombinationTestResults
#'
#' @title
#' Analysis Results Closed Combination Test
#'
#' @description
#' Class for multi-arm analysis results based on a closed combination test.
#'
#' @template field_intersectionTest
#' @template field_indices
#' @template field_adjustedStageWisePValues
#' @template field_overallAdjustedTestStatistics
#' @template field_separatePValues
#' @template field_conditionalErrorRate
#' @template field_secondStagePValues
#' @template field_rejected
#' @template field_rejectedIntersections
#'
#' @details
#' This object cannot be created directly; use \code{\link{getAnalysisResults}}
#' with suitable arguments to create the multi-arm analysis results of a closed combination test design.
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
ClosedCombinationTestResults <- setRefClass("ClosedCombinationTestResults",
    contains = "ParameterSet",
    fields = list(
        .plotSettings = "PlotSettings",
        .design = "TrialDesign",
        .enrichment = "logical",
        intersectionTest = "character",
        indices = "matrix",
        adjustedStageWisePValues = "matrix",
        overallAdjustedTestStatistics = "matrix",
        separatePValues = "matrix",
        conditionalErrorRate = "matrix",
        secondStagePValues = "matrix",
        rejected = "matrix",
        rejectedIntersections = "matrix"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            .plotSettings <<- PlotSettings()
            .parameterNames <<- C_PARAMETER_NAMES
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS
            .setParameterType("intersectionTest", C_PARAM_USER_DEFINED)
            # the set of generated parameters depends on the design type
            parametersGenerated <- c(
                "indices",
                "separatePValues",
                "rejected",
                "rejectedIntersections"
            )
            if (inherits(.design, "TrialDesignConditionalDunnett")) {
                parametersGenerated <- c(
                    parametersGenerated,
                    "conditionalErrorRate",
                    "secondStagePValues"
                )
            } else {
                parametersGenerated <- c(
                    parametersGenerated,
                    "adjustedStageWisePValues",
                    "overallAdjustedTestStatistics"
                )
            }
            for (param in parametersGenerated) {
                .setParameterType(param, C_PARAM_GENERATED)
            }
            # Fisher designs use a dedicated format for the combination test statistics
            if (!is.null(.design) && inherits(.design, C_CLASS_NAME_TRIAL_DESIGN_FISHER)) {
                .parameterFormatFunctions$overallAdjustedTestStatistics <<- ".formatTestStatisticsFisher"
            }
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing closed combination test result objects"
            .resetCat()
            if (showType == 2) {
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                .cat(.toString(), ":\n\n",
                    heading = 1,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                designParametersToShow <- c(
                    ".design$stages",
                    ".design$alpha"
                )
                if (inherits(.design, "TrialDesignConditionalDunnett")) {
                    designParametersToShow <- c(
                        designParametersToShow,
                        ".design$informationAtInterim",
                        ".design$secondStageConditioning"
                    )
                }
                .showParametersOfOneGroup(designParametersToShow, "Design parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getGeneratedParameters(), "Output",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
                .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                if (isTRUE(.enrichment)) {
                    .cat(paste0(" S[i]: population i\n"), consoleOutputEnabled = consoleOutputEnabled)
                    .cat(paste0(" F: full population\n"), consoleOutputEnabled = consoleOutputEnabled)
                } else {
                    .cat(paste0(
                        " (i): results of treatment arm i vs. control group ",
                        (nrow(separatePValues) + 1), "\n"
                    ), consoleOutputEnabled = consoleOutputEnabled)
                    .cat(" [i]: hypothesis number\n",
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                }
            }
        },
        .toString = function(startWithUpperCase = FALSE) {
            s <- "Closed combination test results"
            if (inherits(.design, "TrialDesignConditionalDunnett")) {
                s <- paste0(s, " (Conditional Dunnett)")
            }
            return(s)
        },
        .getHypothesisTreatmentArms = function(number) {
            # treatment arms contained in intersection hypothesis 'number'
            result <- c()
            for (i in 1:ncol(indices)) {
                if (indices[number, i] == 1) {
                    result <- c(result, i)
                }
            }
            return(result)
        },
        .getHypothesisTreatmentArmVariants = function() {
            # one label per intersection hypothesis, e.g. "1, 2, 3"
            result <- c()
            for (number in 1:nrow(indices)) {
                arms <- .getHypothesisTreatmentArms(number)
                result <- c(result, paste0(arms, collapse = ", "))
            }
            return(result)
        },
        .getHypothesisPopulationVariants = function() {
            # enrichment labels, e.g. "S1, S2, F"; the last population is the full population
            result <- c()
            gMax <- 1
            for (number in 1:nrow(indices)) {
                arms <- .getHypothesisTreatmentArms(number)
                if (number == 1) {
                    gMax <- length(arms)
                }
                arms <- paste0("S", arms)
                arms[arms == paste0("S", gMax)] <- "F"
                result <- c(result, paste0(arms, collapse = ", "))
            }
            return(result)
        }
    )
)

#'
#' @name AnalysisResults
#'
#' @title
#' Basic Class for Analysis Results
#'
#' @description
#' A basic class for analysis results.
#' #' @details #' \code{AnalysisResults} is the basic class for #' \itemize{ #' \item \code{\link{AnalysisResultsFisher}}, #' \item \code{\link{AnalysisResultsGroupSequential}}, #' \item \code{\link{AnalysisResultsInverseNormal}}, #' \item \code{\link{AnalysisResultsMultiArmFisher}}, #' \item \code{\link{AnalysisResultsMultiArmInverseNormal}}, #' \item \code{\link{AnalysisResultsConditionalDunnett}}, #' \item \code{\link{AnalysisResultsEnrichmentFisher}}, #' \item \code{\link{AnalysisResultsEnrichmentInverseNormal}}. #' } #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' @include class_analysis_stage_results.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResults <- setRefClass("AnalysisResults", contains = "ParameterSet", fields = list( .plotSettings = "PlotSettings", .design = "TrialDesign", .dataInput = "Dataset", .stageResults = "StageResults", .conditionalPowerResults = "ConditionalPowerResults", normalApproximation = "logical", directionUpper = "logical", thetaH0 = "numeric", pi1 = "numeric", pi2 = "numeric", nPlanned = "numeric", allocationRatioPlanned = "numeric" ), methods = list( initialize = function(design, dataInput, ...) { callSuper(.design = design, .dataInput = dataInput, ...) 
.plotSettings <<- PlotSettings() .parameterNames <<- .getParameterNames(design = design, analysisResults = .self) .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS }, .setStageResults = function(stageResults) { .stageResults <<- stageResults .parameterNames <<- .getParameterNames(design = .design, stageResults = stageResults, analysisResults = .self) }, getPlotSettings = function() { return(.plotSettings) }, show = function(showType = 1, digits = NA_integer_) { .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE) }, .getStageResultParametersToShow = function() { stageResultParametersToShow <- c() if (.design$kMax > 1) { if (!grepl("Rates", .getClassName(.dataInput)) || .dataInput$getNumberOfGroups() > 1) { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$effectSizes") } if (grepl("Means", .getClassName(.dataInput))) { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$overallStDevs") } if (grepl("Rates", .getClassName(.dataInput))) { if (.isMultiArmAnalysisResults(.self)) { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$overallPiTreatments") stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$overallPiControl") } else if (.isEnrichmentAnalysisResults(.self)) { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$overallPisTreatment") stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$overallPisControl") } else { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$overallPi1") if (.dataInput$getNumberOfGroups() > 1) { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$overallPi2") } } } } stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$testStatistics") if (grepl("(MultiArm|Dunnett|Enrichment)", .getClassName(.self))) { stageResultParametersToShow <- c(stageResultParametersToShow, 
".stageResults$separatePValues") } else { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$pValues") } if (.design$kMax == 1) { # return(stageResultParametersToShow) } # show combination test statistics if (.isTrialDesignInverseNormal(.design)) { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$combInverseNormal") } else if (.isTrialDesignGroupSequential(.design)) { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$overallTestStatistics") stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$overallPValues") } else if (.isTrialDesignFisher(.design)) { stageResultParametersToShow <- c(stageResultParametersToShow, ".stageResults$combFisher") } return(stageResultParametersToShow) }, .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) { "Method for automatically printing analysis result objects" .resetCat() if (showType == 2) { callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled) } else { .cat(.toString(startWithUpperCase = TRUE), ":\n\n", heading = 1, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getDesignParametersToShow(.self), "Design parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(.getStageResultParametersToShow(), "Stage results", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) # show multi-arm parameters if (grepl("(MultiArm|Dunnett|Enrichment)", .getClassName(.self))) { if (.isTrialDesignConditionalDunnett(.design)) { 
.showParametersOfOneGroup(".closedTestResults$conditionalErrorRate", "Conditional error rate", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(".closedTestResults$secondStagePValues", "Second stage p-values", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) } else { .showParametersOfOneGroup(".closedTestResults$adjustedStageWisePValues", "Adjusted stage-wise p-values", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) .showParametersOfOneGroup(".closedTestResults$overallAdjustedTestStatistics", "Overall adjusted test statistics", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) } .showParametersOfOneGroup(".closedTestResults$rejected", "Test actions", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) } generatedParams <- .getGeneratedParameters() generatedParams <- generatedParams[!(generatedParams %in% c("assumedStDevs", "thetaH1", "pi1", "pi2", "piTreatments", "piTreatments", "piControl", "piControls"))] if (grepl("(MultiArm|Dunnett|Enrichment)", .getClassName(.self))) { if (all(c("conditionalPowerSimulated", "conditionalRejectionProbabilities") %in% generatedParams)) { generatedParams <- .moveValue( generatedParams, "conditionalPowerSimulated", "conditionalRejectionProbabilities" ) } .showParametersOfOneGroup(generatedParams, "Further analysis results", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) } else { .showParametersOfOneGroup(generatedParams, "Analysis results", orderByParameterName = FALSE, consoleOutputEnabled = consoleOutputEnabled ) } .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled) if (grepl("(MultiArm|Dunnett)", .getClassName(.self))) { .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled) .cat( paste0( " (i): results of treatment arm i vs. 
control group ", .dataInput$getNumberOfGroups(), "\n"
                    ),
                    consoleOutputEnabled = consoleOutputEnabled
                )
            } else if (.isEnrichmentAnalysisResults(.self)) {
                .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                .cat(paste0(" S[i]: population i\n"), consoleOutputEnabled = consoleOutputEnabled)
                .cat(paste0(" F: full population\n"), consoleOutputEnabled = consoleOutputEnabled)
            } else if (grepl("Rates", .getClassName(.dataInput)) && .dataInput$getNumberOfGroups() == 2) {
                .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                .cat(" (i): values of treatment arm i\n", consoleOutputEnabled = consoleOutputEnabled)
            }
        }
    },
    # Builds a short human-readable description of this analysis results object,
    # e.g. "analysis results (means of 2 groups, group sequential design)".
    # @param startWithUpperCase if TRUE, the first character is capitalized.
    .toString = function(startWithUpperCase = FALSE) {
        str <- "analysis results"
        if (inherits(.self, "AnalysisResultsMultiArm")) {
            str <- paste0("multi-arm ", str)
        } else if (inherits(.self, "AnalysisResultsEnrichment")) {
            str <- paste0("enrichment ", str)
        }
        if (startWithUpperCase) {
            str <- .firstCharacterToUpperCase(str)
        }
        numberOfGroups <- .dataInput$getNumberOfGroups()
        str <- paste0(str, " (")
        # derive the endpoint label ("means", "rates", "survival") from the
        # data input's class name, stripping the "Dataset"/"DatasetEnrichment" prefix
        str <- paste0(str, tolower(sub("Dataset(Enrichment)?", "", .getClassName(.dataInput))))
        # Bug fix: this condition previously read
        # grepl("Survival", .getClassName(.getClassName)), i.e. it inspected the
        # class of the helper function itself, so the " data" suffix was never
        # appended for survival datasets; the data input has to be checked instead.
        if (grepl("Survival", .getClassName(.dataInput))) {
            str <- paste0(str, " data")
        }
        if (numberOfGroups == 1) {
            str <- paste0(str, " of one group")
        } else {
            str <- paste0(str, " of ", numberOfGroups, " groups")
        }
        # append the design type; for a single-stage design (kMax == 1)
        # the concrete test type is irrelevant and "fixed sample size" is shown
        if (.design$kMax > 1) {
            if (grepl("GroupSequential", .getClassName(.self))) {
                str <- paste0(str, ", group sequential design")
            } else if (grepl("InverseNormal", .getClassName(.self))) {
                str <- paste0(str, ", inverse normal combination test design")
            } else if (grepl("Fisher", .getClassName(.self))) {
                str <- paste0(str, ", Fisher's combination test design")
            } else if (grepl("Dunnett", .getClassName(.self))) {
                str <- paste0(str, ", conditional Dunnett design")
            }
        } else {
            str <- paste0(str, ", fixed sample size design")
        }
        str <- paste0(str, ")")
        return(str)
    },
    # Returns the number of stages with observed data, delegated to the stage results.
    getNumberOfStages = function() {
        return(.stageResults$getNumberOfStages())
    },
getDataInput = function() {
            # accessor for the dataset this analysis was performed on
            return(.dataInput)
        }
    )
)

# Base class for single-hypothesis (non-multi-arm, non-enrichment) analysis
# results; all fields are per-stage numeric vectors of length kMax.
AnalysisResultsBase <- setRefClass("AnalysisResultsBase",
    contains = "AnalysisResults",
    fields = list(
        thetaH1 = "numeric",
        assumedStDev = "numeric",
        equalVariances = "logical",
        testActions = "character",
        conditionalRejectionProbabilities = "numeric",
        conditionalPower = "numeric",
        repeatedConfidenceIntervalLowerBounds = "numeric",
        repeatedConfidenceIntervalUpperBounds = "numeric",
        repeatedPValues = "numeric",
        finalStage = "integer",
        finalPValues = "numeric",
        finalConfidenceIntervalLowerBounds = "numeric",
        finalConfidenceIntervalUpperBounds = "numeric",
        medianUnbiasedEstimates = "numeric"
    ),
    methods = list(
        initialize = function(design, dataInput, ...) {
            callSuper(design = design, dataInput = dataInput, ...)
            # no final stage reached yet; set once a final analysis is performed
            finalStage <<- NA_integer_
        }
    )
)

#'
#' @name AnalysisResultsMultiHypotheses
#'
#' @title
#' Basic Class for Analysis Results Multi-Hypotheses
#'
#' @description
#' A basic class for multi-hypotheses analysis results.
#'
#' @details
#' \code{AnalysisResultsMultiHypotheses} is the basic class for
#' \itemize{
#'   \item \code{\link{AnalysisResultsMultiArm}} and
#'   \item \code{\link{AnalysisResultsEnrichment}}.
#' } #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' @include class_analysis_stage_results.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsMultiHypotheses <- setRefClass("AnalysisResultsMultiHypotheses", contains = "AnalysisResults", fields = list( .closedTestResults = "ClosedCombinationTestResults", thetaH1 = "matrix", # means only assumedStDevs = "matrix", # means only piTreatments = "matrix", # rates only intersectionTest = "character", varianceOption = "character", conditionalRejectionProbabilities = "matrix", conditionalPower = "matrix", repeatedConfidenceIntervalLowerBounds = "matrix", repeatedConfidenceIntervalUpperBounds = "matrix", repeatedPValues = "matrix" ), methods = list( initialize = function(design, dataInput, ...) { callSuper(design = design, dataInput = dataInput, ...) for (param in c("thetaH1", "assumedStDevs", "piTreatments")) { .setParameterType(param, C_PARAM_NOT_APPLICABLE) } } ) ) #' #' @name AnalysisResultsMultiArm #' #' @title #' Basic Class for Analysis Results Multi-Arm #' #' @description #' A basic class for multi-arm analysis results. #' #' @details #' \code{AnalysisResultsMultiArm} is the basic class for #' \itemize{ #' \item \code{\link{AnalysisResultsMultiArmFisher}}, #' \item \code{\link{AnalysisResultsMultiArmInverseNormal}}, and #' \item \code{\link{AnalysisResultsConditionalDunnett}}. 
#' } #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' @include class_analysis_stage_results.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsMultiArm <- setRefClass("AnalysisResultsMultiArm", contains = "AnalysisResultsMultiHypotheses", fields = list( piControl = "matrix" # rates only ), methods = list( initialize = function(design, dataInput, ...) { callSuper(design = design, dataInput = dataInput, ...) .setParameterType("piControl", C_PARAM_NOT_APPLICABLE) }, .getParametersToShow = function() { parametersToShow <- .getVisibleFieldNames() if ("piTreatments" %in% parametersToShow && "piControl" %in% parametersToShow) { index <- which(parametersToShow == "piTreatments") parametersToShow <- parametersToShow[parametersToShow != "piControl"] parametersToShow <- c( parametersToShow[1:index], "piControl", parametersToShow[(index + 1):length(parametersToShow)] ) } return(parametersToShow) } ) ) #' #' @name AnalysisResultsEnrichment #' #' @title #' Basic Class for Analysis Results Enrichment #' #' @description #' A basic class for enrichment analysis results. #' #' @details #' \code{AnalysisResultsEnrichment} is the basic class for #' \itemize{ #' \item \code{\link{AnalysisResultsEnrichmentFisher}} and #' \item \code{\link{AnalysisResultsEnrichmentInverseNormal}}. #' } #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' @include class_analysis_stage_results.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsEnrichment <- setRefClass("AnalysisResultsEnrichment", contains = "AnalysisResultsMultiHypotheses", fields = list( piControls = "matrix" # rates only ), methods = list( initialize = function(design, dataInput, ...) 
{
            callSuper(design = design, dataInput = dataInput, ...)
            # control-group rates are only filled for rates data; mark the
            # field as not applicable until an analysis populates it
            .setParameterType("piControls", C_PARAM_NOT_APPLICABLE)
        }
    )
)

#'
#' @title
#' Analysis Results Summary
#'
#' @description
#' Displays a summary of an \code{\link{AnalysisResults}} object.
#'
#' @param object An \code{\link{AnalysisResults}} object.
#' @inheritParams param_digits
#' @inheritParams param_three_dots
#'
#' @details
#' Summarizes the parameters and results of an analysis results object.
#'
#' @template details_summary
#'
#' @template return_object_summary
#' @template how_to_get_help_for_generics
#'
#' @export
#'
#' @keywords internal
#'
summary.AnalysisResults <- function(object, ..., type = 1, digits = NA_integer_) {
    # delegate to the generic ParameterSet summary implementation
    return(summary.ParameterSet(object = object, ..., type = type, digits = digits))
}

#'
#' @title
#' Coerce AnalysisResults to a Data Frame
#'
#' @description
#' Returns the \code{\link{AnalysisResults}} object as data frame.
#'
#' @param x An \code{\link{AnalysisResults}} object created by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.
#' @inheritParams param_niceColumnNamesEnabled
#' @inheritParams param_three_dots
#'
#' @details
#' Coerces the analysis results to a data frame.
#'
#' @template return_dataframe
#'
#' @export
#'
#' @keywords internal
#'
as.data.frame.AnalysisResults <- function(x, row.names = NULL, optional = FALSE, ..., niceColumnNamesEnabled = FALSE) {
    # collect the parameters to export: design parameters first, then user
    # defined, default, stage result, and generated parameters
    parametersToShow <- .getDesignParametersToShow(x)
    if (inherits(x, "AnalysisResultsMultiArm")) {
        parametersToShow <- c(parametersToShow, ".closedTestResults$rejected")
    }
    parametersToShow <- c(parametersToShow, x$.getUserDefinedParameters())
    parametersToShow <- c(parametersToShow, x$.getDefaultParameters())
    parametersToShow <- c(parametersToShow, x$.getStageResultParametersToShow())
    parametersToShow <- c(parametersToShow, x$.getGeneratedParameters())
    # drop scalar input parameters that do not fit a per-stage data frame
    parametersToShow <- parametersToShow[!(parametersToShow %in% c(
        "finalStage", "allocationRatioPlanned", "thetaH0", "thetaH1", "pi1", "pi2"
    ))]
    return(.getAsDataFrame(
        parameterSet = x,
        parameterNames = parametersToShow,
        tableColumnNames = .getTableColumnNames(design = x$.design),
        niceColumnNamesEnabled = niceColumnNamesEnabled
    ))
}

#'
#' @title
#' Names of an Analysis Results Object
#'
#' @description
#' Function to get the names of an \code{\link{AnalysisResults}} object.
#'
#' @param x An \code{\link{AnalysisResults}} object created by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.
#'
#' @details
#' Returns the names of an analysis results that can be accessed by the user.
#'
#' @template return_names
#'
#' @export
#'
#' @keywords internal
#'
names.AnalysisResults <- function(x) {
    # hidden sub-objects that are nevertheless accessible via $
    namesToShow <- c(".design", ".dataInput", ".stageResults", ".conditionalPowerResults")
    if (.isMultiArmAnalysisResults(x)) {
        namesToShow <- c(namesToShow, ".closedTestResults")
    }
    namesToShow <- c(namesToShow, x$.getVisibleFieldNames())
    return(namesToShow)
}

#'
#' @name AnalysisResultsGroupSequential
#'
#' @title
#' Analysis Results Group Sequential
#'
#' @description
#' Class for analysis results based on a group sequential design.
#' #' @template field_normalApproximation #' @template field_directionUpper #' @template field_thetaH0 #' @template field_pi1 #' @template field_pi2 #' @template field_nPlanned #' @template field_allocationRatioPlanned #' @template field_thetaH1 #' @template field_assumedStDev #' @template field_equalVariances #' @template field_testActions #' @template field_conditionalRejectionProbabilities #' @template field_conditionalPower #' @template field_repeatedConfidenceIntervalLowerBounds #' @template field_repeatedConfidenceIntervalUpperBounds #' @template field_repeatedPValues #' @template field_finalStage #' @template field_finalPValues #' @template field_finalConfidenceIntervalLowerBounds #' @template field_finalConfidenceIntervalUpperBounds #' @template field_medianUnbiasedEstimates #' @template field_maxInformation #' @template field_informationEpsilon #' #' @details #' This object cannot be created directly; use \code{\link{getAnalysisResults}} #' with suitable arguments to create the analysis results of a group sequential design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsGroupSequential <- setRefClass("AnalysisResultsGroupSequential", contains = "AnalysisResultsBase", fields = list( maxInformation = "integer", informationEpsilon = "numeric" ), methods = list( initialize = function(design, dataInput, ...) { callSuper(design = design, dataInput = dataInput, ...) .setParameterType("maxInformation", C_PARAM_NOT_APPLICABLE) .setParameterType("informationEpsilon", C_PARAM_NOT_APPLICABLE) } ) ) #' #' @name AnalysisResultsInverseNormal #' #' @title #' Analysis Results Inverse Normal #' #' @description #' Class for analysis results results based on an inverse normal design. 
#' #' @template field_normalApproximation #' @template field_directionUpper #' @template field_thetaH0 #' @template field_pi1 #' @template field_pi2 #' @template field_nPlanned #' @template field_allocationRatioPlanned #' @template field_thetaH1 #' @template field_assumedStDev #' @template field_equalVariances #' @template field_testActions #' @template field_conditionalRejectionProbabilities #' @template field_conditionalPower #' @template field_repeatedConfidenceIntervalLowerBounds #' @template field_repeatedConfidenceIntervalUpperBounds #' @template field_repeatedPValues #' @template field_finalStage #' @template field_finalPValues #' @template field_finalConfidenceIntervalLowerBounds #' @template field_finalConfidenceIntervalUpperBounds #' @template field_medianUnbiasedEstimates #' #' @details #' This object cannot be created directly; use \code{\link{getAnalysisResults}} #' with suitable arguments to create the analysis results of a inverse normal design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsInverseNormal <- setRefClass("AnalysisResultsInverseNormal", contains = "AnalysisResultsBase" ) #' #' @name AnalysisResultsMultiArmInverseNormal #' #' @title #' Analysis Results Multi-Arm Inverse Normal #' #' @description #' Class for multi-arm analysis results based on a inverse normal design. 
#' #' @template field_normalApproximation #' @template field_directionUpper #' @template field_thetaH0 #' @template field_pi1 #' @template field_pi2 #' @template field_nPlanned #' @template field_allocationRatioPlanned #' @template field_thetaH1 #' @template field_assumedStDevs #' @template field_piTreatments #' @template field_intersectionTest #' @template field_varianceOption #' @template field_conditionalRejectionProbabilities #' @template field_conditionalPower #' @template field_repeatedConfidenceIntervalLowerBounds #' @template field_repeatedConfidenceIntervalUpperBounds #' @template field_repeatedPValues #' @template field_piControl #' #' @details #' This object cannot be created directly; use \code{\link{getAnalysisResults}} #' with suitable arguments to create the multi-arm analysis results of an inverse normal design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsMultiArmInverseNormal <- setRefClass("AnalysisResultsMultiArmInverseNormal", contains = "AnalysisResultsMultiArm" ) #' #' @name AnalysisResultsEnrichmentInverseNormal #' #' @title #' Analysis Results Enrichment Inverse Normal #' #' @description #' Class for enrichment analysis results based on a inverse normal design. 
#' #' @template field_normalApproximation #' @template field_directionUpper #' @template field_thetaH0 #' @template field_pi1 #' @template field_pi2 #' @template field_nPlanned #' @template field_allocationRatioPlanned #' @template field_thetaH1 #' @template field_assumedStDevs #' @template field_piTreatments #' @template field_intersectionTest #' @template field_varianceOption #' @template field_conditionalRejectionProbabilities #' @template field_conditionalPower #' @template field_repeatedConfidenceIntervalLowerBounds #' @template field_repeatedConfidenceIntervalUpperBounds #' @template field_repeatedPValues #' @template field_piControls #' @template field_stratifiedAnalysis #' #' @details #' This object cannot be created directly; use \code{\link{getAnalysisResults}} #' with suitable arguments to create the enrichment analysis results of an inverse normal design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsEnrichmentInverseNormal <- setRefClass("AnalysisResultsEnrichmentInverseNormal", contains = "AnalysisResultsEnrichment", fields = list( stratifiedAnalysis = "logical" ) ) #' #' @name AnalysisResultsFisher #' #' @title #' Analysis Results Fisher #' #' @description #' Class for analysis results based on a Fisher combination test design. 
#' #' @template field_normalApproximation #' @template field_directionUpper #' @template field_thetaH0 #' @template field_pi1 #' @template field_pi2 #' @template field_nPlanned #' @template field_allocationRatioPlanned #' @template field_thetaH1 #' @template field_assumedStDev #' @template field_equalVariances #' @template field_testActions #' @template field_conditionalRejectionProbabilities #' @template field_conditionalPower #' @template field_repeatedConfidenceIntervalLowerBounds #' @template field_repeatedConfidenceIntervalUpperBounds #' @template field_repeatedPValues #' @template field_finalStage #' @template field_finalPValues #' @template field_finalConfidenceIntervalLowerBounds #' @template field_finalConfidenceIntervalUpperBounds #' @template field_medianUnbiasedEstimates #' @template field_conditionalPowerSimulated #' @template field_iterations #' @template field_seed #' #' @details #' This object cannot be created directly; use \code{\link{getAnalysisResults}} #' with suitable arguments to create the analysis results of a Fisher combination test design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsFisher <- setRefClass("AnalysisResultsFisher", contains = "AnalysisResultsBase", fields = list( conditionalPowerSimulated = "numeric", iterations = "integer", seed = "numeric" ), methods = list( initialize = function(design, dataInput, ...) { callSuper(design = design, dataInput = dataInput, ...) conditionalPowerSimulated <<- -1 } ) ) #' #' @title #' Analysis Results Multi-Arm Fisher #' #' @description #' Class for multi-arm analysis results based on a Fisher combination test design. 
#' #' @template field_normalApproximation #' @template field_directionUpper #' @template field_thetaH0 #' @template field_pi1 #' @template field_pi2 #' @template field_nPlanned #' @template field_allocationRatioPlanned #' @template field_thetaH1 #' @template field_assumedStDevs #' @template field_piTreatments #' @template field_intersectionTest #' @template field_varianceOption #' @template field_conditionalRejectionProbabilities #' @template field_conditionalPower #' @template field_repeatedConfidenceIntervalLowerBounds #' @template field_repeatedConfidenceIntervalUpperBounds #' @template field_repeatedPValues #' @template field_piControl #' @template field_conditionalPowerSimulated #' @template field_iterations #' @template field_seed #' #' @details #' This object cannot be created directly; use \code{\link{getAnalysisResults}} #' with suitable arguments to create the multi-arm analysis results of a Fisher combination test design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsMultiArmFisher <- setRefClass("AnalysisResultsMultiArmFisher", contains = "AnalysisResultsMultiArm", fields = list( conditionalPowerSimulated = "matrix", iterations = "integer", seed = "numeric" ) ) #' #' @name AnalysisResultsEnrichmentFisher #' #' @title #' Analysis Results Enrichment Fisher #' #' @description #' Class for enrichment analysis results based on a Fisher combination test design. 
#' #' @template field_normalApproximation #' @template field_directionUpper #' @template field_thetaH0 #' @template field_pi1 #' @template field_pi2 #' @template field_nPlanned #' @template field_thetaH1 #' @template field_assumedStDevs #' @template field_piTreatments #' @template field_intersectionTest #' @template field_varianceOption #' @template field_conditionalRejectionProbabilities #' @template field_repeatedConfidenceIntervalLowerBounds #' @template field_repeatedConfidenceIntervalUpperBounds #' @template field_repeatedPValues #' @template field_piControls #' @template field_conditionalPowerSimulated #' @template field_iterations #' @template field_seed #' @template field_stratifiedAnalysis #' #' #' @details #' This object cannot be created directly; use \code{\link{getAnalysisResults}} #' with suitable arguments to create the multi-arm analysis results of a Fisher combination test design. #' #' @include class_core_parameter_set.R #' @include class_core_plot_settings.R #' @include class_analysis_dataset.R #' @include class_design.R #' @include f_core_constants.R #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsEnrichmentFisher <- setRefClass("AnalysisResultsEnrichmentFisher", contains = "AnalysisResultsEnrichment", fields = list( conditionalPowerSimulated = "matrix", iterations = "integer", seed = "numeric", stratifiedAnalysis = "logical" ) ) #' #' @name AnalysisResultsConditionalDunnett #' #' @title #' Analysis Results Multi-Arm Conditional Dunnett #' #' @description #' Class for multi-arm analysis results based on a conditional Dunnett test design. 
#' #' @template field_normalApproximation #' @template field_directionUpper #' @template field_thetaH0 #' @template field_pi1 #' @template field_pi2 #' @template field_nPlanned #' @template field_allocationRatioPlanned #' @template field_thetaH1 #' @template field_assumedStDevs #' @template field_piTreatments #' @template field_intersectionTest #' @template field_varianceOption #' @template field_conditionalRejectionProbabilities #' @template field_conditionalPower #' @template field_repeatedConfidenceIntervalLowerBounds #' @template field_repeatedConfidenceIntervalUpperBounds #' @template field_repeatedPValues #' @template field_piControl #' #' @details #' This object cannot be created directly; use \code{\link{getAnalysisResults}} #' with suitable arguments to create the multi-arm analysis results of a conditional Dunnett test design. #' #' @keywords internal #' #' @importFrom methods new #' AnalysisResultsConditionalDunnett <- setRefClass("AnalysisResultsConditionalDunnett", contains = "AnalysisResultsMultiArm", fields = list() ) .getAnalysisResultsPlotArguments <- function(x, nPlanned = NA_real_, allocationRatioPlanned = NA_real_) { if (all(is.na(nPlanned))) { nPlanned <- stats::na.omit(x$nPlanned) } if (is.na(allocationRatioPlanned) && length(x$allocationRatioPlanned) == 1) { allocationRatioPlanned <- x$allocationRatioPlanned } if (length(allocationRatioPlanned) != 1) { allocationRatioPlanned <- NA_real_ } if ((.isConditionalPowerEnabled(x$nPlanned) || .isConditionalPowerEnabled(nPlanned)) && is.na(allocationRatioPlanned)) { allocationRatioPlanned <- 1 } return(list( stageResults = x$.stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned )) } .getConfidenceIntervalPlotLegendLabels <- function(x, treatmentArmsToShow) { if (.isEnrichmentAnalysisResults(x)) { gMax <- x$.stageResults$getGMax() labels <- paste0("S", treatmentArmsToShow) labels[treatmentArmsToShow == gMax] <- "F" labels <- factor(labels, levels = unique(labels)) 
return(labels)
    }
    return(paste0(treatmentArmsToShow, " vs control"))
}

# Builds the plot data for repeated confidence intervals: merges the lower and
# upper bounds, adds the interval midpoints as y-values, and drops stages
# without available RCIs.
.getConfidenceIntervalData <- function(x, treatmentArmsToShow = NULL) {
    data <- .getConfidenceIntervalDataPerBound(x, "lower", treatmentArmsToShow)
    data$upper <- .getConfidenceIntervalDataPerBound(x, "upper", treatmentArmsToShow)$upper
    # midpoint between bounds, used as the point marker position in the plot
    data$yValues <- (data$upper + data$lower) / 2
    data <- na.omit(data)
    return(data)
}

# Extracts one RCI bound ("lower" or "upper") from an analysis results object
# into long format: one row per stage and treatment arm/population, with
# columns <ciName>, xValues (stage number), and categories (legend label).
# The bound field is a matrix for multi-hypotheses results (rows = arms,
# columns = stages) and a plain numeric vector (one value per stage) otherwise.
.getConfidenceIntervalDataPerBound <- function(x, ciName = c("lower", "upper"), treatmentArmsToShow = NULL) {
    ciName <- match.arg(ciName)
    paramName <- ifelse(ciName == "lower", "repeatedConfidenceIntervalLowerBounds", "repeatedConfidenceIntervalUpperBounds")
    data <- x[[paramName]]
    # restrict the matrix to the requested arms (only when a valid selection is given)
    if (is.matrix(data) && !is.null(treatmentArmsToShow) && length(treatmentArmsToShow) > 0 && !any(is.na(treatmentArmsToShow))) {
        data <- data[treatmentArmsToShow, ]
    }
    # a single selected arm collapses to the vector case below
    if (is.matrix(data) && nrow(data) == 1) {
        data <- as.numeric(data)
    }
    if (is.matrix(data)) {
        kMax <- ncol(data)
        if (is.null(treatmentArmsToShow) || length(treatmentArmsToShow) == 0 || all(is.na(treatmentArmsToShow))) {
            treatmentArmsToShow <- 1:nrow(data)
        }
        groups <- length(treatmentArmsToShow)
        # stage 1 seeds the result; remaining stages are appended below
        result <- data.frame(ci = data[, 1])
        colnames(result) <- ciName
        result$xValues <- rep(1, groups)
        result$categories <- .getConfidenceIntervalPlotLegendLabels(x, treatmentArmsToShow)
        if (kMax == 1) {
            return(result)
        }
        for (stage in 2:kMax) {
            resultPart <- data.frame(ci = data[, stage])
            colnames(resultPart) <- ciName
            resultPart$xValues <- rep(stage, groups)
            resultPart$categories <- .getConfidenceIntervalPlotLegendLabels(x, treatmentArmsToShow)
            result <- rbind(result, resultPart)
        }
        return(result)
    }
    # vector case: a single hypothesis/arm with one bound per stage
    if (is.null(treatmentArmsToShow) || length(treatmentArmsToShow) == 0 || all(is.na(treatmentArmsToShow))) {
        treatmentArmsToShow <- 1
    }
    kMax <- length(data)
    result <- data.frame(ci = data)
    colnames(result) <- ciName
    result$xValues <- 1:kMax
    result$categories <- rep(.getConfidenceIntervalPlotLegendLabels(x, treatmentArmsToShow), kMax)
    return(result)
}

#'
#' @title
#' 
Analysis Results Plotting #' #' @description #' Plots the conditional power together with the likelihood function. #' #' @param x The analysis results at given stage, obtained from \code{\link[=getAnalysisResults]{getAnalysisResults()}}. #' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function). #' @inheritParams param_nPlanned #' @inheritParams param_stage #' @inheritParams param_allocationRatioPlanned #' @param main The main title, default is \code{"Dataset"}. #' @param xlab The x-axis label, default is \code{"Stage"}. #' @param ylab The y-axis label. #' @param legendTitle The legend title, default is \code{""}. #' @inheritParams param_palette #' @inheritParams param_showSource #' @inheritParams param_plotSettings #' @inheritParams param_legendPosition #' @inheritParams param_grid #' @param type The plot type (default = 1). Note that at the moment only one type (the conditional power plot) is available. #' @param ... Optional \link[=param_three_dots_plot]{plot arguments}. Furthermore the following arguments can be defined: #' \itemize{ #' \item \code{thetaRange}: A range of assumed effect sizes if testing means or a survival design was specified. #' Additionally, if testing means was selected, \code{assumedStDev} (assumed standard deviation) #' can be specified (default is \code{1}). #' \item \code{piTreatmentRange}: A range of assumed rates pi1 to calculate the conditional power. #' Additionally, if a two-sample comparison was selected, \code{pi2} can be specified (default is the value from #' \code{\link[=getAnalysisResults]{getAnalysisResults()}}). #' \item \code{directionUpper}: Specifies the direction of the alternative, #' only applicable for one-sided testing; default is \code{TRUE} #' which means that larger values of the test statistics yield smaller p-values. 
#' \item \code{\link[=param_thetaH0]{thetaH0}}: The null hypothesis value, default is \code{0} for #' the normal and the binary case, it is \code{1} for the survival case. #' For testing a rate in one sample, a value thetaH0 in (0, 1) has to be specified for #' defining the null hypothesis H0: \code{pi = thetaH0}. #' } #' #' @details #' The conditional power is calculated only if effect size and sample size is specified. #' #' @template return_object_ggplot #' #' @template examples_plot_analysis_results #' #' @export #' plot.AnalysisResults <- function(x, y, ..., type = 1L, nPlanned = NA_real_, allocationRatioPlanned = NA_real_, main = NA_character_, xlab = NA_character_, ylab = NA_character_, legendTitle = NA_character_, palette = "Set1", legendPosition = NA_integer_, showSource = FALSE, grid = 1, plotSettings = NULL) { .assertGgplotIsInstalled() functionCall <- match.call(expand.dots = TRUE) analysisResultsName <- as.character(functionCall$x)[1] .assertIsSingleInteger(grid, "grid", validateType = FALSE) typeNumbers <- .getPlotTypeNumber(type, x) p <- NULL plotList <- list() for (typeNumber in typeNumbers) { p <- .plotAnalysisResults( x = x, y = y, type = typeNumber, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, main = main, xlab = xlab, ylab = ylab, legendTitle = legendTitle, palette = palette, legendPosition = legendPosition, showSource = showSource, functionCall = functionCall, analysisResultsName = analysisResultsName, plotSettings = plotSettings, ... 
) .printPlotShowSourceSeparator(showSource, typeNumber, typeNumbers) if (length(typeNumbers) > 1) { caption <- .getPlotCaption(x, typeNumber, stopIfNotFound = TRUE) plotList[[caption]] <- p } } if (length(typeNumbers) == 1) { if (.isSpecialPlotShowSourceArgument(showSource)) { return(invisible(p)) } return(p) } if (.isSpecialPlotShowSourceArgument(showSource)) { return(invisible(plotList)) } return(.createPlotResultObject(plotList, grid)) } .plotAnalysisResultsRCI <- function(..., x, y, nPlanned, allocationRatioPlanned, main, xlab, ylab, legendTitle, palette, legendPosition, showSource, analysisResultsName, plotSettings = NULL) { .assertIsAnalysisResults(x) .warnInCaseOfUnknownArguments(functionName = "plot", ignore = c("treatmentArms", "populations"), ...) if (.isEnrichmentAnalysisResults(x)) { gMax <- x$.stageResults$getGMax() treatmentArmsToShow <- .getPopulationsToShow(x, gMax = gMax, ...) } else { treatmentArmsToShow <- .getTreatmentArmsToShow(x, ...) } data <- .getConfidenceIntervalData(x, treatmentArmsToShow) if (nrow(data) == 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "unable to create plot because no RCIs are available in the specified analysis result" ) } .warnInCaseOfUnusedArgument(nPlanned, "nPlanned", NA_real_, "plot") .warnInCaseOfUnusedArgument(allocationRatioPlanned, "allocationRatioPlanned", NA_real_, "plot") plotData <- list( main = "Repeated Confidence Intervals", xlab = "Stage", ylab = "RCI", sub = NA_character_ # subtitle ) if (is.na(legendPosition)) { if (!.isMultiHypothesesAnalysisResults(x)) { legendPosition <- ifelse(length(treatmentArmsToShow) == 1 && treatmentArmsToShow == 1, -1, C_POSITION_RIGHT_CENTER ) } else { legendPosition <- C_POSITION_RIGHT_TOP } } treatmentArmsToShowCmd <- "" if (!is.null(treatmentArmsToShow) && !identical(sort(unique(treatmentArmsToShow)), 1:nrow(data))) { treatmentArmsToShowCmd <- paste0(", ", .arrayToString(treatmentArmsToShow, mode = "vector")) } dataCmd <- paste0("rpact:::.getConfidenceIntervalData(", 
analysisResultsName, treatmentArmsToShowCmd, ")") srcCmd <- .showPlotSourceInformation( objectName = analysisResultsName, xParameterName = paste0(dataCmd, "$xValues"), yParameterNames = c( paste0(dataCmd, "$lower"), paste0(dataCmd, "$yValues"), paste0(dataCmd, "$upper") ), type = 2L, showSource = showSource, lineType = FALSE ) p <- .createAnalysisResultsPlotObject(x, data = data, plotData = plotData, main = main, xlab = xlab, ylab = ylab, legendTitle = legendTitle, palette = palette, legendPosition = legendPosition, kMax = x$.design$kMax, plotSettings = plotSettings ) p <- p + ggplot2::expand_limits(x = c(1, x$.design$kMax)) return(p) } .plotAnalysisResults <- function(..., x, y, type, nPlanned, allocationRatioPlanned, main, xlab, ylab, legendTitle, palette, legendPosition, showSource, functionCall, analysisResultsName, plotSettings = NULL) { .assertIsSingleInteger(type, "type", naAllowed = FALSE, validateType = FALSE) if (!(type %in% c(1, 2))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1 or 2") } .assertIsAnalysisResults(x) .assertIsValidLegendPosition(legendPosition = legendPosition) if (type == 2) { return(.plotAnalysisResultsRCI( x = x, y = y, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, main = main, xlab = xlab, ylab = ylab, legendTitle = legendTitle, palette = palette, legendPosition = legendPosition, showSource = showSource, analysisResultsName = analysisResultsName, plotSettings = plotSettings, ... )) } if (!.isConditionalPowerEnabled(x$nPlanned) && !.isConditionalPowerEnabled(nPlanned)) { stop("'nPlanned' must be defined to create conditional power plot") } .warnInCaseOfUnknownArguments( functionName = "plot", ignore = c("thetaRange", "assumedStDev", "assumedStDevs", "treatmentArms", "populations", "pi2", "piTreatmentRange"), ... 
) if (is.na(legendPosition)) { legendPosition <- C_POSITION_RIGHT_CENTER } plotArgs <- .getAnalysisResultsPlotArguments( x = x, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned ) functionCall$x <- x$.stageResults functionCall$y <- NULL functionCall$stageResultsName <- paste0(analysisResultsName, "$.stageResults") functionCall$nPlanned <- plotArgs$nPlanned functionCall$main <- main functionCall$xlab <- xlab functionCall$ylab <- ylab functionCall$legendTitle <- legendTitle functionCall$palette <- palette functionCall$legendPosition <- legendPosition functionCall$type <- type functionCall$plotSettings <- plotSettings functionCall$allocationRatioPlanned <- plotArgs$allocationRatioPlanned if (.isTrialDesignFisher(x$.design)) { functionCall$iterations <- x$iterations functionCall$seed <- x$seed } if (x$getDataInput()$isDatasetMeans()) { if (.isMultiHypothesesAnalysisResults(x)) { assumedStDevs <- eval.parent(functionCall$assumedStDevs) if (is.null(assumedStDevs)) { assumedStDevs <- as.numeric(x$assumedStDevs) } gMax <- x$.stageResults$getGMax() .assertIsValidAssumedStDevs(assumedStDevs, gMax) functionCall$assumedStDevs <- assumedStDevs } else { assumedStDev <- eval.parent(functionCall$assumedStDev) if (is.null(assumedStDev)) { assumedStDev <- x$assumedStDev } functionCall$assumedStDev <- assumedStDev } } if (x$getDataInput()$isDatasetMeans() || x$getDataInput()$isDatasetSurvival()) { thetaRange <- eval.parent(functionCall$thetaRange) if (is.null(thetaRange)) { thetaRangeMin <- min(x$thetaH0, min(na.omit(as.numeric(x$thetaH1)))) thetaRangeMax <- 2 * max(x$thetaH0, max(na.omit(as.numeric(x$thetaH1)))) thetaRange <- seq( thetaRangeMin, thetaRangeMax, (thetaRangeMax - thetaRangeMin) / C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT ) } else { thetaRange <- .assertIsValidThetaRange( thetaRange = thetaRange, survivalDataEnabled = x$getDataInput()$isDatasetSurvival() ) } functionCall$thetaRange <- thetaRange } else if (x$getDataInput()$isDatasetRates()) { if 
(.isMultiArmAnalysisResults(x)) {
            piControl <- eval.parent(functionCall$piControl)
            if (is.null(piControl)) {
                piControl <- as.numeric(x$piControl)
            }
            functionCall$piControl <- piControl
        } else if (.isEnrichmentAnalysisResults(x)) {
            # bug fix: evaluate the user-specified 'piControls' (not 'piControl');
            # previously a user-supplied 'piControls' was ignored and a supplied
            # 'piControl' left 'piControls' undefined
            piControls <- eval.parent(functionCall$piControls)
            if (is.null(piControls)) {
                piControls <- as.numeric(x$piControls)
            }
            functionCall$piControls <- piControls
        } else {
            pi2 <- eval.parent(functionCall$pi2)
            if (is.null(pi2)) {
                pi2 <- x$pi2
            }
            functionCall$pi2 <- pi2
        }
        piTreatmentRange <- eval.parent(functionCall$piTreatmentRange)
        if (is.null(piTreatmentRange)) {
            piTreatmentRange <- seq(0, 1, 1 / C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT) # default
        } else {
            piTreatmentRange <- .assertIsValidPiTreatmentRange(piTreatmentRange = piTreatmentRange)
        }
        functionCall$piTreatmentRange <- piTreatmentRange
    }
    # re-dispatch the prepared call to the stage results plot method
    functionCall[[1L]] <- as.name("plot")
    return(eval.parent(functionCall))
}
rpact/R/f_analysis_utilities.R0000644000176200001440000013345314446314505016151 0ustar liggesusers## |
## | *Analysis of multi-arm designs with adaptive test*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7133 $
## | Last changed: $Date: 2023-06-26 15:57:24 +0200 (Mo, 26 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_core_utilities.R
NULL

# Returns gMax, the number of comparisons, read from the number of rows of the
# stage results test statistics matrix.
.getGMaxFromAnalysisResult <- function(results) {
    return(nrow(results$.stageResults$testStatistics))
}

# Stores 'nPlanned' on the analysis results object and marks it as
# not applicable for fixed designs (kMax == 1) or when all values are NA.
.setNPlanned <- function(results, nPlanned) {
    design <- results$.design
    if (design$kMax == 1) {
        if (.isConditionalPowerEnabled(nPlanned)) {
warning("'nPlanned' (", .arrayToString(nPlanned), ") ",
                "will be ignored because design is fixed",
                call. = FALSE
            )
        }
        results$.setParameterType("nPlanned", C_PARAM_NOT_APPLICABLE)
    }
    .setValueAndParameterType(results, "nPlanned", nPlanned, NA_real_)
    # left-pad with NA until 'nPlanned' has length kMax
    # (NOTE(review): the supplied values appear to refer to the remaining
    # stages, hence the padding at the front -- confirm with callers)
    while (length(results$nPlanned) < design$kMax) {
        results$nPlanned <- c(NA_real_, results$nPlanned)
    }
    if (all(is.na(results$nPlanned))) {
        results$.setParameterType("nPlanned", C_PARAM_NOT_APPLICABLE)
    }
}

# Returns TRUE iff 'nPlanned' contains at least one non-NA value,
# i.e., a conditional power calculation was requested.
.isConditionalPowerEnabled <- function(nPlanned) {
    return(!is.null(nPlanned) && length(nPlanned) > 0 && !all(is.na(nPlanned)))
}

# Warns if a conditional-power argument ('paramName' with values 'paramValues')
# was specified although it has no effect: either because 'nPlanned' is
# undefined or because the design is fixed (kMax == 1).
# Generated parameters (C_PARAM_GENERATED) never trigger a warning.
.warnInCaseOfUnusedConditionalPowerArgument <- function(results, nPlanned, paramName, paramValues) {
    if (!.isConditionalPowerEnabled(nPlanned)) {
        if (length(paramValues) > 0 && !all(is.na(paramValues)) &&
                results$.getParameterType(paramName) != C_PARAM_GENERATED) {
            warning("'", paramName, "' (", .arrayToString(paramValues), ") ",
                "will be ignored because 'nPlanned' is not defined",
                call. = FALSE
            )
        }
        return(invisible())
    }
    if (results$.design$kMax == 1) {
        if (length(paramValues) > 0 && !all(is.na(paramValues)) &&
                results$.getParameterType(paramName) != C_PARAM_GENERATED) {
            warning("'", paramName, "' (", .arrayToString(paramValues), ") ",
                "will be ignored because design is fixed",
                call.
= FALSE ) } return(invisible()) } } .setNPlannedAndThetaH1 <- function(results, nPlanned, thetaH1) { .setNPlanned(results, nPlanned) .warnInCaseOfUnusedConditionalPowerArgument(results, nPlanned, "thetaH1", thetaH1) if (!is.matrix(results$thetaH1)) { if (results$.getParameterType("thetaH1") %in% c(C_PARAM_TYPE_UNKNOWN, C_PARAM_NOT_APPLICABLE)) { .setValueAndParameterType(results, "thetaH1", thetaH1, NA_real_) } else { results$thetaH1 <- thetaH1 if (results$.getParameterType("thetaH1") == C_PARAM_TYPE_UNKNOWN) { results$.setParameterType("thetaH1", C_PARAM_USER_DEFINED) } } } else { if (results$.getParameterType("thetaH1") %in% c(C_PARAM_TYPE_UNKNOWN, C_PARAM_NOT_APPLICABLE)) { .setValueAndParameterType(results, "thetaH1", value = matrix(thetaH1, ncol = 1), defaultValue = matrix(rep(NA_real_, .getGMaxFromAnalysisResult(results)), ncol = 1) ) } else { results$thetaH1 <- matrix(thetaH1, ncol = 1) if (results$.getParameterType("thetaH1") == C_PARAM_TYPE_UNKNOWN) { results$.setParameterType("thetaH1", C_PARAM_USER_DEFINED) } } } } .setNPlannedAndThetaH1AndAssumedStDev <- function(results, nPlanned, thetaH1, assumedStDev) { .setNPlannedAndThetaH1(results, nPlanned, thetaH1) .warnInCaseOfUnusedConditionalPowerArgument(results, nPlanned, "assumedStDev", assumedStDev) if (results$.getParameterType("assumedStDev") %in% c(C_PARAM_TYPE_UNKNOWN, C_PARAM_NOT_APPLICABLE)) { .setValueAndParameterType(results, "assumedStDev", assumedStDev, NA_real_) } else { results$assumedStDev <- assumedStDev if (results$.getParameterType("assumedStDev") == C_PARAM_TYPE_UNKNOWN) { results$.setParameterType("assumedStDev", C_PARAM_USER_DEFINED) } } } .setNPlannedAndThetaH1AndAssumedStDevs <- function(results, nPlanned, thetaH1, assumedStDevs) { .setNPlannedAndThetaH1(results, nPlanned, thetaH1) .warnInCaseOfUnusedConditionalPowerArgument(results, nPlanned, "assumedStDevs", assumedStDevs) if (results$.getParameterType("assumedStDevs") %in% c(C_PARAM_TYPE_UNKNOWN, C_PARAM_NOT_APPLICABLE)) { 
.setValueAndParameterType(results, "assumedStDevs", value = matrix(assumedStDevs, ncol = 1), defaultValue = matrix(rep(NA_real_, .getGMaxFromAnalysisResult(results)), ncol = 1) ) } else { results$assumedStDevs <- matrix(assumedStDevs, ncol = 1) if (results$.getParameterType("assumedStDevs") == C_PARAM_TYPE_UNKNOWN) { results$.setParameterType("assumedStDevs", C_PARAM_USER_DEFINED) } } } .setNPlannedAndPi <- function(results, nPlanned, piControlName, piControlValues, piTreatments) { .setNPlanned(results, nPlanned) .warnInCaseOfUnusedConditionalPowerArgument(results, nPlanned, piControlName, piControlValues) .warnInCaseOfUnusedConditionalPowerArgument(results, nPlanned, "piTreatments", piTreatments) if (results$.getParameterType(piControlName) %in% c(C_PARAM_TYPE_UNKNOWN, C_PARAM_NOT_APPLICABLE)) { .setValueAndParameterType( results, piControlName, matrix(piControlValues, ncol = 1), matrix(rep(NA_real_, .getGMaxFromAnalysisResult(results)), ncol = 1) ) } else { results[[piControlName]] <- matrix(piControlValues, ncol = 1) if (results$.getParameterType(piControlName) == C_PARAM_TYPE_UNKNOWN) { results$.setParameterType(piControlName, C_PARAM_USER_DEFINED) } } if (results$.getParameterType("piTreatments") == C_PARAM_TYPE_UNKNOWN) { .setValueAndParameterType( results, "piTreatments", matrix(piTreatments, ncol = 1), matrix(rep(NA_real_, .getGMaxFromAnalysisResult(results)), ncol = 1) ) } else { results$piTreatments <- matrix(piTreatments, ncol = 1) if (results$.getParameterType("piTreatments") == C_PARAM_TYPE_UNKNOWN) { results$.setParameterType("piTreatments", C_PARAM_USER_DEFINED) } } } .getSortedSubsets <- function(subsets) { return(subsets[with(data.frame(subsets = subsets, index = as.integer(sub("\\D", "", subsets))), order(index))]) } .getAllAvailableSubsets <- function(numbers, ..., sort = TRUE, digits = NA_integer_) { if (length(numbers) == 0) { return(character(0)) } results <- paste0(numbers, collapse = "") for (n in numbers) { results <- c(results, 
.getAllAvailableSubsets(numbers[numbers != n], sort = sort)) } if (!is.na(digits)) { results <- results[nchar(results) == digits] } if (!sort) { return(unique(results)) } return(.getSortedSubsets(unique(results))) } .createSubsetsByGMax <- function(gMax, ..., stratifiedInput = TRUE, subsetIdPrefix = "S", restId = ifelse(stratifiedInput, "R", "F"), all = TRUE) { .assertIsSingleInteger(gMax, "gMax", validateType = FALSE) .assertIsInClosedInterval(gMax, "gMax", lower = 1, upper = 10) if (gMax == 1) { subsetName <- paste0(subsetIdPrefix, 1) subsetName <- ifelse(stratifiedInput, subsetName, "F") if (!all) { return(subsetName) } return(list(subsetName)) } numbers <- 1:(gMax - 1) subsets <- list() if (stratifiedInput) { availableSubsets <- paste0(subsetIdPrefix, .getAllAvailableSubsets(numbers)) } else { availableSubsets <- paste0(subsetIdPrefix, numbers) } for (i in numbers) { subset <- availableSubsets[grepl(i, availableSubsets)] subsets[[length(subsets) + 1]] <- subset } if (stratifiedInput) { subsets[[length(subsets) + 1]] <- c(availableSubsets, restId) } else { subsets[[length(subsets) + 1]] <- restId } if (!all) { if (!stratifiedInput) { return(unlist(subsets)) } return(subsets[[gMax]]) } return(subsets) } .arraysAreEqual <- function(a1, a2) { if (length(a1) != length(a2)) { return(FALSE) } l <- length(a1) if (l > 0) { a1 <- sort(a1) a2 <- sort(a2) if (sum(a1 == a2) < l) { return(FALSE) } } return(TRUE) } .getNumberOfGroupsFromArgumentNames <- function(argNames) { numbers <- gsub("\\D", "", argNames) numbers <- numbers[numbers != ""] return(ifelse(length(numbers) == 0, 1, max(as.numeric(numbers)))) } .getGroupNumberFromArgumentName <- function(argName) { n <- gsub("\\D", "", argName) return(ifelse(n == "", 1, as.numeric(n))) } .isControlGroupArgument <- function(argName, numberOfGroups) { if (numberOfGroups <= 2) { return(FALSE) } return(ifelse(numberOfGroups == 1, FALSE, .getGroupNumberFromArgumentName(argName) == numberOfGroups)) } .naOmitBackward <- function(x) { 
indices <- which(is.na(x))
    if (length(indices) == 0) {
        return(x)
    }
    if (length(x) == 1 || !is.na(x[length(x)])) {
        return(x)
    }
    if (length(indices) == 1) {
        return(x[1:(length(x) - 1)])
    }
    # walk the NA positions from the end and cut off only the trailing
    # contiguous run of NA's; interior NA's are kept
    indexBefore <- NA_real_
    for (i in length(indices):1) {
        index <- indices[i]
        if (!is.na(indexBefore) && index != indexBefore - 1) {
            return(x[1:(indexBefore - 1)])
        }
        indexBefore <- index
    }
    if (!is.na(indexBefore)) {
        return(x[1:(indexBefore - 1)])
    }
    return(x)
}

# Returns the maximum number of stages over all data vectors in 'args',
# where trailing NA's do not count as stages (see .naOmitBackward).
.getNumberOfStagesFromArguments <- function(args, argNames) {
    numberOfStages <- 1
    for (argName in argNames) {
        argValues <- args[[argName]]
        n <- length(.naOmitBackward(argValues))
        if (n > numberOfStages) {
            numberOfStages <- n
        }
    }
    return(numberOfStages)
}

# Returns the maximum number of non-NA values over all data vectors in 'args';
# used to determine the number of subsets for enrichment input.
.getNumberOfSubsetsFromArguments <- function(args, argNames) {
    numberOfSubsets <- 1
    for (argName in argNames) {
        argValues <- args[[argName]]
        n <- length(na.omit(argValues))
        if (n > numberOfSubsets) {
            numberOfSubsets <- n
        }
    }
    return(numberOfSubsets)
}

# Stops unless at least one treatment arm argument (any argument whose name
# does not end with the control group number) provides values for all
# 'numberOfStages' stages defined by the control arm.
.assertIsValidTreatmentArmArgumentDefined <- function(args, argNames, numberOfGroups, numberOfStages) {
    tratmentArgNames <- argNames[!grepl(paste0(".*\\D{1}", numberOfGroups, "$"), argNames)]
    for (argName in tratmentArgNames) {
        argValues <- args[[argName]]
        if (!is.null(argValues) && length(.naOmitBackward(argValues)) == numberOfStages) {
            return(invisible())
        }
    }
    stop(
        C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
        "at least for one treatment arm the values for ", numberOfStages, " stages must be defined ",
        "because the control arm defines ", numberOfStages, " stages"
    )
}

# Builds a stage-wise data.frame from the data vectors in '...' and validates
# the input (completeness per stage, consistent NA patterns, finite numeric
# values, etc.).
.createDataFrame <- function(...) {
    args <- list(...)
    args <- .removeDesignFromArgs(args)
    argNames <- .getArgumentNames(...)
    if (length(args) == 0 || length(argNames) == 0) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "data.frame or data vectors expected")
    }
    # an argument name containing the digit 3 indicates a third group, i.e. multi-arm input
    multiArmEnabled <- any(grep("3", argNames))
    numberOfGroups <- .getNumberOfGroupsFromArgumentNames(argNames)
    numberOfStages <- .getNumberOfStagesFromArguments(args, argNames)
    survivalDataEnabled <- .isDataObjectSurvival(...)
enrichmentEnabled <- .isDataObjectEnrichment(...) numberOfSubsets <- 1 if (enrichmentEnabled) { numberOfSubsets <- .getNumberOfSubsetsFromArguments(args, argNames) } if (multiArmEnabled) { .assertIsValidTreatmentArmArgumentDefined(args, argNames, numberOfGroups, numberOfStages) } numberOfValues <- length(args[[1]]) naIndicesBefore <- NULL if (!survivalDataEnabled && multiArmEnabled) { naIndicesBefore <- list() } for (argName in argNames) { argValues <- args[[argName]] if (is.null(argValues) || length(argValues) == 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argName, "' is not a valid numeric vector" ) } if (is.na(argValues[1])) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argName, "' is NA at first stage; a valid numeric value must be specified at stage 1" ) } if (length(argValues) != numberOfValues) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "all data vectors must have the same length: '", argName, "' (", length(argValues), ") differs from '", argNames[1], "' (", numberOfValues, ")" ) } if (.equalsRegexpIgnoreCase(argName, "^stages?$")) { if (length(stats::na.omit(argValues)) != length(argValues)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "NA's not allowed for '", argName, "'; stages must be defined completely" ) } definedStages <- sort(intersect(unique(argValues), 1:numberOfValues)) if (length(definedStages) < numberOfValues) { if (length(definedStages) == 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "no valid stages are defined; ", "stages must be defined completely (", .arrayToString(1:numberOfValues), ")" ) } if (!enrichmentEnabled) { msg <- ifelse(length(definedStages) == 1, paste0("only stage ", definedStages, " is defined"), paste0("only stages ", .arrayToString(definedStages), " are defined") ) stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, msg, "; stages must be defined completely") } } } if (!survivalDataEnabled && .isControlGroupArgument(argName, numberOfGroups) && length(na.omit(argValues)) < numberOfStages) { stop( 
C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "control group '", argName, "' (", .arrayToString(argValues, digits = 2), ") must be defined for all stages" ) } naIndices <- which(is.na(argValues)) if (length(naIndices) > 0) { stageIndex <- naIndices[length(naIndices)] if (stageIndex != numberOfValues) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argName, "' contains a NA at stage ", stageIndex, " followed by a value for a higher stage; NA's must be the last values" ) } } if (length(naIndices) > 1 && !enrichmentEnabled) { indexBefore <- naIndices[length(naIndices)] for (i in (length(naIndices) - 1):1) { index <- naIndices[i] if (indexBefore - index > 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", argName, "' contains alternating values and NA's; ", "NA's must be the last values" ) } indexBefore <- index } } if (!enrichmentEnabled) { if (!multiArmEnabled && !survivalDataEnabled) { if (!is.null(naIndicesBefore) && !.equalsRegexpIgnoreCase(argName, "^stages?$")) { if (!.arraysAreEqual(naIndicesBefore, naIndices)) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "inconsistent NA definition; ", "if NA's exist, then they are mandatory for each group at the same stage" ) } } naIndicesBefore <- naIndices } else { groupNumber <- .getGroupNumberFromArgumentName(argName) if (!is.null(naIndicesBefore[[as.character(groupNumber)]]) && !.equalsRegexpIgnoreCase(argName, "^stages?$") && !.isControlGroupArgument(argName, numberOfGroups)) { if (!.arraysAreEqual(naIndicesBefore[[as.character(groupNumber)]], naIndices)) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "values of treatment ", groupNumber, " not correctly specified; ", "if NA's exist, then they are mandatory for each parameter at the same stage" ) } } if (!.isControlGroupArgument(argName, numberOfGroups)) { naIndicesBefore[[as.character(groupNumber)]] <- naIndices } } } if (sum(is.infinite(argValues)) > 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all data values must be finite; ", "'", argName, "' contains infinite values" ) 
}
        if (!any(grepl(paste0("^", sub("\\d*$", "", argName), "$"), C_KEY_WORDS_SUBSETS)) &&
                !is.numeric(argValues)) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "all data vectors must be numeric ('", argName, "' is ", .getClassName(argValues), ")"
            )
        }
        if (length(argValues) > C_KMAX_UPPER_BOUND * numberOfSubsets) {
            stop(
                C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS,
                "'", argName, "' is out of bounds [1, ", C_KMAX_UPPER_BOUND, "]"
            )
        }
    }
    # non-enrichment case: NA patterns must be identical for all parameters of a group
    if (!enrichmentEnabled) {
        for (groupNumber in 1:numberOfGroups) {
            groupVars <- argNames[grepl(paste0("\\D", groupNumber, "$"), argNames)]
            naIndicesBefore <- NULL
            for (argName in groupVars) {
                argValues <- args[[argName]]
                naIndices <- which(is.na(argValues))
                if (!is.null(naIndicesBefore) && !.equalsRegexpIgnoreCase(argName, "^stages?$")) {
                    if (!.arraysAreEqual(naIndicesBefore, naIndices)) {
                        stop(
                            C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                            "inconsistent NA definition for group ", groupNumber, "; ",
                            "if NA's exist, then they are mandatory for each group at the same stage"
                        )
                    }
                }
                naIndicesBefore <- naIndices
            }
        }
    }
    dataFrame <- as.data.frame(args)
    # add a default stage column if none was specified
    if (length(intersect(tolower(names(dataFrame)), c("stage", "stages"))) == 0) {
        dataFrame$stages <- 1:nrow(dataFrame)
    }
    return(dataFrame)
}

# Returns the first data.frame found in '...' or NULL if there is none.
# Warns if more than one data.frame was passed (only the first one is used).
# Stops if '...' is empty.
.getDataFrameFromArgs <- function(...) {
    args <- list(...)
    if (length(args) == 0) {
        stop(
            C_EXCEPTION_TYPE_MISSING_ARGUMENT,
            "cannot initialize dataset because no data are defined"
        )
    }
    dataFrame <- NULL
    dataFrameCounter <- 0
    for (arg in args) {
        if (is.data.frame(arg)) {
            dataFrameCounter <- dataFrameCounter + 1
            if (is.null(dataFrame)) {
                dataFrame <- arg
            }
        }
    }
    if (dataFrameCounter > 1) {
        # fixed message: removed stray comma ("Found 2, data.frame ..." -> "Found 2 data.frame ...")
        warning("Found ", dataFrameCounter, " data.frame arguments; ",
            "only the first data.frame will be used for the initialization of the dataset",
            call. = FALSE
        )
    }
    return(dataFrame)
}

# Returns the first trial design object found in '...' or NULL if there is none.
.getDesignFromArgs <- function(...) {
    args <- list(...)
if (length(args) == 0) {
        return(NULL)
    }
    for (arg in args) {
        if (.isTrialDesign(arg)) {
            return(arg)
        }
    }
    return(NULL)
}

# Returns the first dataset object found in '...' or NULL if there is none.
.getDatasetFromArgs <- function(...) {
    args <- list(...)
    if (length(args) == 0) {
        return(NULL)
    }
    for (arg in args) {
        if (.isDataset(arg)) {
            return(arg)
        }
    }
    return(NULL)
}

# Removes the first trial design object from the argument list 'args';
# returns 'args' unchanged if it contains no design.
.removeDesignFromArgs <- function(args) {
    for (i in 1:length(args)) {
        if (.isTrialDesign(args[[i]])) {
            return(args[-i])
        }
    }
    return(args)
}

# Returns the column names of the data.frame in '...' if one was passed,
# otherwise the names of the remaining (non-design) arguments.
.getArgumentNames <- function(...) {
    dataFrame <- .getDataFrameFromArgs(...)
    if (!is.null(dataFrame)) {
        return(names(dataFrame))
    }
    args <- list(...)
    if (length(args) == 0) {
        return(character(0))
    }
    args <- .removeDesignFromArgs(args)
    return(names(args))
}

# Checks that every argument name in '...' is a known dataset keyword:
# a base keyword, a multi-arm keyword with a numeric group suffix, or an
# enrichment keyword. Stops with an informative message otherwise.
.assertIsValidDatasetArgument <- function(...) {
    argNames <- .getArgumentNames(...)
    if (length(argNames) == 0) {
        return(TRUE)
    }
    argNamesLower <- tolower(argNames)
    dataObjectkeyWords <- unique(tolower(C_KEY_WORDS))
    multiArmKeywords <- tolower(c(
        C_KEY_WORDS_SUBSETS, C_KEY_WORDS_EVENTS, C_KEY_WORDS_OVERALL_EVENTS,
        C_KEY_WORDS_SAMPLE_SIZES, C_KEY_WORDS_OVERALL_SAMPLE_SIZES,
        C_KEY_WORDS_MEANS, C_KEY_WORDS_OVERALL_MEANS,
        C_KEY_WORDS_ST_DEVS, C_KEY_WORDS_OVERALL_ST_DEVS,
        C_KEY_WORDS_ALLOCATION_RATIOS, C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS,
        C_KEY_WORDS_LOG_RANKS, C_KEY_WORDS_OVERALL_LOG_RANKS
    ))
    enrichmentKeywords <- tolower(c(
        C_KEY_WORDS_EXPECTED_EVENTS, C_KEY_WORDS_VARIANCE_EVENTS,
        C_KEY_WORDS_OVERALL_EXPECTED_EVENTS, C_KEY_WORDS_OVERALL_VARIANCE_EVENTS
    ))
    unknownArgs <- setdiff(argNamesLower, dataObjectkeyWords)
    unknownArgsChecked <- unknownArgs
    unknownArgs <- c()
    for (unknownArg in unknownArgsChecked) {
        unknown <- TRUE
        # multi-arm keywords may carry a group number suffix (e.g. "means2")
        for (multiArmKeyword in multiArmKeywords) {
            if (grepl(paste0(multiArmKeyword, "\\d{1,4}"), unknownArg)) {
                unknown <- FALSE
            }
        }
        for (enrichmentKeyword in enrichmentKeywords) {
            if (grepl(enrichmentKeyword, unknownArg)) {
                unknown <- FALSE
            }
        }
        if (unknown) {
            unknownArgs <- c(unknownArgs, unknownArg)
        }
    }
    if (length(unknownArgs) > 0) {
        # map the lower-case names back to the names as specified by the user
        for (i in 1:length(unknownArgs)) {
            unknownArgs[i] <-
argNames[argNamesLower == unknownArgs[i]][1]
        }
        if (length(unknownArgs) == 1) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "the argument '", unknownArgs, "' is not a valid dataset argument"
            )
        } else {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "the arguments ", .arrayToString(unknownArgs, encapsulate = TRUE),
                " are no valid dataset arguments"
            )
        }
    }
    invisible(TRUE)
}

# Converts a camelCase parameter name into a variant separated by 'sep',
# e.g. "overallExpectedEvents" -> "overall.expected.events" for sep = ".".
# Names that are already all lower case are returned unchanged.
.getParameterNameVariant <- function(x, sep = ".") {
    # example: x <- "overallExpectedEvents"
    if (identical(x, tolower(x))) {
        return(x)
    }
    indices <- gregexpr("[A-Z]", x)[[1]]
    parts <- strsplit(x, "[A-Z]")[[1]]
    result <- ""
    for (i in 1:length(indices)) {
        index <- indices[i]
        y <- tolower(substring(x, index, index))
        result <- paste0(result, parts[i], sep, y)
    }
    if (length(parts) > length(indices)) {
        result <- paste0(result, parts[length(parts)])
    }
    return(trimws(result))
}

# Expands a vector of parameter names by all accepted aliases:
# "overall*" names additionally get "cumulative*" and "cum*" forms, and every
# name also gets its dot- and underscore-separated variants.
.getAllParameterNameVariants <- function(parameterNameVariants) {
    overallParameterNameVariants <- parameterNameVariants[grepl("^overall", parameterNameVariants)]
    if (length(overallParameterNameVariants) > 0) {
        overallParameterNameVariants <- c(
            gsub("^overall", "cumulative", overallParameterNameVariants),
            gsub("^overall", "cum", overallParameterNameVariants)
        )
    }
    parameterNameVariants <- c(parameterNameVariants, overallParameterNameVariants)
    otherVariants <- character(0)
    for (parameterNameVariant in parameterNameVariants) {
        otherVariants <- c(otherVariants, .getParameterNameVariant(parameterNameVariant, "."))
        otherVariants <- c(otherVariants, .getParameterNameVariant(parameterNameVariant, "_"))
    }
    return(unique(c(parameterNameVariants, otherVariants)))
}

# Returns TRUE iff at least one argument name in '...' matches one of the
# given keywords (including all accepted alias variants).
.isDataObject <- function(..., dataObjectkeyWords) {
    .assertIsValidDatasetArgument(...)
    argNames <- .getArgumentNames(...)
    if (length(argNames) == 0) {
        return(FALSE)
    }
    dataObjectkeyWords <- .getAllParameterNameVariants(dataObjectkeyWords)
    matching <- intersect(argNames, dataObjectkeyWords)
    return(length(matching) > 0)
}

# Returns TRUE iff the arguments in '...' describe an enrichment dataset
# (subset keywords present).
.isDataObjectEnrichment <- function(...)
{
    enrichmentEnabled <- .isDataObject(...,
        dataObjectkeyWords = c(C_KEY_WORDS_SUBSETS, paste0(C_KEY_WORDS_SUBSETS, "1"))
    )
    if (!enrichmentEnabled) {
        return(FALSE)
    }
    args <- list(...)
    # a data.frame with an all-NA 'subsets' column does not count as enrichment data
    if (length(args) == 1 && is.data.frame(args[[1]])) {
        data <- args[[1]]
        if ("subsets" %in% colnames(data) && all(is.na(data[["subsets"]]))) {
            return(FALSE)
        }
    }
    return(enrichmentEnabled)
}

# Returns TRUE iff the arguments in '...' describe a means dataset, i.e.,
# contain mean / standard deviation keywords (optionally with group suffix 1 or 2).
.isDataObjectMeans <- function(...) {
    dataObjectkeyWords <- c(
        C_KEY_WORDS_MEANS, C_KEY_WORDS_ST_DEVS,
        C_KEY_WORDS_OVERALL_MEANS, C_KEY_WORDS_OVERALL_ST_DEVS
    )
    dataObjectkeyWords <- c(dataObjectkeyWords, paste0(dataObjectkeyWords, c(1, 2)))
    return(.isDataObject(..., dataObjectkeyWords = dataObjectkeyWords))
}

# Returns TRUE iff the arguments in '...' describe a rates dataset:
# event keywords must be present and the survival / enrichment-survival
# keywords listed below must be absent.
.isDataObjectRates <- function(...) {
    dataObjectkeyWordsExpected <- c(
        C_KEY_WORDS_EVENTS, C_KEY_WORDS_OVERALL_EVENTS
    )
    dataObjectkeyWordsForbidden <- c(
        C_KEY_WORDS_OVERALL_LOG_RANKS, C_KEY_WORDS_LOG_RANKS,
        C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, C_KEY_WORDS_ALLOCATION_RATIOS,
        C_KEY_WORDS_EXPECTED_EVENTS, C_KEY_WORDS_VARIANCE_EVENTS,
        C_KEY_WORDS_OVERALL_EXPECTED_EVENTS, C_KEY_WORDS_OVERALL_VARIANCE_EVENTS
    )
    dataObjectkeyWordsExpected <- c(dataObjectkeyWordsExpected, paste0(dataObjectkeyWordsExpected, c(1, 2)))
    dataObjectkeyWordsForbidden <- c(dataObjectkeyWordsForbidden, paste0(dataObjectkeyWordsForbidden, c(1, 2)))
    return(.isDataObject(..., dataObjectkeyWords = dataObjectkeyWordsExpected) &&
        !.isDataObject(..., dataObjectkeyWords = dataObjectkeyWordsForbidden))
}

# Returns TRUE iff the arguments in '...' describe a survival dataset
# (log-rank or allocation ratio keywords, optionally with group suffix 1 or 2).
.isDataObjectSurvival <- function(...) {
    dataObjectkeyWords <- c(
        C_KEY_WORDS_OVERALL_LOG_RANKS, C_KEY_WORDS_LOG_RANKS,
        C_KEY_WORDS_OVERALL_ALLOCATION_RATIOS, C_KEY_WORDS_ALLOCATION_RATIOS
    )
    dataObjectkeyWords <- c(dataObjectkeyWords, paste0(dataObjectkeyWords, c(1, 2)))
    return(.isDataObject(..., dataObjectkeyWords = dataObjectkeyWords))
}

# Returns TRUE iff the arguments in '...' contain expected-events /
# variance-events keywords (non-stratified enrichment survival data).
.isDataObjectNonStratifiedEnrichmentSurvival <- function(...)
{ dataObjectkeyWords <- c( C_KEY_WORDS_EXPECTED_EVENTS, C_KEY_WORDS_VARIANCE_EVENTS, C_KEY_WORDS_OVERALL_EXPECTED_EVENTS, C_KEY_WORDS_OVERALL_VARIANCE_EVENTS ) return(.isDataObject(..., dataObjectkeyWords = dataObjectkeyWords)) } #' #' @title #' Get Wide Format #' #' @description #' Returns the specified dataset as a \code{\link[base]{data.frame}} in so-called wide format. #' #' @details #' In the wide format (unstacked), the data are presented with each different data variable in a separate column, i.e., #' the different groups are in separate columns. #' #' @seealso #' \code{\link[=getLongFormat]{getLongFormat()}} for returning the dataset as a \code{\link[base]{data.frame}} in long format. #' #' @return A \code{\link[base]{data.frame}} will be returned. #' #' @keywords internal #' #' @export #' getWideFormat <- function(dataInput) { .assertIsDataset(dataInput) paramNames <- names(dataInput) paramNames <- paramNames[!(paramNames %in% c("groups"))] if (!dataInput$.enrichmentEnabled) { paramNames <- paramNames[!(paramNames %in% c("subsets"))] } numberOfSubsets <- dataInput$getNumberOfSubsets() numberOfGroups <- dataInput$getNumberOfGroups(survivalCorrectionEnabled = FALSE) if (numberOfSubsets <= 1) { numberOfStages <- dataInput$getNumberOfStages() df <- data.frame(stages = 1:numberOfStages) } else { numberOfStages <- length(dataInput$subsets) / numberOfGroups / numberOfSubsets df <- data.frame(stages = rep(1:numberOfStages, numberOfSubsets)) } for (paramName in paramNames) { if (numberOfGroups == 1) { df[[paramName]] <- dataInput[[paramName]] } else { for (group in 1:numberOfGroups) { if (paramName %in% c("stages", "subsets")) { varName <- paramName } else { varName <- paste0(paramName, group) } df[[varName]] <- dataInput[[paramName]][dataInput$groups == group] } } } return(df) } .getNumberOfStages <- function(dataFrame, naOmitEnabled = TRUE) { if (naOmitEnabled) { colNames <- colnames(dataFrame) validColNames <- character(0) for (colName in colNames) { colValues 
<- dataFrame[, colName] if (length(colValues) > 0 && !all(is.na(colValues))) { validColNames <- c(validColNames, colName) } } subData <- stats::na.omit(dataFrame[, validColNames]) numberOfStages <- length(unique(as.character(subData$stage))) if (numberOfStages == 0) { print(dataFrame[, validColNames]) stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'dataFrame' seems to contain an invalid column" ) } return(numberOfStages) } return(length(levels(dataFrame$stage))) } .getWideFormat <- function(dataFrame) { if (!is.data.frame(dataFrame)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataFrame' must be a data.frame (is ", .getClassName(dataFrame), ")") } paramNames <- names(dataFrame) paramNames <- paramNames[!(paramNames %in% c("stage", "group", "subset"))] numberOfSubsets <- ifelse(is.factor(dataFrame$subset), length(levels(dataFrame$subset)), length(unique(na.omit(dataFrame$subset))) ) numberOfGroups <- ifelse(is.factor(dataFrame$group), length(levels(dataFrame$group)), length(unique(na.omit(dataFrame$group))) ) if (numberOfSubsets <= 1) { df <- data.frame(stage = 1:.getNumberOfStages(dataFrame)) } else { df <- data.frame(stage = 1:(length(dataFrame$subset) / numberOfGroups)) } for (paramName in paramNames) { if (numberOfGroups == 1) { df[[paramName]] <- dataFrame[[paramName]] } else { for (group in 1:numberOfGroups) { varName <- paste0(paramName, group) values <- dataFrame[[paramName]][dataFrame$group == group] df[[varName]] <- values } } } if (numberOfSubsets > 1) { stages <- dataFrame$stage[dataFrame$group == 1] df$stage <- stages # sort(rep(stages, multiplier)) subsets <- dataFrame$subset[dataFrame$group == 1] if (nrow(df) != length(subsets)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "something went wrong: ", nrow(df), " != ", length(subsets)) } df$subset <- subsets df <- .moveColumn(df, "subset", "stage") # df <- df[with(data.frame(subset = df$subset, index = as.integer(sub("\\D", "", df$subset))), order(index)), ] } return(df) } #' #' @title #' Get Long Format #' #' 
#' @description
#' Returns the specified dataset as a \code{\link[base]{data.frame}} in so-called long format.
#'
#' @details
#' In the long format (narrow, stacked), the data are presented with one column containing
#' all the values and another column listing the context of the value, i.e.,
#' the data for the different groups are in one column and the dataset contains an additional "group" column.
#'
#' @seealso
#' \code{\link[=getWideFormat]{getWideFormat()}} for returning the dataset as a \code{\link[base]{data.frame}} in wide format.
#'
#' @return A \code{\link[base]{data.frame}} will be returned.
#'
#' @keywords internal
#'
#' @export
#'
getLongFormat <- function(dataInput) {
    .assertIsDataset(dataInput)
    # The Dataset's as.data.frame method already produces the long format.
    return(as.data.frame(dataInput, niceColumnNamesEnabled = FALSE))
}

# Validates and stores the conditional-power arguments 'nPlanned' and
# 'allocationRatioPlanned' on the given AnalysisResults object.
# When conditional power is not enabled (no 'nPlanned') or there is only one
# group, a user-specified allocation ratio is ignored with a warning and the
# parameter type is set to "not applicable". Returns 'results' invisibly.
.setConditionalPowerArguments <- function(results, dataInput, nPlanned, allocationRatioPlanned) {
    .assertIsAnalysisResults(results)
    .setNPlanned(results, nPlanned)
    numberOfGroups <- dataInput$getNumberOfGroups()
    .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, numberOfGroups)
    if (!.isConditionalPowerEnabled(nPlanned) || numberOfGroups == 1) {
        if (numberOfGroups == 1) {
            # A non-default allocation ratio makes no sense for one group.
            if (length(allocationRatioPlanned) == 1 && !identical(allocationRatioPlanned, 1)) {
                warning("'allocationRatioPlanned' (", allocationRatioPlanned, ") ",
                    "will be ignored because the specified data has only one group",
                    call. = FALSE
                )
            }
        } else if (!identical(allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT)) {
            warning("'allocationRatioPlanned' (", allocationRatioPlanned, ") ",
                "will be ignored because 'nPlanned' is not defined",
                call. = FALSE
            )
        }
        results$.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE)
        return(invisible(results))
    }
    .setValueAndParameterType(results, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT)
    return(invisible(results))
}

# Recalculates information rates from the data: for means/rates the information
# is the overall sample size per stage, for survival data the overall number of
# events per stage, each divided by 'maxInformation'.
# 'stage' is capped at the number of stages present in the data.
# Returns a list with 'informationRates', 'absoluteInformations' and 'stage'.
.getRecalculatedInformationRates <- function(dataInput, maxInformation, stage = NA_integer_) {
    .assertIsSingleInteger(stage, "stage", naAllowed = TRUE, validateType = FALSE)
    stageFromData <- dataInput$getNumberOfStages()
    if (is.null(stage) || is.na(stage) || stage > stageFromData) {
        stage <- stageFromData
    }
    informationRates <- rep(NA_real_, stage)
    absoluteInformations <- rep(NA_real_, stage)
    if (.isDatasetMeans(dataInput) || .isDatasetRates(dataInput)) {
        for (k in 1:stage) {
            sampleSizes <- dataInput$getOverallSampleSizes(stage = k)
            absoluteInformations[k] <- sum(sampleSizes, na.rm = TRUE)
            informationRates[k] <- absoluteInformations[k] / maxInformation
        }
    } else if (.isDatasetSurvival(dataInput)) {
        for (k in 1:stage) {
            events <- dataInput$getOverallEvents(stage = k)
            absoluteInformations[k] <- sum(events, na.rm = TRUE)
            informationRates[k] <- absoluteInformations[k] / maxInformation
        }
    } else {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'dataInput' class ", .getClassName(dataInput), " is not supported")
    }
    return(list(informationRates = informationRates, absoluteInformations = absoluteInformations, stage = stage))
}

#' @title
#' Get Observed Information Rates
#'
#' @description
#' Recalculates the observed information rates from the specified dataset.
#'
#' @param dataInput The dataset for which the information rates shall be recalculated.
#' @inheritParams param_maxInformation
#' @inheritParams param_informationEpsilon
#' @inheritParams param_stage
#' @inheritParams param_three_dots
#'
#' @details
#' For means and rates the maximum information is the maximum number of subjects
#' or the relative proportion if \code{informationEpsilon} < 1;
#' for survival data it is the maximum number of events
#' or the relative proportion if \code{informationEpsilon} < 1.
#'
#' @seealso
#' \itemize{
#'   \item \code{\link[=getAnalysisResults]{getAnalysisResults()}} for using
#'         \code{getObservedInformationRates()} implicit,
#'   \item \href{https://www.rpact.org/vignettes/planning/rpact_boundary_update_example/}{www.rpact.org/vignettes/planning/rpact_boundary_update_example}
#' }
#'
#' @examples
#' # Absolute information epsilon:
#' # decision rule 45 >= 46 - 1, i.e., under-running
#' data <- getDataset(
#'     overallN = c(22, 45),
#'     overallEvents = c(11, 28)
#' )
#' getObservedInformationRates(data,
#'     maxInformation = 46, informationEpsilon = 1
#' )
#'
#' # Relative information epsilon:
#' # last information rate = 45/46 = 0.9783,
#' # is > 1 - 0.03 = 0.97, i.e., under-running
#' data <- getDataset(
#'     overallN = c(22, 45),
#'     overallEvents = c(11, 28)
#' )
#' getObservedInformationRates(data,
#'     maxInformation = 46, informationEpsilon = 0.03
#' )
#'
#' @return Returns a list that summarizes the observed information rates.
#'
#' @export
#'
getObservedInformationRates <- function(dataInput, ...,
        maxInformation = NULL, informationEpsilon = NULL, stage = NA_integer_) {
    .assertIsDataset(dataInput)
    .assertIsSingleInteger(maxInformation, "maxInformation", validateType = FALSE)
    information <- .getRecalculatedInformationRates(dataInput, maxInformation, stage = stage)
    informationRates <- information$informationRates
    absoluteInformations <- information$absoluteInformations
    stage <- information$stage
    status <- "interim-stage"
    # Internal switch (passed via ...) to suppress the informational message,
    # e.g. when called implicitly by getAnalysisResults().
    showObservedInformationRatesMessage <- .getOptionalArgument("showObservedInformationRatesMessage", ...)
    if (is.null(showObservedInformationRatesMessage) || !is.logical(showObservedInformationRatesMessage)) {
        showObservedInformationRatesMessage <- TRUE
    }

    # Updates at the final analysis in case the observed information at the final analysis
    # is larger ("over-running") or smaller ("under-running") than the planned maximum information
    if (informationRates[length(informationRates)] < 1) {
        underRunningEnabled <- FALSE
        if (!is.null(informationEpsilon)) {
            .assertIsSingleNumber(informationEpsilon, "informationEpsilon")
            .assertIsInOpenInterval(informationEpsilon, "informationEpsilon", lower = 0, upper = maxInformation)
            lastInformationRate <- informationRates[length(informationRates)]
            lastInformationNumber <- absoluteInformations[length(absoluteInformations)]
            if (informationEpsilon < 1) {
                # Relative epsilon: under-running if the last observed rate is
                # within (1 - epsilon) of the planned maximum.
                if (lastInformationRate >= (1 - informationEpsilon)) {
                    message(
                        "Under-running: relative information epsilon ", round(informationEpsilon, 4),
                        " is applicable; ",
                        "use observed information ", lastInformationNumber,
                        " instead of planned information ", maxInformation
                    )
                    # Rescale all rates to the observed (smaller) maximum.
                    information <- .getRecalculatedInformationRates(
                        dataInput, lastInformationNumber,
                        stage = stage
                    )
                    informationRates <- information$informationRates
                    absoluteInformations <- information$absoluteInformations
                    stage <- information$stage
                    underRunningEnabled <- TRUE
                    maxInformation <- lastInformationNumber
                    showObservedInformationRatesMessage <- FALSE
                }
            } else {
                # Absolute epsilon: under-running if the observed information
                # plus epsilon reaches the planned maximum.
                if ((lastInformationNumber + informationEpsilon) >= maxInformation) {
                    message(
                        "Under-running: absolute information epsilon ", round(informationEpsilon, 1),
                        " is applicable; ",
                        "use observed information ", lastInformationNumber,
                        " instead of planned information ", maxInformation
                    )
                    maxInformation <- lastInformationNumber
                    information <- .getRecalculatedInformationRates(
                        dataInput, lastInformationNumber,
                        stage = stage
                    )
                    informationRates <- information$informationRates
                    absoluteInformations <- information$absoluteInformations
                    stage <- information$stage
                    underRunningEnabled <- TRUE
                    showObservedInformationRatesMessage <- FALSE
                }
            }
        }
        if (!underRunningEnabled) {
            # Not at the final analysis yet: append the planned final rate 1.
            informationRates <- c(informationRates, 1)
        } else {
            status <- "under-running"
        }
    } else {
        lastInformationNumber <- absoluteInformations[length(absoluteInformations)]
        if (lastInformationNumber > maxInformation) {
            # Over-running: observed information exceeds the planned maximum;
            # rescale to the observed information.
            information <- .getRecalculatedInformationRates(
                dataInput, lastInformationNumber,
                stage = stage
            )
            informationRates <- information$informationRates
            absoluteInformations <- information$absoluteInformations
            stage <- information$stage
            message(
                "Over-running: observed information ", lastInformationNumber,
                " at stage ", length(absoluteInformations),
                " is larger than the maximum planned information ", maxInformation,
                "; information rates will be recalculated"
            )
            status <- "over-running"
            maxInformation <- lastInformationNumber
            showObservedInformationRatesMessage <- FALSE
        }
    }
    if (any(informationRates > 1)) {
        warning("The observed information at stage ",
            .arrayToString(which(informationRates > 1)), " is over-running, ",
            "i.e., the information rate (", .arrayToString(informationRates[informationRates > 1]), ") ",
            "is larger than the planned maximum information rate (1)",
            call. = FALSE
        )
    }
    # Cap rates at 1 and truncate everything after the first full-information stage.
    informationRates[informationRates > 1] <- 1
    end <- min(which(informationRates == 1))
    informationRates <- informationRates[1:end]
    if (showObservedInformationRatesMessage) {
        message(
            "The observed information rates for 'maxInformation' = ", maxInformation,
            " at stage ", stage, " are: ", .arrayToString(informationRates)
        )
    }
    if (status == "interim-stage" && informationRates[length(informationRates)] == 1 &&
            stage == length(informationRates)) {
        status <- "final-stage"
    }
    return(list(
        absoluteInformations = absoluteInformations,
        maxInformation = maxInformation,
        informationEpsilon = informationEpsilon,
        informationRates = informationRates,
        status = status
    ))
}

# Copies conditional power, seed and iteration information from the nested
# '.conditionalPowerResults' object onto 'results' and sets the corresponding
# parameter types, distinguishing simulated from analytically computed
# conditional power. Returns 'results' invisibly.
.synchronizeIterationsAndSeed <- function(results) {
    if (is.null(results[[".conditionalPowerResults"]])) {
        stop(
            C_EXCEPTION_TYPE_RUNTIME_ISSUE,
            sQuote(.getClassName(results)), " does not contain field ",
            sQuote(".conditionalPowerResults")
        )
    }
    if (results$.design$kMax == 1) {
        # Single-stage design: nothing to synchronize.
        return(invisible(results))
    }
    if (results$.conditionalPowerResults$simulated) {
        results$conditionalPowerSimulated <- results$.conditionalPowerResults$conditionalPower
        results$.setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE)
        results$.setParameterType("conditionalPowerSimulated", C_PARAM_GENERATED)
        results$.setParameterType("seed", results$.conditionalPowerResults$.getParameterType("seed"))
        results$seed <- results$.conditionalPowerResults$seed
        results$.setParameterType(
            "iterations",
            results$.conditionalPowerResults$.getParameterType("iterations")
        )
        results$iterations <- results$.conditionalPowerResults$iterations
    } else {
        results$conditionalPower <- results$.conditionalPowerResults$conditionalPower
        # Reset the simulated variant to an empty container of matching shape.
        if (is.matrix(results$conditionalPowerSimulated)) {
            results$conditionalPowerSimulated <- matrix()
        } else {
            results$conditionalPowerSimulated <- numeric(0)
        }
        results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
        results$.setParameterType("conditionalPowerSimulated", C_PARAM_NOT_APPLICABLE)
        results$.setParameterType("seed", C_PARAM_NOT_APPLICABLE)
        results$.setParameterType("iterations", C_PARAM_NOT_APPLICABLE)
    }
    return(invisible(results))
}

# Sets the parameter types of 'iterations' and 'seed' on 'results' depending on
# whether results are simulated and whether the values were user-defined
# (passed via ...). Returns 'results' invisibly.
.updateParameterTypeOfIterationsAndSeed <- function(results, ...) {
    if (!results$simulated) {
        results$iterations <- NA_integer_
        results$seed <- NA_real_
        results$.setParameterType("iterations", C_PARAM_NOT_APPLICABLE)
        results$.setParameterType("seed", C_PARAM_NOT_APPLICABLE)
        return(invisible(results))
    }
    iterations <- .getOptionalArgument("iterations", ...)
    results$.setParameterType("iterations", ifelse(is.null(iterations) ||
        is.na(iterations) || identical(iterations, C_ITERATIONS_DEFAULT),
    C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
    ))
    seed <- .getOptionalArgument("seed", ...)
    results$.setParameterType("seed", ifelse(!is.null(seed) && !is.na(seed),
        C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE
    ))
    return(invisible(results))
}
rpact/R/data.R0000644000176200001440000001446014445307575012643 0ustar liggesusers## |
## |  *Data*
## |
## |  This file is part of the R package rpact:
## |  Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## |  Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## |  Licensed under "GNU Lesser General Public License" version 3
## |  License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## |  RPACT company website: https://www.rpact.com
## |  rpact package website: https://www.rpact.org
## |
## |  Contact us for information about our services: info@rpact.com
## |
## |  File version: $Revision: 7126 $
## |  Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## |  Last changed by: $Author: pahlke $
## |

#' One-Arm Dataset of Means
#'
#' A dataset containing the sample sizes, means, and standard deviations of one group.
#' Use \code{getDataset(dataMeans)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.
#'
#' @format A \code{\link[base]{data.frame}} object.
#'
#' @keywords internal
#'
"dataMeans"

#' One-Arm Dataset of Rates
#'
#' A dataset containing the sample sizes and events of one group.
#' Use \code{getDataset(dataRates)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.
#'
#' @format A \code{\link[base]{data.frame}} object.
#'
#' @keywords internal
#'
"dataRates"

#' One-Arm Dataset of Survival Data
#'
#' A dataset containing the log-rank statistics, events, and allocation ratios of one group.
#' Use \code{getDataset(dataSurvival)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.
#'
#' @format A \code{\link[base]{data.frame}} object.
#'
#' @keywords internal
#'
"dataSurvival"

## Multi-arm

#' Multi-Arm Dataset of Means
#'
#' A dataset containing the sample sizes, means, and standard deviations of four groups.
#' Use \code{getDataset(dataMultiArmMeans)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.
#'
#' @format A \code{\link[base]{data.frame}} object.
#'
#' @keywords internal
#'
"dataMultiArmMeans"

#' Multi-Arm Dataset of Rates
#'
#' A dataset containing the sample sizes and events of three groups.
#' Use \code{getDataset(dataMultiArmRates)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.
#'
#' @format A \code{\link[base]{data.frame}} object.
#'
#' @keywords internal
#'
"dataMultiArmRates"

#' Multi-Arm Dataset of Survival Data
#'
#' A dataset containing the log-rank statistics, events, and allocation ratios of three groups.
#' Use \code{getDataset(dataMultiArmSurvival)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}.
#'
#' @format A \code{\link[base]{data.frame}} object.
#' #' @keywords internal #' "dataMultiArmSurvival" ## Enrichment #' Enrichment Dataset of Means #' #' A dataset containing the sample sizes, means, and standard deviations of two groups. #' Use \code{getDataset(dataEnrichmentMeans)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. #' #' @format A \code{\link[base]{data.frame}} object. #' #' @keywords internal #' "dataEnrichmentMeans" #' Enrichment Dataset of Rates #' #' A dataset containing the sample sizes and events of two groups. #' Use \code{getDataset(dataEnrichmentRates)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. #' #' @format A \code{\link[base]{data.frame}} object. #' #' @keywords internal #' "dataEnrichmentRates" #' Enrichment Dataset of Survival Data #' #' A dataset containing the log-rank statistics, events, and allocation ratios of two groups. #' Use \code{getDataset(dataEnrichmentSurvival)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. #' #' @format A \code{\link[base]{data.frame}} object. #' #' @keywords internal #' "dataEnrichmentSurvival" ## Enrichment Stratified #' Stratified Enrichment Dataset of Means #' #' A dataset containing the sample sizes, means, and standard deviations of two groups. #' Use \code{getDataset(dataEnrichmentMeansStratified)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. #' #' @format A \code{\link[base]{data.frame}} object. #' #' @keywords internal #' "dataEnrichmentMeansStratified" #' Stratified Enrichment Dataset of Rates #' #' A dataset containing the sample sizes and events of two groups. #' Use \code{getDataset(dataEnrichmentRatesStratified)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. #' #' @format A \code{\link[base]{data.frame}} object. 
#' #' @keywords internal #' "dataEnrichmentRatesStratified" #' Stratified Enrichment Dataset of Survival Data #' #' A dataset containing the log-rank statistics, events, and allocation ratios of two groups. #' Use \code{getDataset(dataEnrichmentSurvivalStratified)} to create a dataset object that can be processed by \code{\link[=getAnalysisResults]{getAnalysisResults()}}. #' #' @format A \code{\link[base]{data.frame}} object. #' #' @keywords internal #' "dataEnrichmentSurvivalStratified" #' #' @title #' Raw Dataset Of A Two Arm Continuous Outcome With Covariates #' #' @description #' An artificial dataset that was randomly generated #' with simulated normal data. The data set has six variables: #' #' 1. Subject id #' 2. Stage number #' 3. Group name #' 4. An example outcome in that we are interested in #' 5. The first covariate *gender* #' 6. The second covariate *covariate* #' #' @details #' See the vignette "Two-arm analysis for continuous data with covariates from raw data" #' to learn how to #' #' * import raw data from a csv file, #' * calculate estimated adjusted (marginal) means (EMMs, least-squares means) for a linear model, and #' * perform two-arm interim analyses with these data. #' #' You can use \code{rawDataTwoArmNormal} to reproduce the examples in the vignette. #' #' @format A \code{\link[base]{data.frame}} object. 
#'
#' @keywords internal
#'
"rawDataTwoArmNormal"
rpact/R/f_simulation_utilities.R0000644000176200001440000006022514445307576016517 0ustar liggesusers## |
## |  *Simulation of multi-arm design with combination test and conditional error approach*
## |
## |  This file is part of the R package rpact:
## |  Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## |  Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## |  Licensed under "GNU Lesser General Public License" version 3
## |  License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## |  RPACT company website: https://www.rpact.com
## |  rpact package website: https://www.rpact.org
## |
## |  Contact us for information about our services: info@rpact.com
## |
## |  File version: $Revision: 7126 $
## |  Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## |  Last changed by: $Author: pahlke $
## |

#' @include f_core_utilities.R
NULL

#'
#' @title
#' Set Seed
#'
#' @description
#' Sets the seed, generates it if \code{is.na(seed) == TRUE} and returns it.
#'
#' @param seed the seed to set.
#'
#' @details
#' Internal function.
#'
#' @return the (generated) seed.
#'
#' @examples
#'
#' .setSeed(12345)
#'
#' mySeed <- .setSeed()
#'
#' @keywords internal
#'
#' @noRd
#'
.setSeed <- function(seed = NA_real_) {
    if (!is.null(seed) && !is.na(seed)) {
        # suppressWarnings: a non-coercible seed would otherwise emit an
        # "NAs introduced by coercion" warning right before the stop().
        if (suppressWarnings(is.na(as.integer(seed)))) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'seed' must be a valid integer")
        }
        set.seed(seed = seed, kind = "Mersenne-Twister", normal.kind = "Inversion")
        return(seed)
    }

    # No seed given: derive one from the current RNG state if available,
    # otherwise generate a random one.
    if (exists(".Random.seed") && length(.Random.seed) > 0) {
        seed <- .Random.seed[length(.Random.seed)]
    } else {
        seed <- round(stats::runif(1) * 1e8)
    }

    .logDebug("Set seed to %s", seed)

    # Bug fix: the previous version assigned 'seed <- NA_real_' inside the
    # error handler, which only modified the handler's local environment, so
    # the unusable seed was still returned. tryCatch now returns the value.
    seed <- tryCatch(
        {
            set.seed(seed, kind = "Mersenne-Twister", normal.kind = "Inversion")
            seed
        },
        error = function(e) {
            .logError("Failed to set seed to '%s' (%s): %s", seed, .getClassName(seed), e)
            traceback()
            NA_real_
        }
    )

    invisible(seed)
}

# Derives gMax (number of populations) from sub-group names such as
# "S1", "S2", "S12", "R": the maximum digit occurring in the names plus 1.
# A plain "S" is treated as "S1"; names without digits are ignored.
# Returns 1 if no digit-bearing sub-group name is present.
.getGMaxFromSubGroups <- function(subGroups) {
    .assertIsCharacter(subGroups, "subGroups")
    subGroups[subGroups == "S"] <- "S1"
    subGroups <- trimws(gsub("\\D", "", subGroups))
    subGroups <- subGroups[subGroups != ""]
    if (length(subGroups) == 0) {
        return(1)
    }
    gMax <- max(as.integer(unlist(strsplit(subGroups, "", fixed = TRUE)))) + 1
    return(gMax)
}

# Aggregates per-iteration raw simulation data ('data', or the .data field of a
# SimulationResults object) into per-stage/per-variant summary statistics:
# sample sizes, rejection and futility probabilities, expected number of
# subjects, iteration counts, early-stop probabilities and achieved conditional
# power. 'variantName' selects the column defining the simulation scenarios.
.getSimulationParametersFromRawData <- function(data, ...,
        variantName, maxNumberOfIterations = NA_integer_) {
    if (is.null(data) || length(data) != 1) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'data' must be a valid data.frame or a simulation result object")
    }
    if (inherits(data, "SimulationResults")) {
        data <- data[[".data"]]
    }
    if (!is.data.frame(data)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'data' (", .getClassName(data), ") must be a data.frame or a simulation result object")
    }
    if (is.na(maxNumberOfIterations)) {
        maxNumberOfIterations <- max(data$iterationNumber)
    }
    stageNumbers <- sort(unique(na.omit(data$stageNumber)))
    kMax <- max(stageNumbers)
    variantLevels <- sort(unique(na.omit(data[[variantName]])))
    numberOfVariants <- length(variantLevels)

    sampleSizes <- matrix(0, nrow = kMax, ncol = numberOfVariants)
    rejectPerStage <- matrix(0, nrow = kMax, ncol = numberOfVariants)
    futilityPerStage <- matrix(0, nrow = kMax - 1, ncol = numberOfVariants)
    expectedNumberOfSubjects <- rep(0, numberOfVariants)
    conditionalPowerAchieved <- matrix(NA_real_, nrow = kMax, ncol = numberOfVariants)

    index <- 1
    for (variantValue in variantLevels) {
        subData <- data[data[[variantName]] == variantValue, ]
        iterations <- table(subData$stageNumber)
        for (k in sort(unique(na.omit(subData$stageNumber)))) {
            subData2 <- subData[subData$stageNumber == k, ]
            # Per-stage quantities are averaged over the iterations that
            # actually reached stage k; probabilities are normalized by the
            # total number of iterations.
            sampleSizes[k, index] <- sum(subData2$numberOfSubjects) / iterations[k]
            rejectPerStage[k, index] <- sum(subData2$rejectPerStage) / maxNumberOfIterations
            if (k < kMax) {
                futilityPerStage[k, index] <- sum(na.omit(subData2$futilityPerStage)) / maxNumberOfIterations
            }
            expectedNumberOfSubjects[index] <- expectedNumberOfSubjects[index] +
                sum(subData2$numberOfSubjects) / maxNumberOfIterations
            if (k > 1) {
                conditionalPowerAchieved[k, index] <-
                    sum(subData$conditionalPowerAchieved[subData$stageNumber == k]) / iterations[k]
            }
        }
        index <- index + 1
    }

    overallReject <- colSums(rejectPerStage)
    futilityStop <- colSums(futilityPerStage)
    iterations <- table(data$stageNumber, data[[variantName]])

    if (kMax > 1) {
        # Early stop = futility stops plus rejections before the final stage.
        if (numberOfVariants == 1) {
            earlyStop <- sum(futilityPerStage) + sum(rejectPerStage[1:(kMax - 1)])
        } else {
            if (kMax > 2) {
                rejectPerStageColSum <- colSums(rejectPerStage[1:(kMax - 1), ])
            } else {
                rejectPerStageColSum <- rejectPerStage[1, ]
            }
            earlyStop <- colSums(futilityPerStage) + rejectPerStageColSum
        }
    } else {
        earlyStop <- rep(0, numberOfVariants)
    }

    sampleSizes[is.na(sampleSizes)] <- 0

    return(list(
        sampleSizes = sampleSizes,
        rejectPerStage = rejectPerStage,
        overallReject = overallReject,
        futilityPerStage = futilityPerStage,
        futilityStop = futilityStop,
        iterations = iterations,
        earlyStop = earlyStop,
        expectedNumberOfSubjects = expectedNumberOfSubjects,
        conditionalPowerAchieved = conditionalPowerAchieved
    ))
}

# Asserts that the effect-list entry 'arg' (vector or matrix) has one entry
# (or column) per sub-group; stops with an informative message otherwise.
.assertArgumentFitsWithSubGroups <- function(arg, argName, subGroups) {
    if
    (is.null(arg) || length(arg) == 0 || all(is.na(arg))) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'effectList' must contain ", sQuote(argName))
    }

    argName <- paste0("effectList$", argName)
    # A matrix is checked by its number of columns, a vector by its length.
    len <- ifelse(is.matrix(arg), ncol(arg), length(arg))
    if (len != length(subGroups)) {
        argName <- sQuote(argName)
        if (!is.matrix(arg)) {
            argName <- paste0(argName, " (", .arrayToString(arg), ")")
        }
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, argName,
            " must have ", length(subGroups),
            " columns given by the number of sub-groups"
        )
    }
}

# Expected entry names of an enrichment effect list, per endpoint type.
C_EFFECT_LIST_NAMES_EXPECTED_MEANS <- c("subGroups", "prevalences", "effects", "stDevs")
C_EFFECT_LIST_NAMES_EXPECTED_RATES <- c("subGroups", "prevalences", "piControls", "piTreatments")
C_EFFECT_LIST_NAMES_EXPECTED_SURVIVAL <- c("subGroups", "prevalences", "piControls", "hazardRatios")

# Validates an enrichment 'effectList' (subGroups, prevalences and an
# endpoint-specific effect matrix) and converts it into a long-format
# data.frame with one row per sub-group and situation.
# Singular entry names (e.g. "effect") are accepted and pluralized.
# Returns NULL if 'effectList' is NULL and nullAllowed = TRUE.
.getEffectData <- function(effectList, ...,
        endpoint = NA_character_,
        gMax = NA_integer_,
        nullAllowed = TRUE,
        parameterNameWarningsEnabled = TRUE) {
    if (nullAllowed && is.null(effectList)) {
        return(NULL)
    }

    .assertIsSingleInteger(gMax, "gMax", naAllowed = TRUE, validateType = FALSE)

    if (is.null(effectList) || length(effectList) == 0 || !is.list(effectList)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("effectList"), " must be a non-empty list")
    }
    effectListNames <- names(effectList)
    if (is.null(effectListNames) || any(nchar(trimws(effectListNames)) == 0)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("effectList"),
            " must be named. Current names are ",
            .arrayToString(effectListNames, encapsulate = TRUE)
        )
    }
    # Accept singular names as aliases for their plural counterparts.
    for (singularName in c(
        "subGroup", "effect", "piTreatment",
        "piControl", "hazardRatio", "prevalence", "stDev"
    )) {
        names(effectList)[names(effectList) == singularName] <- paste0(singularName, "s")
    }
    effectListNames <- names(effectList)

    if (!("subGroups" %in% effectListNames)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("effectList"), " must contain ", sQuote("subGroups"))
    }
    subGroups <- effectList[["subGroups"]]
    if (is.null(subGroups) || length(subGroups) == 0 ||
            (!is.character(subGroups) && !is.factor(subGroups))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("effectList$subGroups"),
            " must be a non-empty character vector or factor"
        )
    }
    if (is.factor(subGroups)) {
        subGroups <- as.character(subGroups)
    }
    expectedSubGroups <- "F"
    if (length(subGroups) > 1) {
        if (is.na(gMax)) {
            # Derive gMax from the digits used in the sub-group names,
            # e.g. c("S1", "S2", "S12", "R") -> max digit 2 -> gMax = 3.
            if (length(subGroups) > 2) {
                gMax <- max(as.integer(strsplit(gsub("\\D", "",
                    paste0(subGroups, collapse = "")
                ), "", fixed = TRUE)[[1]]), na.rm = TRUE) + 1
            } else {
                gMax <- length(subGroups)
            }
        }
        if ("F" %in% subGroups) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "definition of full population 'F' ",
                "together with sub-groups", ifelse(length(subGroups) == 2, "", "s"), " ",
                .arrayToString(subGroups[subGroups != "F"], encapsulate = TRUE, mode = "and"),
                " makes no sense and is not allowed (use remaining population 'R' instead of 'F')"
            )
        }
        expectedSubGroups <- .createSubsetsByGMax(gMax, stratifiedInput = TRUE, all = FALSE)
        if (gMax < 3) {
            # For two populations the digit suffix is dropped ("S" instead of "S1").
            expectedSubGroups <- gsub("\\d", "", expectedSubGroups)
        }
    }

    missingSubGroups <- expectedSubGroups[!(expectedSubGroups %in% subGroups)]
    if (length(missingSubGroups) > 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("effectList$subGroups"),
            " must contain ", .arrayToString(dQuote(missingSubGroups))
        )
    }
    unknownSubGroups <- subGroups[!(subGroups %in% expectedSubGroups)]
    if (length(unknownSubGroups) > 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("effectList$subGroups"),
            " must not contain ", .arrayToString(dQuote(unknownSubGroups)),
            " (valid sub-group names: ", .arrayToString(dQuote(expectedSubGroups)), ")"
        )
    }

    # Locate the endpoint-specific effect matrix entry.
    matrixName <- NA_character_
    matrixNames <- c("effects", "piTreatments", "hazardRatios")
    if (!is.na(endpoint)) {
        if (endpoint == "means") {
            matrixNames <- "effects"
        } else if (endpoint == "rates") {
            matrixNames <- "piTreatments"
        } else if (endpoint == "survival") {
            matrixNames <- "hazardRatios"
        }
    }
    for (m in matrixNames) {
        if (m %in% effectListNames) {
            matrixName <- m
            break
        }
    }
    if (is.na(matrixName)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("effectList"),
            " must contain ", .arrayToString(matrixNames, mode = "or", encapsulate = TRUE)
        )
    }
    matrixValues <- effectList[[matrixName]]
    if (is.vector(matrixValues)) {
        # A plain vector is interpreted as a single situation (one row).
        matrixValues <- matrix(matrixValues, nrow = 1)
    }
    if (is.matrix(matrixValues)) {
        .assertIsValidMatrix(matrixValues, paste0("effectList$", matrixName), naAllowed = TRUE)
    }
    if (!is.matrix(matrixValues) && !is.data.frame(matrixValues)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote(paste0("effectList$", matrixName)),
            " must be a matrix or data.frame"
        )
    }
    if (!is.data.frame(matrixValues)) {
        matrixValues <- as.data.frame(matrixValues)
    }
    if (nrow(matrixValues) == 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote(paste0("effectList$", matrixName)),
            " must have one or more rows ",
            "reflecting the different situations to consider"
        )
    }
    .assertArgumentFitsWithSubGroups(matrixValues, matrixName, subGroups)
    # Reshape the situations x sub-groups matrix into long format:
    # one row per (situation, sub-group) pair.
    colNames <- paste0(matrixName, 1:ncol(matrixValues))
    colnames(matrixValues) <- colNames
    matrixValues$situation <- 1:nrow(matrixValues)
    longData <- stats::reshape(
        data = matrixValues, direction = "long",
        varying = colNames, idvar = "situation", sep = ""
    )
    timeColumnIndex <- which(colnames(longData) == "time")
    colnames(longData)[timeColumnIndex] <- "subGroupNumber"
    longData$subGroups <- rep(NA_character_, nrow(longData))
    indices <- sort(unique(longData$subGroupNumber))
    for (i in indices) {
        longData$subGroups[longData$subGroupNumber == i] <- subGroups[i]
    }

    longData$prevalences <- rep(NA_real_, nrow(longData))
    prevalences <- effectList[["prevalences"]]
    if (is.null(prevalences)) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, sQuote("effectList$prevalences"), " must be specified")
    }
    .assertIsNumericVector(prevalences, "effectList$prevalences")
    .assertArgumentFitsWithSubGroups(prevalences, "prevalences", subGroups)
    if (abs(sum(prevalences) - 1) > 1e-04) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("effectList$prevalences"), " must sum to 1")
    }
    for (i in indices) {
        longData$prevalences[longData$subGroupNumber == i] <- prevalences[i]
    }

    # means only
    if (matrixName == "effects") {
        longData$stDevs <- rep(NA_real_, nrow(longData))
        stDevs <- effectList[["stDevs"]]
        if (is.null(stDevs)) {
            stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, sQuote("effectList$stDevs"), " must be specified")
        }
        .assertIsNumericVector(stDevs, "effectList$stDevs")
        if (!is.null(stDevs) && length(stDevs) == 1) {
            # A single stDev applies to all sub-groups.
            stDevs <- rep(stDevs, length(prevalences))
        }
        .assertArgumentFitsWithSubGroups(stDevs, "stDevs", subGroups)
        for (i in indices) {
            longData$stDevs[longData$subGroupNumber == i] <- stDevs[i]
        }
    }
    # rates and survival only
    # NOTE(review): due to && binding tighter than ||, the "piControls" check
    # applies only to the hazardRatios branch — presumably intentional
    # (piControls is optional for survival); confirm against callers.
    else if (matrixName == "piTreatments" ||
            matrixName == "hazardRatios" && "piControls" %in% effectListNames) {
        longData$piControls <- rep(NA_real_, nrow(longData))
        piControls <- effectList[["piControls"]]
        if (is.null(piControls)) {
            stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, sQuote("effectList$piControls"), " must be specified")
        }
        .assertIsNumericVector(piControls, "effectList$piControls")
        .assertArgumentFitsWithSubGroups(piControls, "piControls", subGroups)
        for (i in indices) {
            longData$piControls[longData$subGroupNumber == i] <- piControls[i]
        }
    }

    rownames(longData) <- NULL

    # order by subGroup
    longData$subGroupNumber <- as.integer(gsub("\\D", "", gsub("^S$", "S1", longData$subGroups)))
    # Sub-groups without a digit (e.g. "R") sort last.
    longData$subGroupNumber[is.na(longData$subGroupNumber)] <- 99999
    longData <- longData[order(longData$subGroupNumber, longData$situation), ]
    longData <- .moveColumn(longData, matrixName, colnames(longData)[length(colnames(longData))])

    # Rename plural columns back to their singular form for the result.
    for (singularName in c(
        "subGroup", "effect", "piTreatment",
        "piControl", "hazardRatio", "prevalence", "stDev"
    )) {
        colnames(longData)[colnames(longData) == paste0(singularName, "s")] <- singularName
    }

    longData <- longData[, colnames(longData) != "subGroupNumber"]

    if (parameterNameWarningsEnabled && !is.na(endpoint)) {
        # Warn about entries that are not expected for the given endpoint.
        if (endpoint == "means") {
            ignore <- effectListNames[!(effectListNames %in% C_EFFECT_LIST_NAMES_EXPECTED_MEANS)]
        } else if (endpoint == "rates") {
            ignore <- effectListNames[!(effectListNames %in% C_EFFECT_LIST_NAMES_EXPECTED_RATES)]
        } else if (endpoint == "survival") {
            ignore <- effectListNames[!(effectListNames %in% C_EFFECT_LIST_NAMES_EXPECTED_SURVIVAL)]
        }
        if (length(ignore) > 0) {
            warning("The parameter", ifelse(length(ignore) == 1, "", "s"), " ",
                .arrayToString(ignore, encapsulate = TRUE), " will be ignored",
                call. = FALSE
            )
        }
    }

    return(longData)
}

# Maps a SimulationResultsEnrichment object's class name to the name of its
# effect matrix inside 'effectList': "effects" (means), "piTreatments" (rates)
# or "hazardRatios" (survival).
.getSimulationEnrichmentEffectMatrixName <- function(obj) {
    if (!grepl("SimulationResultsEnrichment", .getClassName(obj))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("obj"),
            " must be a SimulationResultsEnrichment object (is ", .getClassName(obj), ")"
        )
    }
    if (grepl("Means", .getClassName(obj))) {
        return("effects")
    }
    if (grepl("Rates", .getClassName(obj))) {
        return("piTreatments")
    }
    if (grepl("Survival", .getClassName(obj))) {
        return("hazardRatios")
    }
    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "class ", .getClassName(obj), " not supported")
}

# Extracts the effect matrix and derived x-axis values from an enrichment
# simulation result for plotting. With a single effect column the effects
# themselves form the x-axis; otherwise the situation index is used (discrete).
# 'valid' is FALSE (or an error is raised if validatePlotCapability = TRUE)
# when fewer than two situations are available.
.getSimulationEnrichmentEffectData <- function(simulationResults, validatePlotCapability = TRUE) {
    effectMatrixName <- .getSimulationEnrichmentEffectMatrixName(simulationResults)
    effectData <- simulationResults$effectList[[effectMatrixName]]
    discreteXAxis <- FALSE
    if (ncol(effectData) == 1) {
        xValues <- effectData[, 1]
    } else {
        xValues <- 1:nrow(effectData)
        discreteXAxis <- TRUE
    }
    valid <- TRUE
    if (length(xValues) <= 1) {
        if (validatePlotCapability) {
            # NOTE(review): message contains typos ("ore", "specifed");
            # left unchanged here because it is runtime text.
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "2 ore more situations must be specifed in ",
                sQuote(paste0("effectList$", effectMatrixName))
            )
        }
        valid <- FALSE
    }
    return(list(
        effectMatrixName = effectMatrixName,
        effectData = effectData,
        xValues = xValues,
        discreteXAxis = discreteXAxis,
        valid = valid
    ))
}

# Inverse of .getEffectData(): converts a long-format effect data.frame back
# into an effect list (continues past the end of this chunk).
.getEffectList <- function(effectData, ...,
        parameterName = "effectData",
        endpoint = NA_character_,
        parameterNameWarningsEnabled = TRUE) {
    if (is.null(effectData) || length(effectData) == 0 || !is.data.frame(effectData)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote(parameterName), " must be a non-empty data.frame")
    }
    effectList <- list(subGroups = character(0), prevalences = numeric(0))
    matrixName <- NA_character_
    matrixNames <- c("effect", "piTreatment", "hazardRatio")
    names(matrixNames) <- c("means", "rates", "survival")
    expectedMatrixName <- ifelse(is.na(endpoint), NA_character_, matrixNames[[endpoint]])
    effectDataNames <- colnames(effectData)
for (m in matrixNames) {
        # Pick the effect column; for a known endpoint only the expected column is accepted
        if (m %in% effectDataNames && (is.na(endpoint) || identical(m, expectedMatrixName))) {
            matrixName <- m
            break
        }
    }
    if (is.na(matrixName)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            sQuote(parameterName), " must contain ",
            ifelse(!is.na(expectedMatrixName),
                sQuote(expectedMatrixName),
                .arrayToString(matrixNames, mode = "or", encapsulate = TRUE)
            )
        )
    }
    # Plural name ("effects", "piTreatments", "hazardRatios") used as list entry
    matrixNameNew <- paste0(matrixName, "s")
    effectList[[matrixNameNew]] <- NULL
    if (matrixName == "effect") {
        effectList$stDevs <- numeric(0)
    } else if (matrixName %in% c("piTreatment", "hazardRatio")) {
        effectList$piControls <- numeric(0)
    }
    # Collect per-sub-group values; effect values are bound column-wise into a matrix
    for (subGroup in unique(effectData$subGroup)) {
        effectList$subGroups <- c(effectList$subGroups, subGroup)
        subData <- effectData[effectData$subGroup == subGroup, ]
        effectList$prevalences <- c(effectList$prevalences, subData$prevalence[1])
        if (matrixName == "effect") {
            if (!("stDev" %in% effectDataNames)) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote(parameterName), " must contain ", sQuote("stDev"))
            }
            effectList$stDevs <- c(effectList$stDevs, subData$stDev[1])
        } else if (matrixName == "piTreatment" ||
                (matrixName == "hazardRatio" && "piControl" %in% effectDataNames)) {
            # piControl is mandatory for rates, optional for survival
            if (!("piControl" %in% effectDataNames)) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote(parameterName), " must contain ", sQuote("piControl"))
            }
            effectList$piControls <- c(effectList$piControls, subData$piControl[1])
        }
        if (is.null(effectList[[matrixNameNew]])) {
            effectList[[matrixNameNew]] <- subData[[matrixName]]
        } else {
            effectList[[matrixNameNew]] <- cbind(effectList[[matrixNameNew]], subData[[matrixName]])
        }
    }
    # Single sub-group: coerce the collected vector to a one-column matrix
    if (!is.matrix(effectList[[matrixNameNew]])) {
        effectList[[matrixNameNew]] <- matrix(effectList[[matrixNameNew]], ncol = 1)
    }
    if (parameterNameWarningsEnabled && !is.na(endpoint)) {
        # Warn about data.frame columns not used for the given endpoint
        # (expected names are plural; gsub strips the trailing "s" for comparison)
        if (endpoint == "means") {
            ignore <- effectDataNames[!(effectDataNames %in% gsub("s$", "", C_EFFECT_LIST_NAMES_EXPECTED_MEANS))]
        } else if (endpoint == "rates") {
            ignore <-
                effectDataNames[!(effectDataNames %in% gsub("s$", "", C_EFFECT_LIST_NAMES_EXPECTED_RATES))]
        } else if (endpoint == "survival") {
            ignore <- effectDataNames[!(effectDataNames %in% gsub("s$", "", C_EFFECT_LIST_NAMES_EXPECTED_SURVIVAL))]
        }
        if (length(ignore) > 0) {
            warning("The parameter", ifelse(length(ignore) == 1, "", "s"), " ",
                .arrayToString(ignore, encapsulate = TRUE), " will be ignored",
                call. = FALSE
            )
        }
    }
    # Range validation of the assembled entries: prevalences in [0, 1]
    if (!is.null(effectList[["prevalences"]])) {
        .assertIsInClosedInterval(effectList$prevalences, "effectList$prevalences",
            lower = 0, upper = 1, call. = FALSE
        )
    }
    if (!is.null(effectList[["effects"]])) {
        .assertIsNumericVector(effectList$effects, "effectList$effects", call. = FALSE)
    }
    # Rates must lie strictly in (0, 1); when the rate entry is the effect
    # matrix it is checked row-wise
    for (piParam in c("piControls", "piTreatments")) {
        if (!is.null(effectList[[piParam]])) {
            if (piParam == matrixNameNew && is.matrix(effectList[[piParam]])) {
                for (i in 1:nrow(effectList[[piParam]])) {
                    .assertIsInOpenInterval(effectList[[piParam]][i, ], paste0("effectList$", piParam),
                        lower = 0, upper = 1, call. = FALSE
                    )
                }
            } else {
                .assertIsInOpenInterval(effectList[[piParam]], paste0("effectList$", piParam),
                    lower = 0, upper = 1, call. = FALSE
                )
            }
        }
    }
    # Hazard ratios and standard deviations must be strictly positive
    for (ratioParam in c("hazardRatios", "stDevs")) {
        if (!is.null(effectList[[ratioParam]])) {
            .assertIsInOpenInterval(effectList[[ratioParam]], paste0("effectList$", ratioParam),
                lower = 0, upper = NULL, call.
= FALSE ) } } return(effectList) } .getValidatedEffectList <- function(effectList, ..., endpoint, gMax = NA_integer_, nullAllowed = TRUE) { if (is.null(endpoint) || !(endpoint %in% c("means", "rates", "survival"))) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'endpoint' (", endpoint, ") must be one of 'means', 'rates', or 'survival'") } if (is.null(effectList) || length(effectList) == 0 || (!is.list(effectList) && !is.data.frame(effectList))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'effectList' must be a valid list or data.frame") } if (is.data.frame(effectList)) { return(.getEffectList(effectList, parameterName = "effectList", endpoint = endpoint)) } effectData <- .getEffectData(effectList, endpoint = endpoint, gMax = gMax, nullAllowed = nullAllowed) return(.getEffectList(effectData)) } .getVariedParameterSimulationMultiArm <- function(designPlan) { if (!grepl("SimulationResultsMultiArm", .getClassName(designPlan))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designPlan' (", .getClassName(designPlan), ") must be of class 'SimulationResultsMultiArm'" ) } if (grepl("Means", .getClassName(designPlan))) { return("muMaxVector") } else if (grepl("Rates", .getClassName(designPlan))) { return("piMaxVector") } else if (grepl("Survival", .getClassName(designPlan))) { return("omegaMaxVector") } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designPlan' (", .getClassName(designPlan), ") must be of class 'SimulationResultsMultiArm'" ) } rpact/R/f_core_constants.R0000644000176200001440000015651114445307575015267 0ustar liggesusers## | ## | *Constants* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | 
Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_utilities.R NULL C_LOG_LEVEL_TRACE <- "TRACE" C_LOG_LEVEL_DEBUG <- "DEBUG" C_LOG_LEVEL_INFO <- "INFO" C_LOG_LEVEL_WARN <- "WARN" C_LOG_LEVEL_ERROR <- "ERROR" C_LOG_LEVEL_PROGRESS <- "PROGRESS" C_LOG_LEVEL_DISABLED <- "DISABLED" C_SUMMARY_OUTPUT_SIZE_DEFAULT <- "large" C_SUMMARY_LIST_ITEM_PREFIX_DEFAULT <- " " # used in 'class_core_plot_settings.R' C_POSITION_OUTSIDE_PLOT <- 0 C_POSITION_LEFT_TOP <- 1 C_POSITION_LEFT_CENTER <- 2 C_POSITION_LEFT_BOTTOM <- 3 C_POSITION_RIGHT_TOP <- 4 C_POSITION_RIGHT_CENTER <- 5 C_POSITION_RIGHT_BOTTOM <- 6 C_DESIGN_TOLERANCE_DEFAULT <- 1e-08 C_CONST_NEWTON_COTES <- 15 C_TWO_SIDED_POWER_DEFAULT <- FALSE C_BINDING_FUTILITY_DEFAULT <- FALSE C_BINDING_FUTILITY_FISHER_DEFAULT <- TRUE C_CONST_BOUND_HP_DEFAULT <- 3 C_ALPHA_DEFAULT <- 0.025 C_BETA_DEFAULT <- 0.2 C_SIDED_DEFAULT <- 1L C_KMAX_DEFAULT <- 3L C_KMAX_UPPER_BOUND <- 20L C_KMAX_UPPER_BOUND_FISHER <- 6L C_NA_MAX_DEFAULT <- 100L C_POWER_ASN_THETA_DEFAULT <- seq(-1, 1, 0.02) C_ANALYSIS_TOLERANCE_DEFAULT <- 1e-06 C_ANALYSIS_TOLERANCE_FISHER_DEFAULT <- 1e-14 C_UPPER_BOUNDS_DEFAULT <- 8 C_FUTILITY_BOUNDS_DEFAULT <- -6 C_ALPHA_0_VEC_DEFAULT <- 1 C_THETA_H0_MEANS_DEFAULT <- 0 C_THETA_H0_RATES_DEFAULT <- 0 C_THETA_H0_SURVIVAL_DEFAULT <- 1 C_ALLOCATION_RATIO_DEFAULT <- 1 C_ALLOCATION_RATIO_MAXIMUM <- 100 C_DIRECTION_UPPER_DEFAULT <- TRUE C_NORMAL_APPROXIMATION_MEANS_DEFAULT <- FALSE C_NORMAL_APPROXIMATION_RATES_DEFAULT <- TRUE C_EQUAL_VARIANCES_DEFAULT <- TRUE C_ITERATIONS_DEFAULT <- 1000L C_ACCEPT_DEVIATION_INFORMATIONRATES <- 0.05 C_THETA_RANGE_SEQUENCE_LENGTH_DEFAULT <- 50 C_VARIED_PARAMETER_SEQUENCE_LENGTH_DEFAULT <- 30 C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL <- "TrialDesignGroupSequential" C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL <- 
"TrialDesignInverseNormal" C_CLASS_NAME_TRIAL_DESIGN_FISHER <- "TrialDesignFisher" C_CLASS_NAME_TRIAL_DESIGN_CONDITIONAL_DUNNETT <- "TrialDesignConditionalDunnett" .getTrialDesignClassNames <- function(inclusiveConditionalDunnett = TRUE) { trialDesignClassNames <- c( C_CLASS_NAME_TRIAL_DESIGN_GROUP_SEQUENTIAL, C_CLASS_NAME_TRIAL_DESIGN_INVERSE_NORMAL, C_CLASS_NAME_TRIAL_DESIGN_FISHER ) if (inclusiveConditionalDunnett) { trialDesignClassNames <- c(trialDesignClassNames, C_CLASS_NAME_TRIAL_DESIGN_CONDITIONAL_DUNNETT) } return(trialDesignClassNames) } C_EXCEPTION_TYPE_RUNTIME_ISSUE <- "Runtime exception: " C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT <- "Illegal argument: " C_EXCEPTION_TYPE_ILLEGAL_DATA_INPUT <- "Illegal data input: " C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS <- "Conflicting arguments: " C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS <- "Argument out of bounds: " C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS <- "Argument length out of bounds: " C_EXCEPTION_TYPE_INDEX_OUT_OF_BOUNDS <- "Index out of bounds: " C_EXCEPTION_TYPE_MISSING_ARGUMENT <- "Missing argument: " C_EXCEPTION_TYPE_INCOMPLETE_ARGUMENTS <- "Incomplete associated arguments: " C_DIRECTION_LOWER <- "lower" C_DIRECTION_UPPER <- "upper" C_QNORM_EPSILON <- 1e-100 # a value between 1e-323 and 1e-16 C_QNORM_MAXIMUM <- -stats::qnorm(C_QNORM_EPSILON) C_QNORM_MINIMUM <- -C_QNORM_MAXIMUM C_QNORM_THRESHOLD <- floor(C_QNORM_MAXIMUM) # # Constants used in 'f_analysis_multiarm' and 'f_analysis_enrichment' # C_INTERSECTION_TEST_MULTIARMED_DEFAULT <- "Dunnett" C_INTERSECTION_TEST_ENRICHMENT_DEFAULT <- "Simes" C_INTERSECTION_TESTS_MULTIARMED <- c( "Bonferroni", "Simes", "Sidak", "Dunnett", "Hierarchical" ) C_INTERSECTION_TESTS_ENRICHMENT <- c( "Bonferroni", "Simes", "Sidak", "SpiessensDebois" ) C_VARIANCE_OPTION_DUNNETT <- "overallPooled" C_VARIANCE_OPTION_MULTIARMED_DEFAULT <- "overallPooled" C_VARIANCE_OPTIONS_MULTIARMED <- c("overallPooled", "pairwisePooled", "notPooled") C_VARIANCE_OPTION_ENRICHMENT_DEFAULT <- "pooled" 
C_VARIANCE_OPTIONS_ENRICHMENT <- c("pooled", "notPooled", "pooledFromFull")
C_STRATIFIED_ANALYSIS_DEFAULT <- TRUE

#
# Constants used in 'parameters.R'
#
# Single-character markers for the origin of a parameter value
C_PARAM_USER_DEFINED <- "u"
C_PARAM_DEFAULT_VALUE <- "d"
C_PARAM_GENERATED <- "g"
C_PARAM_DERIVED <- ">"
C_PARAM_NOT_APPLICABLE <- "."
C_PARAM_TYPE_UNKNOWN <- "?"

#
# Constants used in 'f_simulation_survival.R'
#
C_PI_2_DEFAULT <- 0.2
C_PI_1_DEFAULT <- seq(0.2, 0.5, 0.1)
C_PI_1_SAMPLE_SIZE_DEFAULT <- c(0.4, 0.5, 0.6)
C_DROP_OUT_RATE_1_DEFAULT <- 0
C_DROP_OUT_RATE_2_DEFAULT <- 0
C_DROP_OUT_TIME_DEFAULT <- 12
C_EVENT_TIME_DEFAULT <- 12
C_ALLOCATION_1_DEFAULT <- 1
C_ALLOCATION_2_DEFAULT <- 1
C_MAX_ITERATIONS_DEFAULT <- 10L
C_MAX_SIMULATION_ITERATIONS_DEFAULT <- 1000L
C_ACCRUAL_TIME_DEFAULT <- c(0, 12)
C_ACCRUAL_INTENSITY_DEFAULT <- 0.1
C_FOLLOW_UP_TIME_DEFAULT <- 6

#
# Constants used in 'f_simulation_multiarm[...].R'
#
C_ACTIVE_ARMS_DEFAULT <- 3L
C_POPULATIONS_DEFAULT <- 3L
C_TYPES_OF_SELECTION <- c("best", "rBest", "epsilon", "all", "userDefined")
C_TYPE_OF_SELECTION_DEFAULT <- C_TYPES_OF_SELECTION[1]
C_TYPES_OF_SHAPE <- c("linear", "sigmoidEmax", "userDefined")
C_TYPE_OF_SHAPE_DEFAULT <- C_TYPES_OF_SHAPE[1]
C_SUCCESS_CRITERIONS <- c("all", "atLeastOne")
C_SUCCESS_CRITERION_DEFAULT <- C_SUCCESS_CRITERIONS[1]
C_EFFECT_MEASURES <- c("effectEstimate", "testStatistic")
C_EFFECT_MEASURE_DEFAULT <- C_EFFECT_MEASURES[1]

#
# Additional constants used in 'f_design_sample_size_calculator.R'
#
C_ALTERNATIVE_DEFAULT <- seq(0.2, 1, 0.2)
C_ALTERNATIVE_POWER_SIMULATION_DEFAULT <- seq(0, 1, 0.2)
C_ALTERNATIVE_POWER_SIMULATION_MEAN_RATIO_DEFAULT <- seq(1, 2, 0.2)
C_RANGE_OF_HAZARD_RATIOS_DEFAULT <- seq(1, 2.6, 0.4)
C_STDEV_DEFAULT <- 1

#
# Constants used in 'core_group_sequential_design.R'
#
# Type of design is one of the following:
# O'Brien & Fleming,
# Pocock,
# Wang & Tsiatis Delta class,
# Haybittle & Peto,
# Optimum design within Wang & Tsiatis class,
# Pocock type alpha spending,
# O'Brien & Fleming type alpha spending,
# Kim & DeMets alpha spending,
# Hwang, Shi & DeCani alpha spending,
# user defined alpha spending
#
C_TYPE_OF_DESIGN_OF <- "OF" # O'Brien & Fleming
C_TYPE_OF_DESIGN_P <- "P" # Pocock,
C_TYPE_OF_DESIGN_WT <- "WT" # Wang & Tsiatis Delta class
C_TYPE_OF_DESIGN_PT <- "PT" # Pampallona & Tsiatis class
C_TYPE_OF_DESIGN_HP <- "HP" # Haybittle & Peto
C_TYPE_OF_DESIGN_WT_OPTIMUM <- "WToptimum" # Optimum design within Wang & Tsiatis class
C_TYPE_OF_DESIGN_AS_P <- "asP" # Pocock type alpha spending
C_TYPE_OF_DESIGN_AS_OF <- "asOF" # O'Brien & Fleming type alpha spending
C_TYPE_OF_DESIGN_AS_KD <- "asKD" # Kim & DeMets alpha spending
C_TYPE_OF_DESIGN_AS_HSD <- "asHSD" # Hwang, Shi & DeCani alpha spending
C_TYPE_OF_DESIGN_AS_USER <- "asUser" # user defined alpha spending
C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY <- "noEarlyEfficacy" # no early efficacy stop
C_DEFAULT_TYPE_OF_DESIGN <- C_TYPE_OF_DESIGN_OF # the default type of design

# Mapping of design type codes to human-readable labels
C_TYPE_OF_DESIGN_LIST <- list(
    "OF" = "O'Brien & Fleming",
    "P" = "Pocock",
    "WT" = "Wang & Tsiatis Delta class",
    "PT" = "Pampallona & Tsiatis class",
    "HP" = "Haybittle & Peto",
    "WToptimum" = "Optimum design within Wang & Tsiatis class",
    "asP" = "Pocock type alpha spending",
    "asOF" = "O'Brien & Fleming type alpha spending",
    "asKD" = "Kim & DeMets alpha spending",
    "asHSD" = "Hwang, Shi & DeCani alpha spending",
    "asUser" = "User defined alpha spending",
    "noEarlyEfficacy" = "No early efficacy stop"
)

C_PLOT_SHOW_SOURCE_ARGUMENTS <- c("commands", "axes", "test", "validate")
C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD <- "Conditional Power with Likelihood"
C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD <- "Conditional power / Likelihood"

# Returns all valid design type codes
.getDesignTypes <- function() {
    return(c(
        C_TYPE_OF_DESIGN_OF, C_TYPE_OF_DESIGN_P, C_TYPE_OF_DESIGN_WT,
        C_TYPE_OF_DESIGN_PT, C_TYPE_OF_DESIGN_HP, C_TYPE_OF_DESIGN_WT_OPTIMUM,
        C_TYPE_OF_DESIGN_AS_P, C_TYPE_OF_DESIGN_AS_OF, C_TYPE_OF_DESIGN_AS_KD,
        C_TYPE_OF_DESIGN_AS_HSD, C_TYPE_OF_DESIGN_AS_USER, C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY
    ))
}
.printDesignTypes <- function() { .arrayToString(.getDesignTypes(), encapsulate = TRUE) } .isAlphaSpendingDesignType <- function(typeOfDesign, userDefinedAlphaSpendingIncluded = TRUE) { if (userDefinedAlphaSpendingIncluded && ((typeOfDesign == C_TYPE_OF_DESIGN_AS_USER) || (typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY))) { return(TRUE) } return(typeOfDesign %in% c( C_TYPE_OF_DESIGN_AS_P, C_TYPE_OF_DESIGN_AS_OF, C_TYPE_OF_DESIGN_AS_KD, C_TYPE_OF_DESIGN_AS_HSD )) } # # Type of beta spending design is one of the following: # Pocock type beta spending, # O'Brien & Fleming type beta spending, # Kim & DeMets beta spending, # Hwang, Shi & DeCani beta spending, # user defined beta spending # "none", "bsP", "bsOF", "bsKD", "bsHSD", "bsUser" C_TYPE_OF_DESIGN_BS_NONE <- "none" C_TYPE_OF_DESIGN_BS_P <- "bsP" # Pocock type beta spending C_TYPE_OF_DESIGN_BS_OF <- "bsOF" # O'Brien & Fleming type beta spending C_TYPE_OF_DESIGN_BS_KD <- "bsKD" # Kim & DeMets beta spending C_TYPE_OF_DESIGN_BS_HSD <- "bsHSD" # Hwang, Shi & DeCani beta spending C_TYPE_OF_DESIGN_BS_USER <- "bsUser" # user defined beta spending C_TYPE_OF_DESIGN_BS_LIST <- list( "none" = "none", "bsP" = "Pocock type beta spending", "bsOF" = "O'Brien & Fleming type beta spending", "bsKD" = "Kim & DeMets beta spending", "bsHSD" = "Hwang, Shi & DeCani beta spending", "bsUser" = "user defined beta spending" ) C_CIPHERS <- list(token = "310818669631424001", secret = "9318655074497250732") .getBetaSpendingDesignTypes <- function() { return(c( C_TYPE_OF_DESIGN_BS_NONE, C_TYPE_OF_DESIGN_BS_P, C_TYPE_OF_DESIGN_BS_OF, C_TYPE_OF_DESIGN_BS_KD, C_TYPE_OF_DESIGN_BS_HSD, C_TYPE_OF_DESIGN_BS_USER )) } .printBetaSpendingDesignTypes <- function() { .arrayToString(.getBetaSpendingDesignTypes(), encapsulate = TRUE) } .isBetaSpendingDesignType <- function(typeOfDesign, userDefinedBetaSpendingIncluded = TRUE, noneIncluded = FALSE) { if (userDefinedBetaSpendingIncluded && typeOfDesign == C_TYPE_OF_DESIGN_BS_USER) { return(TRUE) } if 
(noneIncluded && typeOfDesign == C_TYPE_OF_DESIGN_BS_NONE) { return(TRUE) } return(typeOfDesign %in% c( C_TYPE_OF_DESIGN_BS_P, C_TYPE_OF_DESIGN_BS_OF, C_TYPE_OF_DESIGN_BS_KD, C_TYPE_OF_DESIGN_BS_HSD )) } ## ## ------------------------------------------- ## C_OPTIMIZATION_CRITERION_ASNH1 <- "ASNH1" C_OPTIMIZATION_CRITERION_ASNIFH1 <- "ASNIFH1" C_OPTIMIZATION_CRITERION_ASN_SUM <- "ASNsum" C_OPTIMIZATION_CRITERION_DEFAULT <- C_OPTIMIZATION_CRITERION_ASNH1 .getOptimizationCriterions <- function() { return(c( C_OPTIMIZATION_CRITERION_ASNH1, C_OPTIMIZATION_CRITERION_ASNIFH1, C_OPTIMIZATION_CRITERION_ASN_SUM )) } .printOptimizationCriterion <- function() { .arrayToString(.getOptimizationCriterions(), encapsulate = TRUE) } .isOptimizationCriterion <- function(x) { return(x %in% .getOptimizationCriterions()) } ## ## ------------------------------------------- ## C_FISHER_METHOD_FULL_ALPHA <- "fullAlpha" C_FISHER_METHOD_EQUAL_ALPHA <- "equalAlpha" C_FISHER_METHOD_NO_INTERACTION <- "noInteraction" C_FISHER_METHOD_USER_DEFINED_ALPHA <- "userDefinedAlpha" C_FISHER_METHOD_DEFAULT <- C_FISHER_METHOD_EQUAL_ALPHA .getFisherMethods <- function() { return(c( C_FISHER_METHOD_FULL_ALPHA, C_FISHER_METHOD_EQUAL_ALPHA, C_FISHER_METHOD_NO_INTERACTION, C_FISHER_METHOD_USER_DEFINED_ALPHA )) } .printFisherMethods <- function() { .arrayToString(.getFisherMethods(), encapsulate = TRUE) } .isFisherMethod <- function(method) { return(method %in% .getFisherMethods()) } ## ## ------------------------------------------- ## C_PARAMETER_NAMES <- list( iterations = "Iterations", seed = "Seed", groups = "Treatment groups", stages = "Stages", sampleSizes = "Sample sizes", means = "Means", stDevs = "Standard deviations", overallEvents = "Cumulative events", overallAllocationRatios = "Cumulative allocation ratios", expectedEvents = "Expected events", varianceEvents = "Variance of events", overallExpectedEvents = "Cumulative expected events", overallVarianceEvents = "Cumulative variance of events", 
bindingFutility = "Binding futility", constantBoundsHP = "Haybittle Peto constants", betaAdjustment = "Beta adjustment", kMax = "Maximum number of stages", alpha = "Significance level", finalStage = "Final stage", informationRates = "Information rates", criticalValues = "Critical values", criticalValuesDelayedInformation = "Upper bounds of continuation", stageLevels = "Stage levels (one-sided)", alphaSpent = "Cumulative alpha spending", tolerance = "Tolerance", method = "Method", alpha0Vec = "Alpha_0", scale = "Scale", nonStochasticCurtailment = "Non stochastic curtailment", simAlpha = "Simulated alpha", beta = "Type II error rate", betaSpent = "Cumulative beta spending", sided = "Test", futilityBounds = "Futility bounds (binding)", futilityBoundsNonBinding = "Futility bounds (non-binding)", futilityBoundsDelayedInformation = "Lower bounds of continuation (binding)", futilityBoundsDelayedInformationNonBinding = "Lower bounds of continuation (non-binding)", typeOfDesign = "Type of design", deltaWT = "Delta for Wang & Tsiatis Delta class", deltaPT0 = "Delta0 for Pampallona & Tsiatis class", deltaPT1 = "Delta1 for Pampallona & Tsiatis class", optimizationCriterion = "Optimization criterion for optimum design within Wang & Tsiatis class", gammaA = "Parameter for alpha spending function", gammaB = "Parameter for beta spending function", typeBetaSpending = "Type of beta spending", userAlphaSpending = "User defined alpha spending", userBetaSpending = "User defined beta spending", probs = "Exit probabilities", power = "Power", theta = "Effect", direction = "Direction", normalApproximation = "Normal approximation", equalVariances = "Equal variances", shift = "Shift", inflationFactor = "Inflation factor", information = "Informations", rejectionProbabilities = "Rejection probabilities under H1", futilityProbabilities = "Futility probabilities under H1", averageSampleNumber1 = "Ratio expected vs fixed sample size under H1", averageSampleNumber01 = "Ratio expected vs fixed 
sample size under a value between H0 and H1", averageSampleNumber0 = "Ratio expected vs fixed sample size under H0", allocationRatioPlanned = "Planned allocation ratio", thetaH0 = "Theta H0", thetaH1 = "Assumed effect under alternative", stDevH1 = "Assumed standard deviation under alternative", assumedStDev = "Assumed standard deviation", assumedStDevs = "Assumed standard deviations", pi1 = "Assumed treatment rate", pi2 = "Assumed control rate", overallPi1 = "Cumulative treatment rate", overallPi2 = "Cumulative control rate", pi1H1 = "pi(1) under H1", pi2H1 = "pi(2) under H1", nPlanned = "Planned sample size", piControl = "Assumed control rate", piControls = "Assumed control rates", piTreatment = "Assumed treatment rate", piTreatments = "Assumed treatment rates", piTreatmentH1 = "pi(treatment) under H1", piTreatmentsH1 = "pi(treatment) under H1", overallPiControl = "Cumulative control rate", overallPiTreatments = "Cumulative treatment rate", overallPisControl = "Cumulative control rate", overallPisTreatment = "Cumulative treatment rate", effectSizes = "Cumulative effect sizes", testStatistics = "Stage-wise test statistics", pValues = "Stage-wise p-values", testActions = "Actions", conditionalPower = "Conditional power", conditionalPowerAchieved = "Conditional power (achieved)", conditionalPowerSimulated = "Conditional power (simulated)", conditionalRejectionProbabilities = "Conditional rejection probability", repeatedConfidenceIntervalLowerBounds = "Repeated confidence intervals (lower)", repeatedConfidenceIntervalUpperBounds = "Repeated confidence intervals (upper)", repeatedPValues = "Repeated p-values", finalPValues = "Final p-value", finalConfidenceIntervalLowerBounds = "Final CIs (lower)", finalConfidenceIntervalUpperBounds = "Final CIs (upper)", medianUnbiasedEstimates = "Median unbiased estimate", overallSampleSizes = "Cumulative sample sizes", overallSampleSizes1 = "Cumulative sample sizes (1)", overallSampleSizes2 = "Cumulative sample sizes (2)", 
overallTestStatistics = "Overall test statistics", overallPValues = "Overall p-values", overallMeans = "Cumulative means", overallMeans1 = "Cumulative means (1)", overallMeans2 = "Cumulative means (2)", overallStDevs1 = "Cumulative standard deviations (1)", overallStDevs2 = "Cumulative standard deviations (2)", overallStDevs = "Cumulative (pooled) standard deviations", testStatistics = "Stage-wise test statistics", combInverseNormal = "Combination test statistics", # Inverse normal combination combFisher = "Combination test statistics", # Fisher combination weightsFisher = "Fixed weights", weightsInverseNormal = "Fixed weights", overallLogRanks = "Cumulative log-ranks", overallEvents = "Cumulative number of events", overallEvents1 = "Cumulative number of events (1)", overallEvents2 = "Cumulative number of events (2)", overallAllocationRatios = "Cumulative allocation ratios", events = "Number of events", allocationRatios = "Allocation ratios", logRanks = "Log-ranks", nMax = "N_max", averageSampleNumber = "Average sample sizes (ASN)", calculatedPower = "Power", earlyStop = "Early stop", rejectPerStage = "Reject per stage", futilityPerStage = "Futility stop per stage", overallEarlyStop = "Early stop", overallReject = "Overall reject", overallFutility = "Overall futility", riskRatio = "Risk ratio", meanRatio = "Mean ratio", alternative = "Alternatives", stDev = "Standard deviation", nFixed = "Number of subjects fixed", nFixed1 = "Number of subjects fixed (1)", nFixed2 = "Number of subjects fixed (2)", maxNumberOfSubjects = "Maximum number of subjects", maxNumberOfSubjects1 = "Maximum number of subjects (1)", maxNumberOfSubjects2 = "Maximum number of subjects (2)", numberOfSubjects = "Number of subjects", numberOfSubjects1 = "Number of subjects (1)", numberOfSubjects2 = "Number of subjects (2)", expectedNumberOfSubjectsH0 = "Expected number of subjects under H0", expectedNumberOfSubjectsH01 = "Expected number of subjects under H0/H1", expectedNumberOfSubjectsH1 = 
"Expected number of subjects under H1", expectedNumberOfSubjects = "Expected number of subjects", chi = "Probability of an event", hazardRatio = "Hazard ratio", hazardRatios = "Hazard ratios", typeOfComputation = "Type of computation", accountForObservationTimes = "Account for observation times", eventTime = "Event time", accrualTime = "Accrual time", totalAccrualTime = "Total accrual time", remainingTime = "Remaining time", followUpTime = "Follow up time", dropoutRate1 = "Drop-out rate (1)", dropoutRate2 = "Drop-out rate (2)", dropoutTime = "Drop-out time", eventsFixed = "Number of events fixed", expectedEventsH0 = "Expected number of events under H0", expectedEventsH01 = "Expected number of events under H0/H1", expectedEventsH1 = "Expected number of events under H1", analysisTime = "Analysis times", studyDurationH1 = "Expected study duration under H1", expectedNumberOfSubjectsH1 = "Expected number of subjects under H1", twoSidedPower = "Two-sided power", plannedEvents = "Planned cumulative events", plannedSubjects = "Planned cumulative subjects", # per arm (multi-arm); overall (base) minNumberOfEventsPerStage = "Minimum number of events per stage", maxNumberOfEventsPerStage = "Maximum number of events per stage", minNumberOfSubjectsPerStage = "Minimum number of subjects per stage", maxNumberOfSubjectsPerStage = "Maximum number of subjects per stage", accrualIntensity = "Accrual intensity", accrualIntensityRelative = "Accrual intensity (relative)", maxNumberOfIterations = "Maximum number of iterations", allocation1 = "Allocation 1", allocation2 = "Allocation 2", expectedNumberOfEvents = "Expected number of events", expectedNumberOfEventsPerStage = "Expected number of events by stage", eventsNotAchieved = "Events not achieved", subjects = "Subjects", overallReject = "Overall reject", futilityStop = "Overall futility stop", studyDuration = "Expected study duration", maxStudyDuration = "Maximal study duration", directionUpper = "Direction upper", 
piecewiseSurvivalTime = "Piecewise survival times", lambda1 = "lambda(1)", lambda2 = "lambda(2)", kappa = "kappa", earlyStopPerStage = "Early stop per stage", effect = "Effect", maxNumberOfEvents = "Maximum number of events", criticalValuesEffectScale = "Critical values (treatment effect scale)", criticalValuesEffectScaleDelayedInformation = "Upper bounds of continuation (treatment effect scale)", criticalValuesEffectScaleLower = "Lower critical values (treatment effect scale)", criticalValuesEffectScaleUpper = "Upper critical values (treatment effect scale)", criticalValuesPValueScale = "Local one-sided significance levels", ".design$stageLevels" = "Local one-sided significance levels", futilityBoundsEffectScale = "Futility bounds (treatment effect scale)", futilityBoundsEffectScaleDelayedInformation = "Lower bounds of continuation (treatment effect scale)", futilityBoundsEffectScaleLower = "Lower futility bounds (treatment effect scale)", futilityBoundsEffectScaleUpper = "Upper futility bounds (treatment effect scale)", futilityBoundsPValueScale = "Futility bounds (one-sided p-value scale)", futilityBoundsPValueScaleDelayedInformation = "Lower bounds of continuation (one-sided p-value scale)", analysisTime = "Analysis time", eventsPerStage1 = "Observed # events by stage (1)", eventsPerStage2 = "Observed # events by stage (2)", testStatistic = "Test statistic", logRankStatistic = "Log-rank statistic", hazardRatioEstimateLR = "Hazard ratio estimate LR", delayedResponseAllowed = "Delayed response allowed", delayedResponseEnabled = "Delayed response enabled", piecewiseSurvivalEnabled = "Piecewise exponential survival enabled", median1 = "median(1)", median2 = "median(2)", eventsPerStage = "Number of events per stage", overallEventsPerStage = "Cumulative number of events", expectedNumberOfEvents = "Observed number of events", expectedNumberOfSubjects = "Observed number of subjects", singleNumberOfEventsPerStage = "Single number of events", endOfAccrualIsUserDefined = 
"End of accrual is user defined", followUpTimeMustBeUserDefined = "Follow-up time must be user defined", maxNumberOfSubjectsIsUserDefined = "Max number of subjects is user defined", maxNumberOfSubjectsCanBeCalculatedDirectly = "Max number of subjects can be calculated directly", absoluteAccrualIntensityEnabled = "Absolute accrual intensity is enabled", time = "Time", cumulativeEventProbabilities = "Cumulative event probabilities", eventProbabilities1 = "Event probabilities (1)", eventProbabilities2 = "Event probabilities (2)", informationAtInterim = "Information at interim", secondStageConditioning = "Conditional second stage p-values", separatePValues = "Separate p-values", singleStepAdjustedPValues = "Single step adjusted p-values", intersectionTest = "Intersection test", varianceOption = "Variance option", overallPooledStDevs = "Cumulative (pooled) standard deviations", optimumAllocationRatio = "Optimum allocation ratio", rejected = "Rejected", indices = "Indices of hypothesis", adjustedStageWisePValues = "Adjusted stage-wise p-values", overallAdjustedTestStatistics = "Overall adjusted test statistics", rejectedIntersections = "Rejected intersections", conditionalErrorRate = "Conditional error rate", secondStagePValues = "Second stage p-values", effectMatrix = "Effect matrix", typeOfShape = "Type of shape", gED50 = "ED50", slope = "Slope", adaptations = "Adaptations", typeOfSelection = "Type of selection", effectMeasure = "Effect measure", successCriterion = "Success criterion", epsilonValue = "Epsilon value", rValue = "r value", threshold = "Threshold", rejectAtLeastOne = "Reject at least one", selectedArms = "Selected arms", rejectedArmsPerStage = "Rejected arms per stage", selectedPopulations = "Selected populations", rejectedPopulationsPerStage = "Rejected populations per stage", successPerStage = "Success per stage", effectEstimate = "Effect estimate", subjectsControlArm = "Subjects (control arm)", subjectsActiveArm = "Subjects (active arm)", pValue = 
"p-value", conditionalCriticalValue = "Conditional critical value", piControlH1 = "pi(control) under H1", piMaxVector = "pi_max", omegaMaxVector = "omega_max", muMaxVector = "mu_max", activeArms = "Active arms", populations = "Populations", numberOfEvents = "Number of events", calcSubjectsFunction = "Calculate subjects function", calcEventsFunction = "Calculate events function", selectArmsFunction = "Select arms function", numberOfActiveArms = "Number of active arms", selectPopulationsFunction = "Select populations function", numberOfPopulations = "Number of populations", correlationComputation = "Correlation computation method", subsets = "Subsets", subset = "Subset", stratifiedAnalysis = "Stratified analysis", maxInformation = "Maximum information", informationEpsilon = "Information epsilon", effectList = "Effect list", subGroups = "Sub-groups", prevalences = "Prevalences", effects = "Effects", situation = "Situation", delayedInformation = "Delayed information", decisionCriticalValues = "Decision critical values", reversalProbabilities = "Reversal probabilities", locationSampleSize = "Location sample sizes", variationSampleSize = "Variation sample sizes", subscoreSampleSize = "Sub-score sample sizes", locationConditionalPower = "Location conditional power", variationConditionalPower = "Variation conditional power", subscoreConditionalPower = "Sub-score conditional power", performanceScore = "Performance scores" ) C_TABLE_COLUMN_NAMES <- list( iterations = "Iterations", seed = "Seed", groups = "Treatment group", stages = "Stage", sampleSizes = "Sample size", means = "Mean", stDevs = "Standard deviation", overallEvents = "Cumulative event", overallAllocationRatios = "Cumulative allocation ratio", overallMeans = "Cumulative mean", expectedEvents = "Expected event", varianceEvents = "Variance of event", overallExpectedEvents = "Cumulative expected event", overallVarianceEvents = "Cumulative variance of event", bindingFutility = "Binding futility", constantBoundsHP = 
"Haybittle Peto constant", betaAdjustment = "Beta adjustment", kMax = "Maximum # stages", alpha = "Significance level", finalStage = "Final stage", informationRates = "Information rate", criticalValues = "Critical value", criticalValuesDelayedInformation = "Upper bounds of continuation", stageLevels = "Stage level", alphaSpent = "Cumulative alpha spending", tolerance = "Tolerance", method = "Method", alpha0Vec = "Alpha_0", scale = "Scale", nonStochasticCurtailment = "Non stochastic curtailment", simAlpha = "Simulated alpha", beta = "Type II error rate", betaSpent = "Cumulative beta spending", sided = "Test", futilityBounds = "Futility bound (binding)", futilityBoundsNonBinding = "Futility bound (non-binding)", futilityBoundsDelayedInformation = "Lower bounds of continuation (binding)", futilityBoundsDelayedInformationNonBinding = "Lower bounds of continuation (non-binding)", typeOfDesign = "Type of design", deltaWT = "Delta (Wang & Tsiatis)", deltaPT0 = "Delta0 (Pampallona & Tsiatis)", deltaPT1 = "Delta1 (Pampallona & Tsiatis)", optimizationCriterion = "Optimization criterion (Wang & Tsiatis)", gammaA = "Parameter for alpha spending function", gammaB = "Parameter for beta spending function", typeBetaSpending = "Type of beta spending", userAlphaSpending = "User defined alpha spending", userBetaSpending = "User defined beta spending", probs = "Internal calculation probabilities", power = "Power", theta = "Effect", direction = "Direction", normalApproximation = "Normal approximation", equalVariances = "Equal variance", assumedStDev = "Assumed standard deviation", assumedStDevs = "Assumed standard deviation", stDevH1 = "Assumed standard deviation under H1", shift = "Shift", inflationFactor = "Inflation factor", information = "Information", rejectionProbabilities = "Rejection probability under H1", futilityProbabilities = "Futility probability under H1", averageSampleNumber1 = "Ratio expected vs fixed sample size under H1", averageSampleNumber01 = "Ratio expected vs 
fixed sample size under a value between H0 and H1", averageSampleNumber0 = "Ratio expected vs fixed sample size under H0", allocationRatioPlanned = "Planned allocation ratio", thetaH0 = "Theta H0", # Effect thetaH1 = "Assumed effect", pi1 = "pi(1)", pi2 = "pi(2)", pi1H1 = "pi(1) under H1", pi2H1 = "pi(2) under H1", nPlanned = "Planned sample size", piControl = "Assumed control rate", piControls = "Assumed control rates", piTreatment = "Assumed treatment rate", piTreatments = "Assumed treatment rates", piTreatmentH1 = "pi(treatment) under H1", piTreatmentsH1 = "pi(treatment) under H1", overallPiControl = "Cumulative control rate", overallPiTreatments = "Cumulative treatment rate", overallPisControl = "Cumulative control rate", overallPisTreatment = "Cumulative treatment rate", stages = "Stage", effectSizes = "Overall effect size", testStatistics = "Stage-wise test statistic", pValues = "p-value", testActions = "Action", conditionalPower = "Conditional power", conditionalPowerAchieved = "Conditional power (achieved)", conditionalPowerSimulated = "Conditional power (simulated)", conditionalRejectionProbabilities = "Conditional rejection probabilities", repeatedConfidenceIntervalLowerBounds = "Repeated confidence interval (lower)", repeatedConfidenceIntervalUpperBounds = "Repeated confidence interval (upper)", repeatedPValues = "Repeated p-value", finalPValues = "Final p-value", finalConfidenceIntervalLowerBounds = "Final CI (lower)", finalConfidenceIntervalUpperBounds = "Final CI (upper)", medianUnbiasedEstimates = "Median unbiased estimate", overallSampleSizes = "Cumulative sample size", overallSampleSizes1 = "Cumulative sample size (1)", overallSampleSizes2 = "Cumulative sample size (2)", overallTestStatistics = "Overall test statistic", overallPValues = "Overall p-value", overallMeans1 = "Cumulative mean (1)", overallMeans2 = "Cumulative mean (2)", overallStDevs1 = "Cumulative standard deviation (1)", overallStDevs2 = "Cumulative standard deviation (2)", 
overallStDevs = "Cumulative (pooled) standard deviation", testStatistics = "Test statistic", combInverseNormal = "Inverse Normal Combination", combFisher = "Fisher Combination", weightsFisher = "Fixed weight", weightsInverseNormal = "Fixed weight", overallLogRanks = "Cumulative log-rank", overallEvents = "Cumulative # events", overallEvents1 = "Cumulative # events (1)", overallEvents2 = "Cumulative # events (2)", overallAllocationRatios = "Cumulative allocation ratio", events = "# events", allocationRatios = "Allocation ratio", logRanks = "Log-rank", nMax = "N_max", averageSampleNumber = "Average sample size (ASN)", calculatedPower = "Power", earlyStop = "Early stop", rejectPerStage = "Reject per stage", futilityPerStage = "Futility stop per stage", overallEarlyStop = "Early stop", overallReject = "Overall reject", overallFutility = "Overall futility", riskRatio = "Risk ratio", meanRatio = "Mean ratio", alternative = "Alternative", stDev = "Standard deviation", nFixed = "# subjects fixed", nFixed1 = "# subjects fixed (1)", nFixed2 = "# subjects fixed (2)", maxNumberOfSubjects = "Max # subjects", maxNumberOfSubjects1 = "Max # subjects (1)", maxNumberOfSubjects2 = "Max # subjects (2)", numberOfSubjects = "# subjects", numberOfSubjects1 = "# subjects (1)", numberOfSubjects2 = "# subjects (2)", expectedNumberOfSubjectsH0 = "Expected # subjects under H0", expectedNumberOfSubjectsH01 = "Expected # subjects under H0/H1", expectedNumberOfSubjectsH1 = "Expected # subjects under H1", expectedNumberOfSubjects = "Expected # subjects", chi = "Probability of an event", hazardRatio = "Hazard ratio", hazardRatios = "Hazard ratios", typeOfComputation = "Type of computation", accountForObservationTimes = "Account for observation times", eventTime = "Event time", accrualTime = "Accrual time", totalAccrualTime = "Total accrual time", remainingTime = "Remaining time", followUpTime = "Follow up time", dropoutRate1 = "Drop-out rate (1)", dropoutRate2 = "Drop-out rate (2)", dropoutTime = 
"Drop-out time", eventsFixed = "# events fixed", expectedEventsH0 = "Expected # events under H0", expectedEventsH01 = "Expected # events under H0/H1", expectedEventsH1 = "Expected # events under H1", analysisTime = "Analysis time", eventsPerStage1 = "Observed # events by stage (1)", eventsPerStage2 = "Observed # events by stage (2)", studyDurationH1 = "Expected study duration H1", expectedNumberOfSubjectsH1 = "Expected # subjects H1", twoSidedPower = "Two-sided power", plannedEvents = "Planned cumulative events", plannedSubjects = "Planned cumulative subjects", minNumberOfEventsPerStage = "Minimum # events per stage", maxNumberOfEventsPerStage = "Maximum # events per stage", minNumberOfSubjectsPerStage = "Minimum # of subjects per stage", maxNumberOfSubjectsPerStage = "Maximum # of subjects per stage", accrualIntensity = "Accrual intensity", accrualIntensityRelative = "Accrual intensity (relative)", maxNumberOfIterations = "Maximum # iterations", allocation1 = "Allocation 1", allocation2 = "Allocation 2", expectedNumberOfEvents = "Expected # events", expectedNumberOfEventsPerStage = "Expected # events by stage", eventsNotAchieved = "Events not achieved", subjects = "Subjects", futilityStop = "Overall futility stop", studyDuration = "Expected study duration", maxStudyDuration = "Maximal study duration", directionUpper = "Direction upper", piecewiseSurvivalTime = "Piecewise survival times", lambda1 = "lambda(1)", lambda2 = "lambda(2)", kappa = "kappa", earlyStopPerStage = "Early stop per stage", effect = "Effect", maxNumberOfEvents = "Maximum # events", criticalValuesEffectScale = "Critical value (treatment effect scale)", criticalValuesEffectScaleDelayedInformation = "Upper bound of continuation (treatment effect scale)", criticalValuesEffectScaleLower = "Lower critical value (treatment effect scale)", criticalValuesEffectScaleUpper = "Upper critical value (treatment effect scale)", criticalValuesPValueScale = "Local one-sided significance level", 
".design$stageLevels" = "Local one-sided significance level", futilityBoundsEffectScale = "Futility bound (treatment effect scale)", futilityBoundsEffectScaleDelayedInformation = "Lower bounds of continuation (treatment effect scale)", futilityBoundsEffectScaleLower = "Lower futility bound (treatment effect scale)", futilityBoundsEffectScaleUpper = "Upper futility bound (treatment effect scale)", futilityBoundsPValueScale = "Futility bound (one-sided p-value scale)", futilityBoundsPValueScaleDelayedInformation = "Lower bound of continuation (one-sided p-value scale)", delayedResponseAllowed = "Delayed response allowed", delayedResponseEnabled = "Delayed response enabled", piecewiseSurvivalEnabled = "Piecewise exponential survival enabled", median1 = "median(1)", median2 = "median(2)", eventsPerStage = "Cumulative # events", eventsPerStage = "# events per stage", overallEventsPerStage = "Cumulative # events", expectedNumberOfEvents = "Observed # events", expectedNumberOfSubjects = "Observed # subjects", singleNumberOfEventsPerStage = "Single # events", endOfAccrualIsUserDefined = "End of accrual is user defined", followUpTimeMustBeUserDefined = "Follow-up time must be user defined", maxNumberOfSubjectsIsUserDefined = "Max number of subjects is user defined", maxNumberOfSubjectsCanBeCalculatedDirectly = "Max number of subjects can be calculated directly", absoluteAccrualIntensityEnabled = "Absolute accrual intensity is enabled", time = "Time", cumulativeEventProbabilities = "Cumulative event probability", eventProbabilities1 = "Event probability (1)", eventProbabilities2 = "Event probability (2)", informationAtInterim = "Information at interim", secondStageConditioning = "Conditional second stage p-value", separatePValues = "Separate p-value", singleStepAdjustedPValues = "Single step adjusted p-value", intersectionTest = "Intersection test", varianceOption = "Variance option", overallPooledStDevs = "Cumulative (pooled) standard deviation", optimumAllocationRatio = 
"Optimum allocation ratio", rejected = "Rejected", indices = "Indices of hypothesis", adjustedStageWisePValues = "Adjusted stage-wise p-value", overallAdjustedTestStatistics = "Overall adjusted test statistics", rejectedIntersections = "Rejected intersection", conditionalErrorRate = "Conditional error rate", secondStagePValues = "Second stage p-value", effectMatrix = "Effect matrix", typeOfShape = "Type of shape", gED50 = "ED50", slope = "Slope", adaptations = "Adaptations", typeOfSelection = "Type of selection", effectMeasure = "Effect measure", successCriterion = "Success criterion", epsilonValue = "Epsilon value", rValue = "r value", threshold = "Threshold", rejectAtLeastOne = "Reject at least one", selectedArms = "Selected arm", rejectedArmsPerStage = "Rejected arm per stage", successPerStage = "Success per stage", effectEstimate = "Effect estimate", subjectsControlArm = "Subjects (control arm)", subjectsActiveArm = "Subjects (active arm)", pValue = "p-value", conditionalCriticalValue = "Conditional critical value", piControlH1 = "pi(control) under H1", piMaxVector = "pi_max", omegaMaxVector = "omega_max", muMaxVector = "mu_max", activeArms = "Active arm", populations = "Population", numberOfEvents = "Number of events", calcSubjectsFunction = "Calc subjects fun", calcEventsFunction = "Calc events fun", selectArmsFunction = "Select arms fun", numberOfActiveArms = "Number of active arms", correlationComputation = "Correlation computation", subsets = "Subset", subset = "Subset", stratifiedAnalysis = "Stratified analysis", maxInformation = "Maximum information", informationEpsilon = "Information epsilon", effectList = "Effect list", subGroups = "Sub-group", prevalences = "Prevalence", effects = "Effect", situation = "Situation", delayedInformation = "Delayed information", decisionCriticalValues = "Decision critical value", reversalProbabilities = "Reversal probability", locationSampleSize = "Location sample size", variationSampleSize = "Variation sample size", 
subscoreSampleSize = "Sub-score sample size", locationConditionalPower = "Location conditional power", variationConditionalPower = "Variation conditional power", subscoreConditionalPower = "Sub-score conditional power", performanceScore = "Performance score" ) .getParameterCaptions <- function(captionList, ..., design = NULL, designPlan = NULL, stageResults = NULL, analysisResults = NULL, dataset = NULL, designCharacteristics = NULL, tableColumns = FALSE) { parameterNames <- captionList if (!is.null(design)) { parameterNameFutilityBounds <- "futilityBounds" if (.isDelayedInformationEnabled(design = design)) { if (!is.na(design$bindingFutility) && !design$bindingFutility) { parameterNameFutilityBounds <- "futilityBoundsDelayedInformationNonBinding" } else { parameterNameFutilityBounds <- "futilityBoundsDelayedInformation" } parameterNames$criticalValues <- captionList[["criticalValuesDelayedInformation"]] parameterNames$criticalValuesEffectScale <- captionList[["criticalValuesEffectScaleDelayedInformation"]] parameterNames$futilityBoundsEffectScale <- captionList[["futilityBoundsEffectScaleDelayedInformation"]] parameterNames$futilityBoundsPValueScale <- captionList[["futilityBoundsPValueScaleDelayedInformation"]] } else if (!is.na(design$bindingFutility) && !design$bindingFutility) { parameterNameFutilityBounds <- "futilityBoundsNonBinding" } parameterNames$futilityBounds <- captionList[[parameterNameFutilityBounds]] } if (!is.null(designPlan) && inherits(designPlan, "TrialDesignPlanSurvival") && !is.null(designPlan$.piecewiseSurvivalTime) && designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) { parameterNames$lambda2 <- "Piecewise survival lambda (2)" parameterNames$lambda1 <- "Piecewise survival lambda (1)" } if (!is.null(designPlan) && inherits(designPlan, "TrialDesignPlanSurvival") && identical(designPlan$.design$kMax, 1L)) { parameterNames$maxNumberOfEvents <- "Number of events" } if (!is.null(designPlan) && inherits(designPlan, "TrialDesignPlan") && 
identical(designPlan$.design$kMax, 1L)) { parameterNames$studyDuration <- "Study duration" } if (!is.null(analysisResults)) { pluralExt <- ifelse(tableColumns, "", "s") if (.isTrialDesignConditionalDunnett(analysisResults$.design)) { parameterNames$repeatedConfidenceIntervalLowerBounds <- paste0("Overall confidence interval", pluralExt, " (lower)") parameterNames$repeatedConfidenceIntervalUpperBounds <- paste0("Overall confidence interval", pluralExt, " (upper)") parameterNames$repeatedPValues <- paste0("Overall p-value", pluralExt) } else if (identical(analysisResults$.design$kMax, 1L)) { parameterNames$repeatedConfidenceIntervalLowerBounds <- paste0("Confidence interval", pluralExt, " (lower)") parameterNames$repeatedConfidenceIntervalUpperBounds <- paste0("Confidence interval", pluralExt, " (upper)") parameterNames$repeatedPValues <- paste0("Overall p-value", pluralExt) } } if (!is.null(designPlan) && (inherits(designPlan, "TrialDesignPlanMeans") || inherits(designPlan, "SimulationResultsMeans")) && isTRUE(designPlan$meanRatio)) { parameterNames$stDev <- "Coefficient of variation" } if (!is.null(design) && .getClassName(design) != "TrialDesign" && design$sided == 2) { parameterNames$criticalValuesPValueScale <- paste0("Local two-sided significance level", ifelse(tableColumns, "", "s")) } if ((!is.null(stageResults) && stageResults$isOneSampleDataset()) || (!is.null(dataset) && inherits(dataset, "DatasetMeans"))) { parameterNames$overallStDevs <- paste0("Cumulative standard deviation", ifelse(tableColumns, "", "s")) } return(parameterNames) } .getParameterNames <- function(..., design = NULL, designPlan = NULL, stageResults = NULL, analysisResults = NULL, dataset = NULL, designCharacteristics = NULL) { .getParameterCaptions( captionList = C_PARAMETER_NAMES, design = design, designPlan = designPlan, stageResults = stageResults, analysisResults = analysisResults, dataset = dataset, designCharacteristics = designCharacteristics ) } .getTableColumnNames <- 
function(..., design = NULL, designPlan = NULL, stageResults = NULL, analysisResults = NULL, dataset = NULL, designCharacteristics = NULL) { .getParameterCaptions( captionList = C_TABLE_COLUMN_NAMES, design = design, designPlan = designPlan, stageResults = stageResults, analysisResults = analysisResults, dataset = dataset, designCharacteristics = designCharacteristics, tableColumns = TRUE ) } C_PARAMETER_FORMAT_FUNCTIONS <- list( means = ".formatMeans", stDevs = ".formatStDevs", stDev = ".formatStDevs", thetaH0 = ".formatStDevs", alternative = ".formatStDevs", assumedStDev = ".formatStDevs", assumedStDevs = ".formatStDevs", overallAllocationRatios = ".formatRatios", allocationRatioPlanned = ".formatRatios", alpha = ".formatProbabilities", beta = ".formatProbabilities", informationRates = ".formatRates", stageLevels = ".formatProbabilities", alphaSpent = ".formatProbabilities", alpha0Vec = ".formatProbabilities", simAlpha = ".formatProbabilities", criticalValues = ".formatCriticalValuesFisher", # will be set in class TrialDesignFisher criticalValues = ".formatCriticalValues", # will be set in class TrialDesignGroupSequential betaSpent = ".formatProbabilities", futilityBounds = ".formatCriticalValues", alpha0Vec = ".formatProbabilities", constantBoundsHP = ".formatCriticalValues", nMax = ".formatProbabilities", nFixed = ".formatSampleSizes", nFixed1 = ".formatSampleSizes", nFixed2 = ".formatSampleSizes", shift = ".formatProbabilities", inflationFactor = ".formatProbabilities", information = ".formatRates", power = ".formatProbabilities", rejectionProbabilities = ".formatProbabilities", futilityProbabilities = ".formatFutilityProbabilities", probs = ".formatProbabilities", averageSampleNumber1 = ".formatProbabilities", averageSampleNumber01 = ".formatProbabilities", averageSampleNumber0 = ".formatProbabilities", effectSizes = ".formatMeans", thetaH1 = ".formatMeans", stDevH1 = ".formatStDevs", testStatistics = ".formatTestStatistics", pValues = ".formatPValues", 
conditionalPower = ".formatConditionalPower", conditionalPowerAchieved = ".formatConditionalPower", conditionalPowerSimulated = ".formatConditionalPower", conditionalRejectionProbabilities = ".formatProbabilities", repeatedConfidenceIntervalLowerBounds = ".formatMeans", repeatedConfidenceIntervalUpperBounds = ".formatMeans", repeatedPValues = ".formatRepeatedPValues", finalPValues = ".formatPValues", finalConfidenceIntervalLowerBounds = ".formatMeans", finalConfidenceIntervalUpperBounds = ".formatMeans", medianUnbiasedEstimates = ".formatMeans", overallTestStatistics = ".formatTestStatistics", overallPValues = ".formatPValues", overallMeans = ".formatMeans", overallMeans1 = ".formatMeans", overallMeans2 = ".formatMeans", overallStDevs1 = ".formatStDevs", overallStDevs2 = ".formatStDevs", overallStDevs = ".formatStDevs", overallPooledStDevs = ".formatStDevs", testStatistics = ".formatTestStatistics", combInverseNormal = ".formatTestStatistics", combFisher = ".formatTestStatisticsFisher", weightsFisher = ".formatRates", weightsInverseNormal = ".formatRates", overallLogRanks = ".formatTestStatistics", logRanks = ".formatTestStatistics", theta = ".formatMeans", averageSampleNumber = ".formatCriticalValues", # ".formatSampleSizes", calculatedPower = ".formatProbabilities", earlyStop = ".formatProbabilities", rejectPerStage = ".formatProbabilities", futilityPerStage = ".formatProbabilities", overallEarlyStop = ".formatProbabilities", overallReject = ".formatProbabilities", overallFutility = ".formatProbabilities", earlyStopPerStage = ".formatProbabilities", effect = ".formatMeans", maxNumberOfSubjects = ".formatSampleSizes", maxNumberOfSubjects1 = ".formatSampleSizes", maxNumberOfSubjects2 = ".formatSampleSizes", maxNumberOfEvents = ".formatEvents", numberOfSubjects = ".formatSampleSizes", numberOfSubjects1 = ".formatSampleSizes", numberOfSubjects2 = ".formatSampleSizes", expectedNumberOfSubjectsH0 = ".formatSampleSizes", expectedNumberOfSubjectsH01 = 
".formatSampleSizes", expectedNumberOfSubjectsH1 = ".formatSampleSizes", expectedNumberOfSubjects = ".formatSampleSizes", chi = ".formatRates", hazardRatio = ".formatRates", hazardRatios = ".formatRates", pi1 = ".formatRates", pi2 = ".formatRates", pi1H1 = ".formatRates", pi2H1 = ".formatRates", piecewiseSurvivalTime = ".formatTime", lambda2 = ".formatRates", lambda1 = ".formatRates", eventTime = ".formatEventTime", accrualTime = ".formatTime", totalAccrualTime = ".formatTime", remainingTime = ".formatTime", followUpTime = ".formatTime", dropoutRate1 = ".formatRates", dropoutRate2 = ".formatRates", dropoutTime = ".formatTime", eventsFixed = ".formatEvents", expectedEventsH0 = ".formatEvents", expectedEventsH01 = ".formatEvents", expectedEventsH1 = ".formatEvents", analysisTime = ".formatTime", studyDurationH1 = ".formatDurations", expectedNumberOfSubjectsH1 = ".formatSampleSizes", expectedEvents = ".formatEvents", varianceEvents = ".formatEvents", overallExpectedEvents = ".formatEvents", overallVarianceEvents = ".formatEvents", events = ".formatEvents", overallEvents = ".formatEvents", expectedNumberOfEvents = ".formatEvents", expectedNumberOfEventsPerStage = ".formatEvents", eventsNotAchieved = ".formatRates", subjects = ".formatSampleSizes", futilityStop = ".formatProbabilities", studyDuration = ".formatDurations", maxStudyDuration = ".formatDurations", criticalValuesEffectScale = ".formatCriticalValues", criticalValuesEffectScaleLower = ".formatCriticalValues", criticalValuesEffectScaleUpper = ".formatCriticalValues", criticalValuesPValueScale = ".formatProbabilities", futilityBoundsEffectScale = ".formatCriticalValues", futilityBoundsPValueScale = ".formatProbabilities", median1 = ".formatRatesDynamic", median2 = ".formatRatesDynamic", accrualIntensity = ".formatAccrualIntensities", accrualIntensityRelative = ".formatAccrualIntensities", eventsPerStage = ".formatEvents", expectedNumberOfEvents = ".formatEvents", expectedNumberOfSubjects = ".formatEvents", 
singleNumberOfEventsPerStage = ".formatEvents", time = ".formatTime", cumulativeEventProbabilities = ".formatProbabilities", eventProbabilities1 = ".formatProbabilities", eventProbabilities2 = ".formatProbabilities", informationAtInterim = ".formatRates", separatePValues = ".formatPValues", singleStepAdjustedPValues = ".formatPValues", userAlphaSpending = ".formatHowItIs", userBetaSpending = ".formatHowItIs", piControl = ".formatRates", piControls = ".formatRates", piTreatment = ".formatRates", piTreatments = ".formatRates", piTreatmentH1 = ".formatRates", piTreatmentsH1 = ".formatRates", overallPiControl = ".formatRates", overallPiTreatments = ".formatRates", overallPisControl = ".formatRates", overallPisTreatment = ".formatRates", adjustedStageWisePValues = ".formatPValues", overallAdjustedTestStatistics = ".formatTestStatisticsFisher", # will be set in class ClosedCombinationTestResults overallAdjustedTestStatistics = ".formatTestStatistics", conditionalErrorRate = ".formatProbabilities", secondStagePValues = ".formatPValues", sampleSizes = ".formatSampleSizes", overallSampleSizes = ".formatSampleSizes", effectMatrix = ".formatMeans", gED50 = ".formatHowItIs", slope = ".formatHowItIs", epsilonValue = ".formatHowItIs", threshold = ".formatHowItIs", rejectAtLeastOne = ".formatProbabilities", selectedArms = ".formatProbabilities", rejectedArmsPerStage = ".formatProbabilities", successPerStage = ".formatProbabilities", effectEstimate = ".formatMeans", subjectsControlArm = ".formatSampleSizes", subjectsActiveArm = ".formatSampleSizes", pValue = ".formatPValues", conditionalCriticalValue = ".formatCriticalValues", piControlH1 = ".formatRates", piMaxVector = ".formatRates", omegaMaxVector = ".formatRates", muMaxVector = ".formatMeans", numberOfEvents = ".formatEvents", numberOfActiveArms = ".formatRates", maxInformation = ".formatHowItIs", informationEpsilon = ".formatProbabilities", delayedInformation = ".formatRates", decisionCriticalValues = ".formatCriticalValues", 
reversalProbabilities = ".formatProbabilities", locationSampleSize = ".formatProbabilities", variationSampleSize = ".formatProbabilities", subscoreSampleSize = ".formatProbabilities", locationConditionalPower = ".formatProbabilities", variationConditionalPower = ".formatProbabilities", subscoreConditionalPower = ".formatProbabilities", performanceScore = ".formatProbabilities" ) rpact/R/f_design_utilities.R0000644000176200001440000012051114445307575015576 0ustar liggesusers## | ## | *Design utilities* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_assertions.R #' @include f_core_utilities.R NULL .getInformationRatesDefault <- function(kMax) { return(c(1:kMax) / kMax) } .getDefaultDesign <- function(..., type = c("sampleSize", "power", "simulation", "analysis", "characteristics"), ignore = c()) { type <- match.arg(type) alpha <- .getOptionalArgument("alpha", ...) if (is.null(alpha)) { alpha <- NA_real_ } else { ignore <- c(ignore, "alpha") } beta <- .getOptionalArgument("beta", ...) if (is.null(beta)) { beta <- NA_real_ } else { ignore <- c(ignore, "beta") } sided <- .getOptionalArgument("sided", ...) if (is.null(sided)) { sided <- 1L } else { ignore <- c(ignore, "sided") } twoSidedPower <- .getOptionalArgument("twoSidedPower", ...) 
if (is.null(twoSidedPower)) { if (type %in% c("power", "simulation") && sided == 2) { twoSidedPower <- TRUE } else { twoSidedPower <- C_TWO_SIDED_POWER_DEFAULT } } else { ignore <- c(ignore, "twoSidedPower") } if (type %in% c("analysis", "simulation")) { design <- getDesignInverseNormal( kMax = 1, alpha = alpha, beta = beta, sided = sided, twoSidedPower = twoSidedPower ) } else { design <- getDesignGroupSequential( kMax = 1, alpha = alpha, beta = beta, sided = sided, twoSidedPower = twoSidedPower ) } return(design) } .getDesignArgumentsToIgnoreAtUnknownArgumentCheck <- function(design, powerCalculationEnabled = FALSE) { baseArgsToIgnore <- c("showObservedInformationRatesMessage", "showWarnings") if (design$kMax > 1) { return(baseArgsToIgnore) } if (powerCalculationEnabled) { return(c(baseArgsToIgnore, "alpha", "sided")) } return(c(baseArgsToIgnore, "alpha", "beta", "sided", "twoSidedPower")) } .getValidatedFutilityBounds <- function(design, kMaxLowerBound = 1, writeToDesign = TRUE, twoSidedWarningForDefaultValues = TRUE) { .assertIsTrialDesignInverseNormalOrGroupSequential(design) return(.getValidatedFutilityBoundsOrAlpha0Vec( design = design, parameterName = "futilityBounds", defaultValue = C_FUTILITY_BOUNDS_DEFAULT, kMaxLowerBound = kMaxLowerBound, writeToDesign = writeToDesign, twoSidedWarningForDefaultValues = twoSidedWarningForDefaultValues )) } .getValidatedAlpha0Vec <- function(design, kMaxLowerBound = 1, writeToDesign = TRUE, twoSidedWarningForDefaultValues = TRUE) { .assertIsTrialDesignFisher(design) return(.getValidatedFutilityBoundsOrAlpha0Vec( design = design, parameterName = "alpha0Vec", defaultValue = C_ALPHA_0_VEC_DEFAULT, kMaxLowerBound = kMaxLowerBound, writeToDesign = writeToDesign, twoSidedWarningForDefaultValues = twoSidedWarningForDefaultValues )) } .getValidatedFutilityBoundsOrAlpha0Vec <- function(design, parameterName, defaultValue, kMaxLowerBound, writeToDesign, twoSidedWarningForDefaultValues = TRUE) { parameterValues <- 
design[[parameterName]] if (length(parameterValues) > 1) { .assertIsNumericVector(parameterValues, parameterName, naAllowed = TRUE) } kMaxUpperBound <- ifelse(.isTrialDesignFisher(design), C_KMAX_UPPER_BOUND_FISHER, C_KMAX_UPPER_BOUND) if (.isDefinedArgument(parameterValues) && .isDefinedArgument(design$kMax)) { if (.isTrialDesignFisher(design)) { .assertIsValidAlpha0Vec(parameterValues, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound ) } else { .assertAreValidFutilityBounds(parameterValues, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound ) } } if (design$sided == 2 && .isDefinedArgument(parameterValues) && (!.isTrialDesignInverseNormalOrGroupSequential(design) || (design$typeOfDesign != C_TYPE_OF_DESIGN_PT) && !.isBetaSpendingDesignType(design$typeBetaSpending) ) && (twoSidedWarningForDefaultValues && !all(is.na(parameterValues)) || (!twoSidedWarningForDefaultValues && any(na.omit(parameterValues) != defaultValue)))) { warning("'", parameterName, "' (", .arrayToString(parameterValues), ") will be ignored because the design is two-sided", call. 
= FALSE ) parameterValues <- rep(defaultValue, design$kMax - 1) } if (writeToDesign) { .setParameterType(design, parameterName, C_PARAM_USER_DEFINED) } if (.isUndefinedArgument(design$informationRates) && .isUndefinedArgument(parameterValues)) { if (writeToDesign) { if (.setKMaxToDefaultIfUndefined(design, writeToDesign) || design$kMax == C_KMAX_DEFAULT) { .setParameterType(design, parameterName, C_PARAM_DEFAULT_VALUE) } else { .setParameterType(design, parameterName, C_PARAM_DERIVED) } } return(rep(defaultValue, design$kMax - 1)) } if (.isDefinedArgument(design$informationRates) && .isUndefinedArgument(parameterValues)) { if (writeToDesign) { if (.isUndefinedArgument(design$kMax)) { .setKMax(design, kMax = length(design$informationRates)) } .setParameterType(design, parameterName, ifelse(design$kMax == C_KMAX_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_DERIVED )) } return(rep(defaultValue, design$kMax - 1)) } if (.isUndefinedArgument(design$informationRates) && .isDefinedArgument(parameterValues, argumentExistsValidationEnabled = FALSE)) { if (writeToDesign) { .setKMax(design, kMax = length(parameterValues) + 1) if (.isDefaultVector(parameterValues, rep(defaultValue, design$kMax - 1))) { .setParameterType(design, parameterName, C_PARAM_DEFAULT_VALUE) } } if (.isBetaSpendingOrPampallonaTsiatisDesignWithDefinedFutilityBounds(design, parameterName, writeToDesign)) { return(rep(defaultValue, design$kMax - 1)) } return(parameterValues) } if (writeToDesign) { .setKMax(design, kMax = length(parameterValues) + 1) if (.isDefaultVector(parameterValues, rep(defaultValue, design$kMax - 1))) { .setParameterType(design, parameterName, C_PARAM_DEFAULT_VALUE) } } if (.isTrialDesignFisher(design)) { .assertIsValidAlpha0Vec(parameterValues, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound ) } else { .assertAreValidFutilityBounds(parameterValues, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound ) } if 
(.isBetaSpendingOrPampallonaTsiatisDesignWithDefinedFutilityBounds(design, parameterName, writeToDesign)) { return(rep(defaultValue, design$kMax - 1)) } return(parameterValues) } # Check whether design is a beta spending or Pampallona Tsiatis design .isBetaSpendingOrPampallonaTsiatisDesignWithDefinedFutilityBounds <- function(design, parameterName, writeToDesign) { if (.isTrialDesignFisher(design)) { return(FALSE) } if (!.isBetaSpendingDesignType(design$typeBetaSpending) && design$typeOfDesign != C_TYPE_OF_DESIGN_PT) { return(FALSE) } if (design$.getParameterType(parameterName) == C_PARAM_USER_DEFINED) { warning("'", parameterName, "' (", .arrayToString(design[[parameterName]]), ") will be ignored because it will be calculated", call. = FALSE ) } else if (design$.getParameterType(parameterName) == C_PARAM_GENERATED) { return(FALSE) } if (writeToDesign) { .setParameterType(design, parameterName, C_PARAM_DEFAULT_VALUE) } return(TRUE) } .setKMax <- function(design, kMax) { design$kMax <- as.integer(kMax) .setParameterType(design, "kMax", C_PARAM_DERIVED) invisible(kMax) } .getValidatedInformationRates <- function(design, kMaxLowerBound = 1L, writeToDesign = TRUE) { kMaxUpperBound <- ifelse(.isTrialDesignFisher(design), C_KMAX_UPPER_BOUND_FISHER, C_KMAX_UPPER_BOUND) if (.isDefinedArgument(design$informationRates) && .isDefinedArgument(design$kMax)) { .assertAreValidInformationRates( informationRates = design$informationRates, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound ) } .setParameterType(design, "informationRates", C_PARAM_USER_DEFINED) if (.isTrialDesignFisher(design)) { futilityBounds <- design$alpha0Vec } else { futilityBounds <- design$futilityBounds } if (.isUndefinedArgument(design$informationRates) && .isUndefinedArgument(futilityBounds)) { if (writeToDesign) { if (.setKMaxToDefaultIfUndefined(design, writeToDesign) || design$kMax == C_KMAX_DEFAULT) { .setParameterType(design, "informationRates", C_PARAM_DEFAULT_VALUE) 
} else { .setParameterType(design, "informationRates", C_PARAM_DERIVED) } } return((1:design$kMax) / design$kMax) } if (.isDefinedArgument(design$informationRates) && .isUndefinedArgument(futilityBounds)) { if (writeToDesign) { .setKMax(design, kMax = length(design$informationRates)) if (.isDefaultVector(design$informationRates, (1:design$kMax) / design$kMax)) { .setParameterType(design, "informationRates", C_PARAM_DEFAULT_VALUE) } } .assertAreValidInformationRates( informationRates = design$informationRates, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound ) return(design$informationRates) } if (.isUndefinedArgument(design$informationRates) && .isDefinedArgument(futilityBounds, argumentExistsValidationEnabled = FALSE)) { if (writeToDesign) { if (.isUndefinedArgument(design$kMax)) { .setKMax(design, kMax = length(futilityBounds) + 1) } .setParameterType(design, "informationRates", ifelse(design$kMax == C_KMAX_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_DERIVED )) } return((1:design$kMax) / design$kMax) } if (writeToDesign) { .setKMax(design, kMax = length(design$informationRates)) if (.isDefaultVector(design$informationRates, (1:design$kMax) / design$kMax)) { .setParameterType(design, "informationRates", C_PARAM_DEFAULT_VALUE) } } .assertAreValidInformationRates( informationRates = design$informationRates, kMax = design$kMax, kMaxLowerBound = kMaxLowerBound, kMaxUpperBound = kMaxUpperBound ) return(design$informationRates) } .setKMaxToDefaultIfUndefined <- function(design, writeToDesign = TRUE) { if (writeToDesign && .isUndefinedArgument(design$kMax)) { design$kMax <- C_KMAX_DEFAULT design$.setParameterType("kMax", C_PARAM_DEFAULT_VALUE) return(TRUE) } return(FALSE) } .validateAlphaAndBeta <- function(design) { .assertDesignParameterExists(design, "alpha", C_ALPHA_DEFAULT) .assertDesignParameterExists(design, "beta", C_BETA_DEFAULT) .assertIsValidAlphaAndBeta(alpha = design$alpha, beta = design$beta) } .validateUserAlphaSpending <- 
function(design) { .assertIsTrialDesign(design) .assertDesignParameterExists(design, "userAlphaSpending", NA_real_) if ((design$isUserDefinedParameter("informationRates") || (design$isDefaultParameter("informationRates") && !design$isUserDefinedParameter("kMax"))) && length(design$informationRates) != length(design$userAlphaSpending)) { stop(sprintf( paste0( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'userAlphaSpending' (%s) must be equal to length of 'informationRates' (%s)" ), length(design$userAlphaSpending), length(design$informationRates) )) } if (length(design$userAlphaSpending) != design$kMax) { stop(sprintf( paste0( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'userAlphaSpending' (%s) must be equal to 'kMax' (%s)" ), length(design$userAlphaSpending), design$kMax )) } .validateUserAlphaSpendingLength(design) if (.isUndefinedArgument(design$alpha)) { design$alpha <- design$userAlphaSpending[design$kMax] design$.setParameterType("alpha", ifelse(design$alpha == C_ALPHA_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_DERIVED )) } .assertIsValidAlpha(design$alpha) if (design$kMax > 1 && (design$userAlphaSpending[1] < 0 || design$userAlphaSpending[design$kMax] > design$alpha || any(design$userAlphaSpending[2:design$kMax] - design$userAlphaSpending[1:(design$kMax - 1)] < 0))) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'userAlphaSpending' = %s must be a vector that satisfies the following condition: ", "0 <= alpha_1 <= .. 
<= alpha_%s <= alpha = %s" ), .arrayToString(design$userAlphaSpending, vectorLookAndFeelEnabled = TRUE), design$kMax, design$alpha )) } } .validateUserBetaSpending <- function(design) { .assertIsTrialDesign(design) .assertDesignParameterExists(design, "userBetaSpending", NA_real_) if ((design$isUserDefinedParameter("informationRates") || (design$isDefaultParameter("informationRates") && !design$isUserDefinedParameter("kMax"))) && length(design$informationRates) != length(design$userBetaSpending)) { stop(sprintf( paste0( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'userBetaSpending' (%s) must be equal to length of 'informationRates' (%s)" ), length(design$userBetaSpending), length(design$informationRates) )) } if (length(design$userBetaSpending) != design$kMax) { stop(sprintf( paste0( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "length of 'userBetaSpending' (%s) must be equal to 'kMax' (%s)" ), length(design$userBetaSpending), design$kMax )) } if (length(design$userBetaSpending) < 2 || length(design$userBetaSpending) > C_KMAX_UPPER_BOUND) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'userBetaSpending' (%s) is out of bounds [2; %s]" ), length(design$userBetaSpending), C_KMAX_UPPER_BOUND )) } if (.isUndefinedArgument(design$beta)) { design$beta <- design$userBetaSpending[design$kMax] design$.setParameterType("beta", ifelse(design$beta == C_BETA_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_DERIVED )) } .assertIsValidBeta(beta = design$beta, alpha = design$alpha) if (design$kMax > 1 && (design$userBetaSpending[1] < 0 || design$userBetaSpending[design$kMax] > design$beta || any(design$userBetaSpending[2:design$kMax] - design$userBetaSpending[1:(design$kMax - 1)] < 0))) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'userBetaSpending' = %s must be a vector that satisfies the following condition: ", "0 <= beta_1 <= .. 
<= beta_%s <= beta = %s" ), .arrayToString(design$userBetaSpending, vectorLookAndFeelEnabled = TRUE), design$kMax, design$beta )) } } .validateUserAlphaSpendingLength <- function(design) { if (length(design$userAlphaSpending) < 1 || length(design$userAlphaSpending) > C_KMAX_UPPER_BOUND) { stop(sprintf( paste0( C_EXCEPTION_TYPE_ARGUMENT_LENGTH_OUT_OF_BOUNDS, "length of 'userAlphaSpending' (%s) is out of bounds [1; %s]" ), length(design$userAlphaSpending), C_KMAX_UPPER_BOUND )) } } .setKmaxBasedOnAlphaSpendingDefintion <- function(design) { if (.isTrialDesignFisher(design)) { if (design$method != C_FISHER_METHOD_USER_DEFINED_ALPHA) { return(invisible()) } } else { if (design$typeOfDesign != C_TYPE_OF_DESIGN_AS_USER) { return(invisible()) } } if (.isDefinedArgument(design$kMax)) { return(invisible()) } if (.isUndefinedArgument(design$userAlphaSpending)) { return(invisible()) } if (.isDefinedArgument(design$informationRates)) { return(invisible()) } if (.isTrialDesignFisher(design)) { if (.isDefinedArgument(design$alpha0Vec)) { return(invisible()) } } else { if (.isDefinedArgument(design$futilityBounds)) { return(invisible()) } } .validateUserAlphaSpendingLength(design) .setKMax(design, kMax = length(design$userAlphaSpending)) } # This function generates the piecewise exponential survival function or (if kappa != 1) a Weibull cdf .getPiecewiseExponentialDistributionSingleTime <- function(time, piecewiseLambda, piecewiseSurvivalTime = NA_real_, kappa) { if (length(piecewiseLambda) == 1) { if (kappa <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'kappa' (", kappa, ") must be > 0") } return(pweibull(time, kappa, scale = 1 / piecewiseLambda, lower.tail = TRUE, log.p = FALSE)) } if (length(piecewiseSurvivalTime) != length(piecewiseLambda)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'piecewiseSurvivalTime' (", .arrayToString(piecewiseSurvivalTime), ") must be equal to length of 'piecewiseLambda' (", .arrayToString(piecewiseLambda), ")" ) } piecewiseSurvivalTime 
<- .getPiecewiseExpStartTimesWithoutLeadingZero(piecewiseSurvivalTime)
    # a Weibull shape parameter is only meaningful for the single-lambda case
    if (kappa != 1) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "Weibull distribution cannot be used for piecewise survival definition"
        )
    }
    len <- length(piecewiseSurvivalTime)
    # 'time' falls into one of the pieces: accumulate the hazard over all
    # completed pieces plus the fraction of the current piece
    for (i in 1:len) {
        if (time <= piecewiseSurvivalTime[i]) {
            if (i == 1) {
                return(1 - exp(-(piecewiseLambda[1] * time)))
            }
            y <- piecewiseLambda[1] * piecewiseSurvivalTime[1]
            if (i > 2) {
                y <- y + sum(piecewiseLambda[2:(i - 1)] *
                    (piecewiseSurvivalTime[2:(i - 1)] - piecewiseSurvivalTime[1:(i - 2)]))
            }
            y <- y + piecewiseLambda[i] * (time - piecewiseSurvivalTime[i - 1])
            return(1 - exp(-y))
        }
    }
    # 'time' lies beyond the last start time: the final lambda applies
    if (len == 1) {
        y <- piecewiseLambda[1] * piecewiseSurvivalTime[1] +
            piecewiseLambda[len + 1] * (time - piecewiseSurvivalTime[len])
    } else {
        y <- piecewiseLambda[1] * piecewiseSurvivalTime[1] +
            sum(piecewiseLambda[2:len] *
                (piecewiseSurvivalTime[2:len] - piecewiseSurvivalTime[1:(len - 1)])) +
            piecewiseLambda[len + 1] * (time - piecewiseSurvivalTime[len])
    }
    return(1 - exp(-y))
}

# Quantile function for a single probability 'quantile' of the piecewise
# exponential distribution (or, if length(piecewiseLambda) == 1, of the
# Weibull distribution with shape 'kappa' and rate piecewiseLambda[1]).
# Counterpart of .getPiecewiseExponentialDistributionSingleTime.
.getPiecewiseExponentialSingleQuantile <- function(quantile, piecewiseLambda, piecewiseSurvivalTime, kappa) {
    if (length(piecewiseLambda) == 1) {
        if (kappa <= 0) {
            # message aligned with .getPiecewiseExponentialDistributionSingleTime
            # (was the ungrammatical "kappa needs to a positive number")
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'kappa' (", kappa, ") must be > 0")
        }
        # Weibull quantile with scale 1 / lambda
        return((-log(1 - quantile))^(1 / kappa) / piecewiseLambda[1])
    }
    if (kappa != 1) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "Weibull distribution cannot be used for piecewise survival definition"
        )
    }
    # cdf values at the piece start times locate the piece containing 'quantile'
    cdfValues <- .getPiecewiseExponentialDistribution(piecewiseSurvivalTime,
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        piecewiseLambda = piecewiseLambda, kappa = 1
    )
    cdfValues <- cdfValues[2:length(cdfValues)] # use values without a leading 0
    piecewiseSurvivalTime <- .getPiecewiseExpStartTimesWithoutLeadingZero(piecewiseSurvivalTime)
    len <- length(piecewiseSurvivalTime)
    for (i in 1:len) {
        if (quantile <= cdfValues[i]) {
            if (i == 1) {
                return(-log(1 - quantile) / piecewiseLambda[1])
            }
            # cumulative hazard of the completed pieces before piece i
            y <- piecewiseLambda[1] * piecewiseSurvivalTime[1]
if (i > 2) { y <- y + sum(piecewiseLambda[2:(i - 1)] * (piecewiseSurvivalTime[2:(i - 1)] - piecewiseSurvivalTime[1:(i - 2)])) } return(piecewiseSurvivalTime[i - 1] - (log(1 - quantile) + y) / piecewiseLambda[i]) } } if (len == 1) { return(piecewiseSurvivalTime[1] - (log(1 - quantile) + piecewiseLambda[1] * piecewiseSurvivalTime[1]) / piecewiseLambda[2]) } y <- piecewiseLambda[1] * piecewiseSurvivalTime[1] + sum(piecewiseLambda[2:len] * (piecewiseSurvivalTime[2:len] - piecewiseSurvivalTime[1:(len - 1)])) return(piecewiseSurvivalTime[len] - (log(1 - quantile) + y) / piecewiseLambda[len + 1]) } .getPiecewiseExponentialDistribution <- function(time, piecewiseLambda, piecewiseSurvivalTime, kappa) { if (length(time) == 1 && length(piecewiseSurvivalTime) == 1 && identical(time, piecewiseSurvivalTime) && length(piecewiseLambda) > 1) { result <- c() for (lambda in piecewiseLambda) { result <- c(result, .getPiecewiseExponentialDistributionSingleTime( time, lambda, piecewiseSurvivalTime, kappa )) } return(result) } result <- c() for (timeValue in time) { result <- c(result, .getPiecewiseExponentialDistributionSingleTime( timeValue, piecewiseLambda, piecewiseSurvivalTime, kappa )) } return(result) } .getPiecewiseExponentialSettings <- function(..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1) { if (!all(is.na(piecewiseLambda)) && is.list(piecewiseSurvivalTime)) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "'piecewiseSurvivalTime' needs to be a numeric vector and not a list ", "because 'piecewiseLambda' (", piecewiseLambda, ") is defined separately" ) } if (any(is.na(piecewiseSurvivalTime))) { .assertIsSingleNumber(kappa, "kappa") } if (length(piecewiseLambda) == 1 && !is.na(piecewiseLambda) && length(piecewiseSurvivalTime) > 0 && !all(is.na(piecewiseSurvivalTime))) { warning("Argument 'piecewiseSurvivalTime' will be ignored because ", "length of 'piecewiseLambda' is 1", call. 
= FALSE
        )
    }
    # delegate validation/normalization of the piecewise definition to the
    # PiecewiseSurvivalTime class (also resolves the list form of the input)
    setting <- PiecewiseSurvivalTime(
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        lambda2 = piecewiseLambda,
        hazardRatio = 1,
        kappa = kappa,
        delayedResponseAllowed = FALSE
    )
    return(list(
        piecewiseSurvivalTime = setting$piecewiseSurvivalTime,
        piecewiseLambda = setting$lambda2
    ))
}

#'
#' @title
#' The Piecewise Exponential Distribution
#'
#' @description
#' Distribution function, quantile function and random number generation for the
#' piecewise exponential distribution.
#'
#' @param t,time Vector of time values.
#' @param q,quantile Vector of quantiles.
#' @param n Number of observations.
#' @param s,piecewiseSurvivalTime Vector of start times defining the "time pieces".
#' @param lambda,piecewiseLambda Vector of lambda values (hazard rates) corresponding to the start times.
#' @inheritParams param_kappa
#' @inheritParams param_three_dots
#'
#' @details
#' \code{getPiecewiseExponentialDistribution()} (short: \code{ppwexp()}),
#' \code{getPiecewiseExponentialQuantile()} (short: \code{qpwexp()}), and
#' \code{getPiecewiseExponentialRandomNumbers()} (short: \code{rpwexp()}) provide
#' probabilities, quantiles, and random numbers according to a piecewise
#' exponential or a Weibull distribution.
#' The piecewise definition is performed through a vector of
#' starting times (\code{piecewiseSurvivalTime}) and a vector of hazard rates (\code{piecewiseLambda}).
#' You can also use a list that defines the starting times and piecewise
#' lambdas together and define piecewiseSurvivalTime as this list.
#' The list needs to have the form, e.g.,
#' piecewiseSurvivalTime <- list(
#'     "0 - <6"   = 0.025,
#'     "6 - <9"   = 0.04,
#'     "9 - <15"  = 0.015,
#'     ">=15"     = 0.007) .
#' For the Weibull case, you can also specify a shape parameter kappa in order to
#' calculate probabilities, quantiles, or random numbers.
#' In this case, no piecewise definition is possible, i.e., only piecewiseLambda
#' (as a single value) and kappa need to be specified.
#'
#' @return A \code{\link[base]{numeric}} value or vector will be returned.
#'
#' @examples
#' # Calculate probabilities for a range of time values for a
#' # piecewise exponential distribution with hazard rates
#' # 0.025, 0.04, 0.015, and 0.01 in the intervals
#' # [0, 6), [6, 9), [9, 15), [15, Inf), respectively,
#' # and re-return the time values:
#' piecewiseSurvivalTime <- list(
#'     "0 - <6"   = 0.025,
#'     "6 - <9"   = 0.04,
#'     "9 - <15"  = 0.015,
#'     ">=15"     = 0.01
#' )
#' y <- getPiecewiseExponentialDistribution(seq(0, 150, 15),
#'     piecewiseSurvivalTime = piecewiseSurvivalTime
#' )
#' getPiecewiseExponentialQuantile(y,
#'     piecewiseSurvivalTime = piecewiseSurvivalTime
#' )
#'
#' @name utilitiesForPiecewiseExponentialDistribution
#'
NULL

#' @rdname utilitiesForPiecewiseExponentialDistribution
#' @export
getPiecewiseExponentialDistribution <- function(time, ...,
        piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1) {
    .warnInCaseOfUnknownArguments(functionName = "getPiecewiseExponentialDistribution", ...)
    .assertIsNumericVector(time, "time")
    if (any(time < 0)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "time needs to be a non-negative number"
        )
    }
    # validate and normalize the piecewise definition (list or vector form)
    settings <- .getPiecewiseExponentialSettings(
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        piecewiseLambda = piecewiseLambda, kappa = kappa
    )
    return(.getPiecewiseExponentialDistribution(
        time = time,
        piecewiseSurvivalTime = settings$piecewiseSurvivalTime,
        piecewiseLambda = settings$piecewiseLambda, kappa = kappa
    ))
}

#' @rdname utilitiesForPiecewiseExponentialDistribution
#' @export
ppwexp <- function(t, ..., s = NA_real_, lambda = NA_real_, kappa = 1) {
    # short-name alias for getPiecewiseExponentialDistribution()
    getPiecewiseExponentialDistribution(
        time = t,
        piecewiseSurvivalTime = s,
        piecewiseLambda = lambda,
        kappa = kappa, ...
) } #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export getPiecewiseExponentialQuantile <- function(quantile, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1) { .warnInCaseOfUnknownArguments(functionName = "getPiecewiseExponentialQuantile", ...) .assertIsNumericVector(quantile, "quantile") if (any(quantile < 0) || any(quantile > 1)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "quantile needs to be within [0; 1]" ) } settings <- .getPiecewiseExponentialSettings( piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda, kappa = kappa ) result <- c() for (quantileValue in quantile) { result <- c(result, .getPiecewiseExponentialSingleQuantile(quantileValue, piecewiseSurvivalTime = settings$piecewiseSurvivalTime, piecewiseLambda = settings$piecewiseLambda, kappa )) } return(result) } #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export qpwexp <- function(q, ..., s = NA_real_, lambda = NA_real_, kappa = 1) { getPiecewiseExponentialQuantile( quantile = q, piecewiseSurvivalTime = s, piecewiseLambda = lambda, kappa = kappa, ... ) } .getPiecewiseExponentialRandomNumbersFast <- function(n, piecewiseSurvivalTime, piecewiseLambda) { result <- rexp(n, rate = piecewiseLambda[1]) if (length(piecewiseSurvivalTime) > 1) { for (i in 2:length(piecewiseSurvivalTime)) { result <- ifelse(result < piecewiseSurvivalTime[i], result, piecewiseSurvivalTime[i] + rexp(n, rate = piecewiseLambda[i]) ) } } result } #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export getPiecewiseExponentialRandomNumbers <- function(n, ..., piecewiseSurvivalTime = NA_real_, piecewiseLambda = NA_real_, kappa = 1) { .warnInCaseOfUnknownArguments(functionName = "getPiecewiseExponentialRandomNumbers", ...) 
.assertIsSingleInteger(n, "n", validateType = FALSE) .assertIsInClosedInterval(n, "n", lower = 1, upper = NULL) settings <- .getPiecewiseExponentialSettings( piecewiseSurvivalTime = piecewiseSurvivalTime, piecewiseLambda = piecewiseLambda, kappa = kappa ) if (kappa == 1) { return(.getPiecewiseExponentialRandomNumbersFast(n, piecewiseSurvivalTime = settings$piecewiseSurvivalTime, piecewiseLambda = settings$piecewiseLambda )) } randomQuantiles <- runif(n, 0, 1) result <- c() for (quantile in randomQuantiles) { result <- c(result, .getPiecewiseExponentialSingleQuantile(quantile, piecewiseSurvivalTime = settings$piecewiseSurvivalTime, piecewiseLambda = settings$piecewiseLambda, kappa = kappa )) } return(result) } #' @rdname utilitiesForPiecewiseExponentialDistribution #' @export rpwexp <- function(n, ..., s = NA_real_, lambda = NA_real_, kappa = 1) { getPiecewiseExponentialRandomNumbers( n = n, piecewiseSurvivalTime = s, piecewiseLambda = lambda, kappa = kappa, ... ) } #' #' @title #' Survival Helper Functions for Conversion of Pi, Lambda, Median #' #' @description #' Functions to convert pi, lambda and median values into each other. #' #' @param piValue,pi1,pi2,lambda,median Value that shall be converted. #' @inheritParams param_eventTime #' @inheritParams param_kappa #' #' @details #' Can be used, e.g., to convert median values into pi or lambda values for usage in #' \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}} or \code{\link[=getPowerSurvival]{getPowerSurvival()}}. #' #' @return Returns a \code{\link[base]{numeric}} value or vector will be returned. 
#'
#' @name utilitiesForSurvivalTrials
#'
NULL

#' @rdname utilitiesForSurvivalTrials
#' @export
getLambdaByPi <- function(piValue, eventTime = 12, # C_EVENT_TIME_DEFAULT
        kappa = 1) {
    .assertIsValidPi(piValue, "pi")
    .assertIsValidKappa(kappa)
    .assertIsSingleNumber(eventTime, "eventTime")
    .assertIsInOpenInterval(eventTime, "eventTime", lower = 0, upper = NULL)
    # lambda is undefined for pi == 1 (log(0)): reject values numerically equal to 1
    for (value in piValue) {
        if (value > 1 - 1e-16 && value < 1 + 1e-16) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'pi' must be != 1")
        }
    }
    return((-log(1 - piValue))^(1 / kappa) / eventTime)
}

#' @rdname utilitiesForSurvivalTrials
#' @export
getLambdaByMedian <- function(median, kappa = 1) {
    .assertIsNumericVector(median, "median")
    .assertIsValidKappa(kappa)
    # Weibull median: lambda = log(2)^(1 / kappa) / median
    return(log(2)^(1 / kappa) / median)
}

#' @rdname utilitiesForSurvivalTrials
#' @export
getHazardRatioByPi <- function(pi1, pi2, eventTime = 12, # C_EVENT_TIME_DEFAULT
        kappa = 1) {
    .assertIsValidPi(pi1, "pi1")
    .assertIsValidPi(pi2, "pi2")
    .assertIsValidKappa(kappa)
    .assertIsSingleNumber(eventTime, "eventTime")
    .assertIsInOpenInterval(eventTime, "eventTime", lower = 0, upper = NULL)
    # hazard ratio = (lambda1 / lambda2)^kappa
    return((getLambdaByPi(pi1, eventTime, kappa) / getLambdaByPi(pi2, eventTime, kappa))^kappa)
}

#' @rdname utilitiesForSurvivalTrials
#' @export
getPiByLambda <- function(lambda, eventTime = 12, # C_EVENT_TIME_DEFAULT
        kappa = 1) {
    .assertIsValidLambda(lambda)
    .assertIsValidKappa(kappa)
    .assertIsSingleNumber(eventTime, "eventTime")
    .assertIsInOpenInterval(eventTime, "eventTime", lower = 0, upper = NULL)
    # survival probability at eventTime; values below 1e-15 would round pi to 1
    x <- exp(-(lambda * eventTime)^kappa)
    if (any(x < 1e-15)) {
        warning("Calculation of pi (1) by lambda (", .arrayToString(round(lambda, 4)),
            ") results in a possible loss of precision because pi = 1 was returned but pi is not exactly 1",
            call.
= FALSE ) } return(1 - x) } # alternative: return(1 - exp(-(log(2)^(1 / kappa) / median * eventTime)^kappa)) #' @rdname utilitiesForSurvivalTrials #' @export getPiByMedian <- function(median, eventTime = 12, # C_EVENT_TIME_DEFAULT kappa = 1) { .assertIsNumericVector(median, "median") .assertIsValidKappa(kappa) .assertIsSingleNumber(eventTime, "eventTime") .assertIsInOpenInterval(eventTime, "eventTime", lower = 0, upper = NULL) return(1 - 2^(-(eventTime / median)^kappa)) } #' @rdname utilitiesForSurvivalTrials #' @export getMedianByLambda <- function(lambda, kappa = 1) { .assertIsValidLambda(lambda) .assertIsValidKappa(kappa) return(log(2)^(1 / kappa) / lambda) } #' @rdname utilitiesForSurvivalTrials #' @export getMedianByPi <- function(piValue, eventTime = 12, # C_EVENT_TIME_DEFAULT kappa = 1) { .assertIsValidPi(piValue, "piValue") .assertIsSingleNumber(eventTime, "eventTime") .assertIsInOpenInterval(eventTime, "eventTime", lower = 0, upper = NULL) .assertIsValidKappa(kappa) getMedianByLambda(getLambdaByPi(piValue, eventTime, kappa), kappa) } .convertStageWiseToOverallValuesInner <- function(valuesPerStage) { eventsOverStages <- matrix(valuesPerStage, nrow = nrow(as.matrix(valuesPerStage))) eventsOverStages[is.na(eventsOverStages)] <- 0 for (i in 1:ncol(as.matrix(valuesPerStage))) { eventsOverStages[, i] <- cumsum(eventsOverStages[, i]) } return(eventsOverStages) } # example: .convertStageWiseToOverallValues(array(1:4, c(3, 4))) .convertStageWiseToOverallValues <- function(valuesPerStage) { if (is.array(valuesPerStage) && length(dim(valuesPerStage)) == 3) { eventsOverStages <- array(dim = dim(valuesPerStage)) for (g in 1:dim(valuesPerStage)[3]) { eventsTemp <- matrix(valuesPerStage[, , g], nrow = dim(valuesPerStage)[1]) eventsOverStages[, , g] <- .convertStageWiseToOverallValuesInner(eventsTemp) } return(eventsOverStages) } return(.convertStageWiseToOverallValuesInner(valuesPerStage)) } .getDesignParametersToShow <- function(paramaterSet) { if 
(is.null(paramaterSet[[".design"]])) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'paramaterSet' (", .getClassName(paramaterSet), ") does not contain '.design' field" ) } designParametersToShow <- c(".design$stages") if (grepl("Dunnett", .getClassName(paramaterSet))) { designParametersToShow <- c( designParametersToShow, ".design$alpha", ".design$informationAtInterim", ".design$secondStageConditioning", ".design$sided" ) } else { design <- paramaterSet$.design designParametersToShow <- c() if (design$kMax > 1) { if (is.null(paramaterSet[[".stageResults"]]) || .isTrialDesignGroupSequential(design)) { designParametersToShow <- c(designParametersToShow, ".design$informationRates") } else if (.isTrialDesignInverseNormal(design)) { designParametersToShow <- c(designParametersToShow, ".stageResults$weightsInverseNormal") } else if (.isTrialDesignFisher(design)) { designParametersToShow <- c(designParametersToShow, ".stageResults$weightsFisher") } if (design$.isDelayedResponseDesign()) { designParametersToShow <- c(designParametersToShow, ".design$delayedInformation") } } designParametersToShow <- c(designParametersToShow, ".design$criticalValues") if (design$.isDelayedResponseDesign()) { designParametersToShow <- c(designParametersToShow, ".design$decisionCriticalValues") } if (design$kMax > 1) { if (.isTrialDesignFisher(design)) { designParametersToShow <- c(designParametersToShow, ".design$alpha0Vec") } else { designParametersToShow <- c(designParametersToShow, ".design$futilityBounds") } designParametersToShow <- c(designParametersToShow, ".design$alphaSpent") designParametersToShow <- c(designParametersToShow, ".design$stageLevels") } if (design$sided == 2 && !grepl("Analysis|Simulation", .getClassName(paramaterSet)) && (!inherits(paramaterSet, "TrialDesignPlan") || paramaterSet$.isSampleSizeObject())) { designParametersToShow <- c(designParametersToShow, ".design$twoSidedPower") } designParametersToShow <- c(designParametersToShow, ".design$alpha") if 
(!grepl("Analysis|Simulation", .getClassName(paramaterSet)) &&
                (!inherits(paramaterSet, "TrialDesignPlan") || paramaterSet$.isSampleSizeObject())) {
            designParametersToShow <- c(designParametersToShow, ".design$beta")
        }
        designParametersToShow <- c(designParametersToShow, ".design$sided")
    }
    return(designParametersToShow)
}

# Returns TRUE iff 'design' spends no alpha before the final stage:
# either typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY, or a
# user-defined alpha spending function with zeros at all interim stages.
.isNoEarlyEfficacy <- function(design) {
    .assertIsTrialDesignInverseNormalOrGroupSequential(design)
    if (design$kMax == 1) {
        return(FALSE)
    }
    if (design$typeOfDesign == C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY) {
        return(TRUE)
    }
    if (design$typeOfDesign != C_TYPE_OF_DESIGN_AS_USER) {
        return(FALSE)
    }
    indices <- design$userAlphaSpending == 0
    # all interim (non-final) spending values must be exactly 0
    return(all(indices[1:(length(indices) - 1)]))
}

# Add the column 'delayedInformationRates' = informationRates + delayedInformation
# to 'dataFrame' (NA at the final stage), if the required columns are present.
.addDelayedInformationRates <- function(dataFrame) {
    if (all(c("informationRates", "delayedInformation", "kMax", "stages") %in% colnames(dataFrame))) {
        kMax <- max(dataFrame$kMax)
        if (kMax > 1) {
            dataFrame$delayedInformationRates <-
                dataFrame$informationRates + dataFrame$delayedInformation
            dataFrame$delayedInformationRates[dataFrame$stages == kMax] <- NA_real_
        }
    }
    return(dataFrame)
}

# Set entries of 'values' to 0 at interim stages where no alpha is spent:
# all interim stages for "noEarlyEfficacy" designs, or the stages with a
# (numerically) zero user-defined alpha spending value for "asUser" designs.
.getNoEarlyEfficacyZeroCorrectedValues <- function(design, values) {
    if (design$kMax == 1) {
        return(values)
    }
    if (design$typeOfDesign == "noEarlyEfficacy") {
        values[1:(design$kMax - 1)] <- 0
    } else if (design$typeOfDesign == "asUser") {
        for (k in 1:(design$kMax - 1)) {
            if (!is.na(design$userAlphaSpending[k]) && abs(design$userAlphaSpending[k]) < 1e-16) {
                values[k] <- 0
            }
        }
    }
    return(values)
}
rpact/R/RcppExports.R0000644000176200001440000001621614450543724014216 0ustar liggesusers# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

getFisherCombinationSizeCpp <- function(kMax, alpha0Vec, criticalValues, tVec, cases) {
    .Call(`_rpact_getFisherCombinationSizeCpp`, kMax, alpha0Vec, criticalValues, tVec, cases)
}

getSimulatedAlphaCpp <- function(kMax, alpha0, criticalValues, tVec, iterations) {
    .Call(`_rpact_getSimulatedAlphaCpp`,
kMax, alpha0, criticalValues, tVec, iterations) } getFisherCombinationCasesCpp <- function(kMax, tVec) { .Call(`_rpact_getFisherCombinationCasesCpp`, kMax, tVec) } getDesignFisherTryCpp <- function(kMax, alpha, tolerance, criticalValues, scale, alpha0Vec, userAlphaSpending, method) { .Call(`_rpact_getDesignFisherTryCpp`, kMax, alpha, tolerance, criticalValues, scale, alpha0Vec, userAlphaSpending, method) } getGroupSequentialProbabilitiesCpp <- function(decisionMatrix, informationRates) { .Call(`_rpact_getGroupSequentialProbabilitiesCpp`, decisionMatrix, informationRates) } getDesignGroupSequentialPampallonaTsiatisCpp <- function(tolerance, beta, alpha, kMax, deltaPT0, deltaPT1, informationRates, sided, bindingFutility) { .Call(`_rpact_getDesignGroupSequentialPampallonaTsiatisCpp`, tolerance, beta, alpha, kMax, deltaPT0, deltaPT1, informationRates, sided, bindingFutility) } getDesignGroupSequentialUserDefinedAlphaSpendingCpp <- function(kMax, userAlphaSpending, sided, informationRates, bindingFutility, futilityBounds, tolerance) { .Call(`_rpact_getDesignGroupSequentialUserDefinedAlphaSpendingCpp`, kMax, userAlphaSpending, sided, informationRates, bindingFutility, futilityBounds, tolerance) } getDesignGroupSequentialAlphaSpendingCpp <- function(kMax, alpha, gammaA, typeOfDesign, sided, informationRates, bindingFutility, futilityBounds, tolerance) { .Call(`_rpact_getDesignGroupSequentialAlphaSpendingCpp`, kMax, alpha, gammaA, typeOfDesign, sided, informationRates, bindingFutility, futilityBounds, tolerance) } getDesignGroupSequentialDeltaWTCpp <- function(kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance, deltaWT) { .Call(`_rpact_getDesignGroupSequentialDeltaWTCpp`, kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance, deltaWT) } getDesignGroupSequentialPocockCpp <- function(kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance) { .Call(`_rpact_getDesignGroupSequentialPocockCpp`, 
kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance) } getDesignGroupSequentialOBrienAndFlemingCpp <- function(kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance) { .Call(`_rpact_getDesignGroupSequentialOBrienAndFlemingCpp`, kMax, alpha, sided, informationRates, bindingFutility, futilityBounds, tolerance) } getDesignGroupSequentialBetaSpendingCpp <- function(criticalValues, kMax, userAlphaSpending, userBetaSpending, informationRates, bindingFutility, tolerance, typeOfDesign, typeBetaSpending, gammaA, gammaB, alpha, beta, sided, betaAdjustment, twoSidedPower) { .Call(`_rpact_getDesignGroupSequentialBetaSpendingCpp`, criticalValues, kMax, userAlphaSpending, userBetaSpending, informationRates, bindingFutility, tolerance, typeOfDesign, typeBetaSpending, gammaA, gammaB, alpha, beta, sided, betaAdjustment, twoSidedPower) } getDesignGroupSequentialUserDefinedBetaSpendingCpp <- function(criticalValues, kMax, userAlphaSpending, userBetaSpending, sided, informationRates, bindingFutility, tolerance, typeOfDesign, gammaA, alpha, betaAdjustment, twoSidedPower) { .Call(`_rpact_getDesignGroupSequentialUserDefinedBetaSpendingCpp`, criticalValues, kMax, userAlphaSpending, userBetaSpending, sided, informationRates, bindingFutility, tolerance, typeOfDesign, gammaA, alpha, betaAdjustment, twoSidedPower) } getSimulationMeansLoopCpp <- function(alternative, kMax, maxNumberOfIterations, designNumber, informationRates, futilityBounds, alpha0Vec, criticalValues, meanRatio, thetaH0, stDev, groups, normalApproximation, plannedSubjects, directionUpper, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, thetaH1, stDevH1, calcSubjectsFunctionType, calcSubjectsFunctionR, calcSubjectsFunctionCpp) { .Call(`_rpact_getSimulationMeansLoopCpp`, alternative, kMax, maxNumberOfIterations, designNumber, informationRates, futilityBounds, alpha0Vec, criticalValues, meanRatio, thetaH0, stDev, groups, 
normalApproximation, plannedSubjects, directionUpper, allocationRatioPlanned, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, thetaH1, stDevH1, calcSubjectsFunctionType, calcSubjectsFunctionR, calcSubjectsFunctionCpp) } getSimulationRatesCpp <- function(kMax, informationRates, criticalValues, pi1, pi2, maxNumberOfIterations, designNumber, groups, futilityBounds, alpha0Vec, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, pi1H1, pi2H1, normalApproximation, plannedSubjects, directionUpper, allocationRatioPlanned, riskRatio, thetaH0, calcSubjectsFunctionType, calcSubjectsFunctionR, calcSubjectsFunctionCpp) { .Call(`_rpact_getSimulationRatesCpp`, kMax, informationRates, criticalValues, pi1, pi2, maxNumberOfIterations, designNumber, groups, futilityBounds, alpha0Vec, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage, conditionalPower, pi1H1, pi2H1, normalApproximation, plannedSubjects, directionUpper, allocationRatioPlanned, riskRatio, thetaH0, calcSubjectsFunctionType, calcSubjectsFunctionR, calcSubjectsFunctionCpp) } getSimulationSurvivalCpp <- function(designNumber, kMax, sided, criticalValues, informationRates, conditionalPower, plannedEvents, thetaH1, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocationRatioPlanned, accrualTime, treatmentGroup, thetaH0, futilityBounds, alpha0Vec, pi1Vec, pi2, eventTime, piecewiseSurvivalTime, cdfValues1, cdfValues2, lambdaVec1, lambdaVec2, phi, maxNumberOfSubjects, maxNumberOfIterations, maxNumberOfRawDatasetsPerStage, kappa, calcEventsFunctionType, calcEventsFunctionR, calcEventsFunctionCpp) { .Call(`_rpact_getSimulationSurvivalCpp`, designNumber, kMax, sided, criticalValues, informationRates, conditionalPower, plannedEvents, thetaH1, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, directionUpper, allocationRatioPlanned, accrualTime, treatmentGroup, thetaH0, futilityBounds, alpha0Vec, pi1Vec, pi2, eventTime, piecewiseSurvivalTime, 
cdfValues1, cdfValues2, lambdaVec1, lambdaVec2, phi, maxNumberOfSubjects, maxNumberOfIterations, maxNumberOfRawDatasetsPerStage, kappa, calcEventsFunctionType, calcEventsFunctionR, calcEventsFunctionCpp) } getOneMinusQNorm <- function(p, mean = 0, sd = 1, lowerTail = 1, logP = 0, epsilon = 1.0e-100) { .Call(`_rpact_getOneMinusQNorm`, p, mean, sd, lowerTail, logP, epsilon) } zeroin <- function(f, lower, upper, tolerance, maxIter) { .Call(`_rpact_zeroin`, f, lower, upper, tolerance, maxIter) } getCipheredValue <- function(x) { .Call(`_rpact_getCipheredValue`, x) } getFraction <- function(x, epsilon = 1.0e-6, maxNumberOfSearchSteps = 30L) { .Call(`_rpact_getFraction`, x, epsilon, maxNumberOfSearchSteps) } rpact/R/f_simulation_multiarm_means.R0000644000176200001440000007316414445307576017527 0ustar liggesusers## | ## | *Simulation of multi-arm design with continuous data* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_simulation_multiarm.R NULL .getSimulationMeansMultiArmStageSubjects <- function(..., stage, conditionalPower, conditionalCriticalValue, plannedSubjects, allocationRatioPlanned, selectedArms, thetaH1, overallEffects, stDevH1, minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage) { stage <- stage - 1 # to be consistent with non-multiarm situation gMax <- nrow(overallEffects) if (!is.na(conditionalPower)) { if (any(selectedArms[1:gMax, 
stage + 1], na.rm = TRUE)) {
            if (is.na(thetaH1)) {
                # estimate the standardized effect from the selected arms,
                # floored at 1e-07 to avoid division by (near) zero below
                thetaStandardized <- max(min(overallEffects[
                    selectedArms[1:gMax, stage + 1], stage
                ] / stDevH1, na.rm = TRUE), 1e-07)
            } else {
                thetaStandardized <- max(thetaH1 / stDevH1, 1e-07)
            }
            if (conditionalCriticalValue[stage] > 8) {
                # conditional critical value is numerically "infinite": use the upper bound
                newSubjects <- maxNumberOfSubjectsPerStage[stage + 1]
            } else {
                newSubjects <- (1 + allocationRatioPlanned[stage]) *
                    (max(0, conditionalCriticalValue[stage] + .getQNorm(conditionalPower)))^2 / thetaStandardized^2
                # constrain the recalculated size to the user-defined bounds
                newSubjects <- min(
                    max(minNumberOfSubjectsPerStage[stage + 1], newSubjects),
                    maxNumberOfSubjectsPerStage[stage + 1]
                )
            }
        } else {
            newSubjects <- 0
        }
    } else {
        # no conditional power requested: use the planned stage-wise increment
        newSubjects <- plannedSubjects[stage + 1] - plannedSubjects[stage]
    }
    return(newSubjects)
}

# Simulates one complete multi-arm trial for the given design and mean vector:
# per-stage group means, stage-wise and overall z statistics, separate p-values,
# conditional critical values, arm selection, and (optional) sample size
# recalculation. Row gMax + 1 of the matrices holds the control group.
.getSimulatedStageMeansMultiArm <- function(..., design, muVector, stDev,
        plannedSubjects, typeOfSelection, effectMeasure, adaptations,
        epsilonValue, rValue, threshold, allocationRatioPlanned,
        minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage,
        conditionalPower, thetaH1, stDevH1, calcSubjectsFunction,
        calcSubjectsFunctionIsUserDefined, selectArmsFunction) {
    kMax <- length(plannedSubjects)
    gMax <- length(muVector)
    simMeans <- matrix(NA_real_, nrow = gMax + 1, ncol = kMax)
    overallEffects <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    subjectsPerStage <- matrix(NA_real_, nrow = gMax + 1, ncol = kMax)
    testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    conditionalCriticalValue <- rep(NA_real_, kMax - 1)
    conditionalPowerPerStage <- rep(NA_real_, kMax)
    # all arms (and control, row gMax + 1) start selected at stage 1
    selectedArms <- matrix(FALSE, nrow = gMax + 1, ncol = kMax)
    selectedArms[, 1] <- TRUE
    adjustedPValues <- rep(NA_real_, kMax)
    if (.isTrialDesignFisher(design)) {
        weights <- .getWeightsFisher(design)
    } else if (.isTrialDesignInverseNormal(design)) {
        weights <- .getWeightsInverseNormal(design)
    }
    for (k in 1:kMax) {
        if (k == 1) {
            subjectsPerStage[gMax + 1, k] <-
                plannedSubjects[k] / allocationRatioPlanned[k]
        } else {
            subjectsPerStage[gMax + 1, k] <-
                (plannedSubjects[k] - plannedSubjects[k - 1]) / allocationRatioPlanned[k]
        }
        # simulate the control group mean of stage k (standard error of the mean)
        if (subjectsPerStage[gMax + 1, k] > 0) {
            simMeans[gMax + 1, k] <- stats::rnorm(1, 0, stDev / sqrt(subjectsPerStage[gMax + 1, k]))
        }
        for (treatmentArm in 1:gMax) {
            if (selectedArms[treatmentArm, k]) {
                if (k == 1) {
                    subjectsPerStage[treatmentArm, k] <- plannedSubjects[k]
                } else {
                    subjectsPerStage[treatmentArm, k] <- plannedSubjects[k] - plannedSubjects[k - 1]
                }
                if (subjectsPerStage[treatmentArm, k] > 0) {
                    simMeans[treatmentArm, k] <- stats::rnorm(
                        1, muVector[treatmentArm],
                        stDev / sqrt(subjectsPerStage[treatmentArm, k])
                    )
                    # stage-wise z statistic of arm vs control
                    testStatistics[treatmentArm, k] <-
                        (simMeans[treatmentArm, k] - simMeans[gMax + 1, k]) /
                            (stDev * sqrt(1 / subjectsPerStage[treatmentArm, k] + 1 / subjectsPerStage[gMax + 1, k]))
                }
                # cumulative (sample-size weighted) effect estimate over stages 1..k
                overallEffects[treatmentArm, k] <-
                    subjectsPerStage[treatmentArm, 1:k] %*% simMeans[treatmentArm, 1:k] /
                        sum(subjectsPerStage[treatmentArm, 1:k]) -
                    subjectsPerStage[gMax + 1, 1:k] %*% simMeans[gMax + 1, 1:k] /
                        sum(subjectsPerStage[gMax + 1, 1:k])
                overallTestStatistics[treatmentArm, k] <-
                    overallEffects[treatmentArm, k] /
                        (stDev * sqrt(1 / sum(subjectsPerStage[treatmentArm, 1:k]) + 1 / sum(subjectsPerStage[gMax + 1, 1:k])))
                # one-sided p-value of the stage-wise statistic
                separatePValues[treatmentArm, k] <- 1 - stats::pnorm(testStatistics[treatmentArm, k])
            }
        }
        if (k < kMax) {
            # only the control group is still selected: nothing left to test
            if (colSums(selectedArms)[k] == 1) {
                break
            }

            # Bonferroni adjustment
            adjustedPValues[k] <- min(min(separatePValues[, k], na.rm = TRUE) *
                (colSums(selectedArms)[k] - 1), 1 - 1e-7)

            # conditional critical value to reject the null hypotheses at the next stage of the trial
            if (.isTrialDesignConditionalDunnett(design)) {
                conditionalCriticalValue[k] <- (.getOneMinusQNorm(design$alpha) -
                    .getOneMinusQNorm(adjustedPValues[k]) * sqrt(design$informationAtInterim)) /
                    sqrt(1 - design$informationAtInterim)
            } else {
                if (.isTrialDesignFisher(design)) {
                    conditionalCriticalValue[k] <-
                        .getOneMinusQNorm(min((design$criticalValues[k + 1] /
                            prod(adjustedPValues[1:k]^weights[1:k]))^(1 / weights[k + 1]), 1 - 1e-7))
                } else {
                    # inverse normal combination: critical value conditional on stages 1..k
                    conditionalCriticalValue[k] <- (design$criticalValues[k + 1] *
                        sqrt(design$informationRates[k + 1]) -
                        .getOneMinusQNorm(adjustedPValues[1:k]) %*% weights[1:k]) /
                        sqrt(design$informationRates[k + 1] - design$informationRates[k])
                }
            }

            if (adaptations[k]) {
                # select the arms that are carried forward to stage k + 1,
                # based on either the overall test statistics or the effect estimates
                if (effectMeasure == "testStatistic") {
                    selectedArms[, k + 1] <- (selectedArms[, k] & .selectTreatmentArms(
                        k, overallTestStatistics[, k], typeOfSelection, epsilonValue, rValue, threshold, selectArmsFunction
                    ))
                } else if (effectMeasure == "effectEstimate") {
                    selectedArms[, k + 1] <- (selectedArms[, k] & .selectTreatmentArms(
                        k, overallEffects[, k], typeOfSelection, epsilonValue, rValue, threshold, selectArmsFunction
                    ))
                }

                newSubjects <- calcSubjectsFunction(
                    stage = k + 1, # to be consistent with non-multiarm situation, cf. line 37
                    conditionalPower = conditionalPower,
                    conditionalCriticalValue = conditionalCriticalValue,
                    plannedSubjects = plannedSubjects,
                    allocationRatioPlanned = allocationRatioPlanned,
                    selectedArms = selectedArms,
                    thetaH1 = thetaH1,
                    stDevH1 = stDevH1,
                    overallEffects = overallEffects,
                    minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage,
                    maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage
                )
                # guard against invalid results from user-defined sample size functions
                if (is.null(newSubjects) || length(newSubjects) != 1 ||
                        !is.numeric(newSubjects) || is.na(newSubjects) || newSubjects < 0) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "'calcSubjectsFunction' returned an illegal or undefined result (", newSubjects, "); ",
                        "the output must be a single numeric value >= 0"
                    )
                }
                if (!is.na(conditionalPower) || calcSubjectsFunctionIsUserDefined) {
                    # re-plan the remaining stages with the recalculated increment
                    plannedSubjects[(k + 1):kMax] <- sum(subjectsPerStage[gMax + 1, 1:k] *
                        allocationRatioPlanned[1:k]) + cumsum(rep(newSubjects, kMax - k))
                }
            } else {
                selectedArms[, k + 1] <- selectedArms[, k]
            }

            if (is.na(thetaH1)) {
                thetaStandardized <- max(min(overallEffects[selectedArms[1:gMax, k], k] / stDevH1,
                    na.rm = TRUE), 1e-12)
            } else {
                thetaStandardized <- thetaH1 / stDevH1
            }
            # conditional power for stage k + 1 given the (re)planned increment
            conditionalPowerPerStage[k] <- 1 - stats::pnorm(conditionalCriticalValue[k] -
                thetaStandardized * sqrt(plannedSubjects[k + 1] - plannedSubjects[k]) *
                sqrt(1 / (1 + allocationRatioPlanned[k])))
        }
    }
    return(list(
        subjectsPerStage = subjectsPerStage,
        allocationRatioPlanned = allocationRatioPlanned,
        overallEffects = overallEffects,
        testStatistics = testStatistics,
        overallTestStatistics = overallTestStatistics,
        separatePValues = separatePValues,
        conditionalCriticalValue = conditionalCriticalValue,
        conditionalPowerPerStage = conditionalPowerPerStage,
        selectedArms = selectedArms
    ))
}

#'
#' @title
#' Get Simulation Multi-Arm Means
#'
#' @description
#' Returns the simulated power, stopping and selection probabilities, conditional power,
#' and expected sample size for testing means in a multi-arm treatment groups testing situation.
#'
#' @param muMaxVector Range of effect sizes for the treatment group with highest response
#' for \code{"linear"} and \code{"sigmoidEmax"} model, default is \code{seq(0, 1, 0.2)}.
#' @inheritParams param_intersectionTest_MultiArm
#' @inheritParams param_typeOfSelection
#' @inheritParams param_effectMeasure
#' @inheritParams param_adaptations
#' @inheritParams param_threshold
#' @inheritParams param_effectMatrix
#' @inheritParams param_stDevSimulation
#' @inheritParams param_activeArms
#' @inheritParams param_successCriterion
#' @inheritParams param_typeOfShape
#' @inheritParams param_typeOfSelection
#' @inheritParams param_design_with_default
#' @inheritParams param_allocationRatioPlanned
#' @inheritParams param_plannedSubjects
#' @inheritParams param_minNumberOfSubjectsPerStage
#' @inheritParams param_maxNumberOfSubjectsPerStage
#' @inheritParams param_conditionalPowerSimulation
#' @inheritParams param_thetaH1
#' @inheritParams param_stDevH1
#' @inheritParams param_maxNumberOfIterations
#' @inheritParams param_calcSubjectsFunction
#' @inheritParams param_selectArmsFunction
#' @inheritParams param_rValue
#' @inheritParams param_epsilonValue
#' @inheritParams param_gED50
#' @inheritParams param_slope
#' @inheritParams param_seed
#' @inheritParams param_three_dots
#' @inheritParams param_showStatistics
#'
#' @details
#' At given design the function simulates the power, stopping probabilities, selection probabilities,
#' and expected sample size at given number of subjects, parameter configuration, and treatment arm
#' selection rule in the multi-arm situation.
#' An allocation ratio can be specified referring to the ratio of number of subjects in the active
#' treatment groups as compared to the control group.
#'
#' The definition of \code{thetaH1} and/or \code{stDevH1} makes only sense if \code{kMax} > 1
#' and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and
#' \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined.
#'
#' \code{calcSubjectsFunction}\cr
#' This function returns the number of subjects at given conditional power and conditional
#' critical value for specified testing situation. The function might depend on the variables
#' \code{stage},
#' \code{selectedArms},
#' \code{plannedSubjects},
#' \code{allocationRatioPlanned},
#' \code{minNumberOfSubjectsPerStage},
#' \code{maxNumberOfSubjectsPerStage},
#' \code{conditionalPower},
#' \code{conditionalCriticalValue},
#' \code{overallEffects}, and
#' \code{stDevH1}.
#' The function has to contain the three-dots argument '...' (see examples).
#'
#' @template return_object_simulation_results
#' @template how_to_get_help_for_generics
#'
#' @template examples_get_simulation_multiarm_means
#'
#' @export
#'
getSimulationMultiArmMeans <- function(design = NULL, ...,
        activeArms = 3L, # C_ACTIVE_ARMS_DEFAULT
        effectMatrix = NULL,
        typeOfShape = c("linear", "sigmoidEmax", "userDefined"), # C_TYPE_OF_SHAPE_DEFAULT
        muMaxVector = seq(0, 1, 0.2), # C_ALTERNATIVE_POWER_SIMULATION_DEFAULT
        gED50 = NA_real_,
        slope = 1,
        intersectionTest = c("Dunnett", "Bonferroni", "Simes", "Sidak", "Hierarchical"), # C_INTERSECTION_TEST_MULTIARMED_DEFAULT
        stDev = 1, # C_STDEV_DEFAULT
        adaptations = NA,
        typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), # C_TYPE_OF_SELECTION_DEFAULT
        effectMeasure = c("effectEstimate", "testStatistic"), # C_EFFECT_MEASURE_DEFAULT
        successCriterion = c("all", "atLeastOne"), # C_SUCCESS_CRITERION_DEFAULT
        epsilonValue = NA_real_,
        rValue = NA_real_,
        threshold = -Inf,
        plannedSubjects = NA_integer_,
        allocationRatioPlanned = NA_real_,
        minNumberOfSubjectsPerStage = NA_real_,
        maxNumberOfSubjectsPerStage = NA_real_,
        conditionalPower = NA_real_,
        thetaH1 = NA_real_,
        stDevH1 = NA_real_,
        maxNumberOfIterations = 1000L, # C_MAX_SIMULATION_ITERATIONS_DEFAULT
        seed = NA_real_,
        calcSubjectsFunction = NULL,
        selectArmsFunction = NULL,
        showStatistics = FALSE) {
    if (is.null(design)) {
        design <- .getDefaultDesign(..., type = "simulation")
        .warnInCaseOfUnknownArguments(
            functionName = "getSimulationMultiArmMeans",
            ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
                design,
                powerCalculationEnabled =
                    TRUE
            ), "showStatistics"), ...
        )
    } else {
        .assertIsTrialDesignInverseNormalOrFisherOrConditionalDunnett(design)
        .warnInCaseOfUnknownArguments(
            functionName = "getSimulationMultiArmMeans",
            ignore = "showStatistics", ...
        )
        .warnInCaseOfTwoSidedPowerArgument(...)
    }

    .assertIsOneSidedDesign(design, designType = "multi-arm", engineType = "simulation")

    calcSubjectsFunctionIsUserDefined <- !is.null(calcSubjectsFunction)

    # validates the arguments and creates the (not yet filled) result object
    simulationResults <- .createSimulationResultsMultiArmObject(
        design = design,
        activeArms = activeArms,
        effectMatrix = effectMatrix,
        typeOfShape = typeOfShape,
        muMaxVector = muMaxVector, # means only
        gED50 = gED50,
        slope = slope,
        intersectionTest = intersectionTest,
        stDev = stDev, # means only
        adaptations = adaptations,
        typeOfSelection = typeOfSelection,
        effectMeasure = effectMeasure,
        successCriterion = successCriterion,
        epsilonValue = epsilonValue,
        rValue = rValue,
        threshold = threshold,
        plannedSubjects = plannedSubjects, # means + rates only
        allocationRatioPlanned = allocationRatioPlanned,
        minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, # means + rates only
        maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, # means + rates only
        conditionalPower = conditionalPower,
        thetaH1 = thetaH1, # means + survival only
        stDevH1 = stDevH1, # means only
        maxNumberOfIterations = maxNumberOfIterations,
        seed = seed,
        calcSubjectsFunction = calcSubjectsFunction, # means + rates only
        selectArmsFunction = selectArmsFunction,
        showStatistics = showStatistics,
        endpoint = "means"
    )

    # read back the validated/normalized values from the result object
    design <- simulationResults$.design
    successCriterion <- simulationResults$successCriterion
    effectMeasure <- simulationResults$effectMeasure
    adaptations <- simulationResults$adaptations
    gMax <- activeArms
    kMax <- simulationResults$.design$kMax
    intersectionTest <- simulationResults$intersectionTest
    typeOfSelection <- simulationResults$typeOfSelection
    effectMatrix <- t(simulationResults$effectMatrix)
    muMaxVector <- simulationResults$muMaxVector # means only
    thetaH1 <- simulationResults$thetaH1 # means + survival only
    stDevH1 <- simulationResults$stDevH1 # means only
    conditionalPower <- simulationResults$conditionalPower
    minNumberOfSubjectsPerStage <- simulationResults$minNumberOfSubjectsPerStage
    maxNumberOfSubjectsPerStage <- simulationResults$maxNumberOfSubjectsPerStage
    allocationRatioPlanned <- simulationResults$allocationRatioPlanned
    calcSubjectsFunction <- simulationResults$calcSubjectsFunction

    # recycle a single allocation ratio to one value per stage
    if (length(allocationRatioPlanned) == 1) {
        allocationRatioPlanned <- rep(allocationRatioPlanned, kMax)
    }

    indices <- .getIndicesOfClosedHypothesesSystemForSimulation(gMax = gMax)

    if (.isTrialDesignConditionalDunnett(design)) {
        criticalValuesDunnett <- .getCriticalValuesDunnettForSimulation(
            alpha = design$alpha, indices = indices,
            allocationRatioPlanned = allocationRatioPlanned
        )
    }

    cols <- length(muMaxVector)

    # accumulators over all iterations; third array dimension is the arm
    # (index gMax + 1 = control group)
    simulatedSelections <- array(0, dim = c(kMax, cols, gMax + 1))
    simulatedRejections <- array(0, dim = c(kMax, cols, gMax))
    simulatedNumberOfActiveArms <- matrix(0, nrow = kMax, ncol = cols)
    simulatedSubjectsPerStage <- array(0, dim = c(kMax, cols, gMax + 1))
    simulatedSuccessStopping <- matrix(0, nrow = kMax, ncol = cols)
    # NOTE(review): the unnamed second argument 'cols * (kMax - 1)' is absorbed
    # by 'byrow' (since 'nrow' and 'ncol' are given by name) and is irrelevant
    # for a scalar fill - presumably a leftover; verify intent.
    simulatedFutilityStopping <- matrix(0, cols * (kMax - 1), nrow = kMax - 1, ncol = cols)
    simulatedConditionalPower <- matrix(0, nrow = kMax, ncol = cols)
    simulatedRejectAtLeastOne <- rep(0, cols)
    expectedNumberOfSubjects <- rep(0, cols)
    iterations <- matrix(0, nrow = kMax, ncol = cols)

    # raw per-iteration/stage/arm records, later stored in the .data slot
    len <- maxNumberOfIterations * kMax * gMax * cols
    dataIterationNumber <- rep(NA_real_, len)
    dataStageNumber <- rep(NA_real_, len)
    dataArmNumber <- rep(NA_real_, len)
    dataAlternative <- rep(NA_real_, len)
    dataEffect <- rep(NA_real_, len)
    dataSubjectsControlArm <- rep(NA_real_, len)
    dataSubjectsActiveArm <- rep(NA_real_, len)
    dataNumberOfSubjects <- rep(NA_real_, len)
    dataNumberOfCumulatedSubjects <- rep(NA_real_, len)
    dataRejectPerStage <- rep(NA, len)
    dataFutilityStop <- rep(NA_real_, len)
    dataSuccessStop <- rep(NA, len)
    # NOTE(review): dataFutilityStop was already initialized above with
    # NA_real_; this second assignment is the effective one - verify intent.
    dataFutilityStop <- rep(NA, len)
    dataTestStatistics <- rep(NA_real_, len)
    dataConditionalCriticalValue <- rep(NA_real_, len)
    dataConditionalPowerAchieved <- rep(NA_real_, len)
    dataEffectEstimate <- rep(NA_real_, len)
    dataPValuesSeparate <- rep(NA_real_, len)

    if (is.na(stDevH1)) {
        stDevH1 <- stDev
    }

    index <- 1
    for (i in 1:cols) {
        for (j in 1:maxNumberOfIterations) {
            # simulate one complete trial for effect configuration i
            stageResults <- .getSimulatedStageMeansMultiArm(
                design = design,
                muVector = effectMatrix[i, ],
                stDev = stDev,
                plannedSubjects = plannedSubjects,
                typeOfSelection = typeOfSelection,
                effectMeasure = effectMeasure,
                adaptations = adaptations,
                epsilonValue = epsilonValue,
                rValue = rValue,
                threshold = threshold,
                allocationRatioPlanned = allocationRatioPlanned,
                minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage,
                maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage,
                conditionalPower = conditionalPower,
                thetaH1 = thetaH1,
                stDevH1 = stDevH1,
                calcSubjectsFunction = calcSubjectsFunction,
                calcSubjectsFunctionIsUserDefined = calcSubjectsFunctionIsUserDefined,
                selectArmsFunction = selectArmsFunction
            )

            # apply the closed testing procedure to the simulated stage results
            if (.isTrialDesignConditionalDunnett(design)) {
                closedTest <- .performClosedConditionalDunnettTestForSimulation(
                    stageResults = stageResults, design = design, indices = indices,
                    criticalValuesDunnett = criticalValuesDunnett, successCriterion = successCriterion
                )
            } else {
                closedTest <- .performClosedCombinationTestForSimulationMultiArm(
                    stageResults = stageResults, design = design, indices = indices,
                    intersectionTest = intersectionTest, successCriterion = successCriterion
                )
            }

            rejectAtSomeStage <- FALSE
            rejectedArmsBefore <- rep(FALSE, gMax)

            for (k in 1:kMax) {
                # a hypothesis counts as rejected at stage k if it is rejected
                # now for a selected arm or was already rejected earlier
                simulatedRejections[k, i, ] <- simulatedRejections[k, i, ] +
                    (closedTest$rejected[, k] & closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore)
                simulatedSelections[k, i, ] <- simulatedSelections[k, i, ] + closedTest$selectedArms[, k]
                simulatedNumberOfActiveArms[k, i] <- simulatedNumberOfActiveArms[k, i] + sum(closedTest$selectedArms[, k])

                if (!any(is.na(closedTest$successStop))) {
                    simulatedSuccessStopping[k, i] <- simulatedSuccessStopping[k, i] + closedTest$successStop[k]
                }

                if ((kMax > 1) && (k < kMax)) {
                    # futility stops only count when not simultaneously a success stop
                    if (!any(is.na(closedTest$futilityStop))) {
                        simulatedFutilityStopping[k, i] <- simulatedFutilityStopping[k, i] +
                            (closedTest$futilityStop[k] && !closedTest$successStop[k])
                    }
                    if (!closedTest$successStop[k] && !closedTest$futilityStop[k]) {
                        simulatedConditionalPower[k + 1, i] <- simulatedConditionalPower[k + 1, i] +
                            stageResults$conditionalPowerPerStage[k]
                    }
                }

                iterations[k, i] <- iterations[k, i] + 1

                for (g in (1:(gMax + 1))) {
                    if (!is.na(stageResults$subjectsPerStage[g, k])) {
                        simulatedSubjectsPerStage[k, i, g] <- simulatedSubjectsPerStage[k, i, g] +
                            stageResults$subjectsPerStage[g, k]
                    }
                }

                # record the raw per-arm data of this stage
                for (g in 1:gMax) {
                    dataIterationNumber[index] <- j
                    dataStageNumber[index] <- k
                    dataArmNumber[index] <- g
                    dataAlternative[index] <- muMaxVector[i]
                    dataEffect[index] <- effectMatrix[i, g]
                    dataSubjectsControlArm[index] <- round(stageResults$subjectsPerStage[gMax + 1, k], 1)
                    dataSubjectsActiveArm[index] <- round(stageResults$subjectsPerStage[g, k], 1)
                    dataNumberOfSubjects[index] <- round(sum(stageResults$subjectsPerStage[, k], na.rm = TRUE), 1)
                    dataNumberOfCumulatedSubjects[index] <- round(sum(stageResults$subjectsPerStage[, 1:k], na.rm = TRUE), 1)
                    dataRejectPerStage[index] <- closedTest$rejected[g, k]
                    dataTestStatistics[index] <- stageResults$testStatistics[g, k]
                    dataSuccessStop[index] <- closedTest$successStop[k]
                    if (k < kMax) {
                        dataFutilityStop[index] <- closedTest$futilityStop[k]
                        dataConditionalCriticalValue[index] <- stageResults$conditionalCriticalValue[k]
                        # NOTE(review): written to 'index + 1', not 'index' -
                        # verify this offset is intended
                        dataConditionalPowerAchieved[index + 1] <- stageResults$conditionalPowerPerStage[k]
                    }
                    dataEffectEstimate[index] <- stageResults$overallEffects[g, k]
                    dataPValuesSeparate[index] <- closedTest$separatePValues[g, k]
                    index <- index + 1
                }

                if (!rejectAtSomeStage && any(closedTest$rejected[, k] &
                        closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore)) {
                    simulatedRejectAtLeastOne[i] <- simulatedRejectAtLeastOne[i] + 1
                    rejectAtSomeStage <- TRUE
                }

                if ((k < kMax) && (closedTest$successStop[k] || closedTest$futilityStop[k])) {
                    # rejected hypotheses remain rejected also in case of early stopping
                    simulatedRejections[(k + 1):kMax, i, ] <- simulatedRejections[(k + 1):kMax, i, ] +
                        matrix((closedTest$rejected[, k] & closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore),
                            kMax - k, gMax,
                            byrow = TRUE
                        )
                    break
                }

                rejectedArmsBefore <- closedTest$rejected[, k] &
                    closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore
            }
        }

        # average the accumulated subject counts over the completed iterations
        simulatedSubjectsPerStage[is.na(simulatedSubjectsPerStage)] <- 0
        simulatedSubjectsPerStage[, i, ] <- simulatedSubjectsPerStage[, i, ] / iterations[, i]

        if (kMax > 1) {
            # convert cumulative rejection counts into stage-wise increments
            simulatedRejections[2:kMax, i, ] <- simulatedRejections[2:kMax, i, ] -
                simulatedRejections[1:(kMax - 1), i, ]
            stopping <- cumsum(simulatedSuccessStopping[1:(kMax - 1), i] +
                simulatedFutilityStopping[, i]) / maxNumberOfIterations
            # expected sample size: stage 1 plus later stages weighted by the
            # probability of not having stopped before
            expectedNumberOfSubjects[i] <- sum(simulatedSubjectsPerStage[1, i, ] +
                t(1 - stopping) %*% simulatedSubjectsPerStage[2:kMax, i, ])
        } else {
            expectedNumberOfSubjects[i] <- sum(simulatedSubjectsPerStage[1, i, ])
        }
    }

    # conditional power is undefined at the first stage
    simulatedConditionalPower[1, ] <- NA_real_
    if (kMax > 1) {
        simulatedConditionalPower[2:kMax, ] <- simulatedConditionalPower[2:kMax, ] / iterations[2:kMax, ]
    }

    # convert accumulated counts into relative frequencies
    simulationResults$numberOfActiveArms <- simulatedNumberOfActiveArms / iterations - 1
    simulationResults$rejectAtLeastOne <- simulatedRejectAtLeastOne / maxNumberOfIterations
    simulationResults$selectedArms <- simulatedSelections / maxNumberOfIterations
    simulationResults$rejectedArmsPerStage <- simulatedRejections / maxNumberOfIterations
    simulationResults$successPerStage <- simulatedSuccessStopping / maxNumberOfIterations
    simulationResults$futilityPerStage <- simulatedFutilityStopping / maxNumberOfIterations
    simulationResults$futilityStop <- base::colSums(simulatedFutilityStopping / maxNumberOfIterations)
    if (kMax > 1) {
        simulationResults$earlyStop <- simulationResults$futilityPerStage +
            simulationResults$successPerStage[1:(kMax - 1), ]
        simulationResults$conditionalPowerAchieved <- simulatedConditionalPower
    }
    simulationResults$sampleSizes <- simulatedSubjectsPerStage
    simulationResults$expectedNumberOfSubjects <- expectedNumberOfSubjects
    simulationResults$iterations <- iterations

    if (!all(is.na(simulationResults$conditionalPowerAchieved))) {
        simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED)
    }

    # negative stage-wise rejection rates indicate an inconsistent accumulation
    if (any(simulationResults$rejectedArmsPerStage < 0)) {
        stop(
            C_EXCEPTION_TYPE_RUNTIME_ISSUE,
            "internal error, simulation not possible due to numerical overflow"
        )
    }

    data <- data.frame(
        iterationNumber = dataIterationNumber,
        stageNumber = dataStageNumber,
        armNumber = dataArmNumber,
        muMax = dataAlternative,
        effect = dataEffect,
        numberOfSubjects = dataNumberOfSubjects,
        numberOfCumulatedSubjects = dataNumberOfCumulatedSubjects,
        subjectsControlArm = dataSubjectsControlArm,
        subjectsActiveArm = dataSubjectsActiveArm,
        effectEstimate = dataEffectEstimate,
        testStatistic = dataTestStatistics,
        pValue = dataPValuesSeparate,
        conditionalCriticalValue = round(dataConditionalCriticalValue, 6),
        conditionalPowerAchieved = round(dataConditionalPowerAchieved, 6),
        rejectPerStage = dataRejectPerStage,
        successStop = dataSuccessStop,
        futilityPerStage = dataFutilityStop
    )
    # drop pre-allocated rows that were never filled
    data <- data[!is.na(data$effectEstimate), ]
    simulationResults$.data <- data

    return(simulationResults)
}
rpact/R/class_simulation_results.R0000644000176200001440000033753114445307575017063 0ustar liggesusers## |
## | *Simulation result classes*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website:
## | https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_core_utilities.R
NULL

#'
#' @title
#' Names of a Simulation Results Object
#'
#' @description
#' Function to get the names of a \code{\link{SimulationResults}} object.
#'
#' @param x A \code{\link{SimulationResults}} object created by \code{getSimulationResults[MultiArm/Enrichment][Means/Rates/Survival]}.
#'
#' @details
#' Returns the names of a simulation results that can be accessed by the user.
#'
#' @template return_names
#'
#' @export
#'
#' @keywords internal
#'
names.SimulationResults <- function(x) {
    # hidden (dot-prefixed) fields that shall nevertheless be user-accessible
    namesToShow <- c(".design", ".data", ".rawData")
    if (inherits(x, "SimulationResultsSurvival")) {
        namesToShow <- c(namesToShow, ".piecewiseSurvivalTime", ".accrualTime")
    }
    namesToShow <- c(namesToShow, x$.getVisibleFieldNames())
    return(namesToShow)
}

#'
#' @name SimulationResults
#'
#' @title
#' Class for Simulation Results
#'
#' @description
#' A class for simulation results.
#'
#' @template field_seed
#' @template field_iterations
#'
#' @details
#' \code{SimulationResults} is the basic class for
#' \itemize{
#' \item \code{\link{SimulationResultsMeans}},
#' \item \code{\link{SimulationResultsRates}},
#' \item \code{\link{SimulationResultsSurvival}},
#' \item \code{\link{SimulationResultsMultiArmMeans}},
#' \item \code{\link{SimulationResultsMultiArmRates}},
#' \item \code{\link{SimulationResultsMultiArmSurvival}},
#' \item \code{\link{SimulationResultsEnrichmentMeans}},
#' \item \code{\link{SimulationResultsEnrichmentRates}}, and
#' \item \code{\link{SimulationResultsEnrichmentSurvival}}.
#' }
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#' @include f_simulation_utilities.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SimulationResults <- setRefClass("SimulationResults",
    contains = "ParameterSet",
    fields = list(
        .plotSettings = "PlotSettings",
        .design = "TrialDesign",
        .data = "data.frame",
        .rawData = "data.frame",
        .showStatistics = "logical",
        maxNumberOfIterations = "integer",
        seed = "numeric",
        allocationRatioPlanned = "numeric",
        conditionalPower = "numeric",
        iterations = "matrix",
        futilityPerStage = "matrix",
        futilityStop = "numeric"
    ),
    methods = list(
        initialize = function(design, ..., showStatistics = FALSE) {
            callSuper(.design = design, .showStatistics = showStatistics, ...)
            .plotSettings <<- PlotSettings()
            .parameterNames <<- .getParameterNames(design = design, designPlan = .self)
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS
        },
        getPlotSettings = function() {
            return(.plotSettings)
        },
        setShowStatistics = function(showStatistics) {
            .assertIsSingleLogical(showStatistics, "showStatistics")
            .showStatistics <<- showStatistics
        },
        # public print entry point; delegates to the internal .show method
        show = function(showType = 1, digits = NA_integer_, showStatistics = FALSE) {
            .show(
                showType = showType, digits = digits, showStatistics = showStatistics,
                consoleOutputEnabled = TRUE
            )
        },
        .show = function(..., showType = 1, digits = NA_integer_, showStatistics = FALSE,
                consoleOutputEnabled = TRUE, performanceScore = NULL) {
            "Method for automatically printing simulation result objects"
            .resetCat()
            if (showType == 3) {
                # summary view
                .createSummary(.self, digits = digits)$.show(
                    showType = 1, digits = digits,
                    consoleOutputEnabled = consoleOutputEnabled
                )
            } else if (showType == 2) {
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                if (is.null(showStatistics) || length(showStatistics) != 1) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "'showStatistics' (", .arrayToString(showStatistics), ") must be a single logical or character"
                    )
                }

                # showStatistics == "exclusive" suppresses the parameter listing
                if (!is.character(showStatistics) || showStatistics != "exclusive") {
                    .cat(.toString(startWithUpperCase = TRUE), ":\n\n",
                        heading = 1,
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                    .showParametersOfOneGroup(.getDesignParametersToShow(.self), "Design parameters",
                        orderByParameterName = FALSE,
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                    userDefinedParameters <- .getUserDefinedParameters()
                    if (inherits(.self, "SimulationResultsSurvival") &&
                            .self$.piecewiseSurvivalTime$delayedResponseEnabled) {
                        userDefinedParameters <- c(
                            userDefinedParameters,
                            ".piecewiseSurvivalTime$delayedResponseEnabled"
                        )
                    }
                    .showParametersOfOneGroup(userDefinedParameters, "User defined parameters",
                        orderByParameterName = FALSE,
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                    derivedParameters <- .getDerivedParameters()
                    if (length(derivedParameters) > 0) {
                        .showParametersOfOneGroup(derivedParameters, "Derived from user defined parameters",
                            orderByParameterName = FALSE,
                            consoleOutputEnabled = consoleOutputEnabled
                        )
                    }
                    .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters",
                        orderByParameterName = FALSE,
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                    .showParametersOfOneGroup(.getGeneratedParameters(), "Results",
                        orderByParameterName = FALSE,
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                    .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
                }

                ## statistics of simulated data
                if (isTRUE(showStatistics) || .showStatistics ||
                        (is.character(showStatistics) && showStatistics == "exclusive")) {
                    .cat("Simulated data:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                    # which columns of .data shall be summarized depends on the subclass
                    params <- c()
                    if (inherits(.self, "SimulationResultsMeans")) {
                        params <- c(
                            "effectMeasure",
                            "numberOfSubjects",
                            "testStatistic"
                        )
                    } else if (inherits(.self, "SimulationResultsRates")) {
                        params <- c(
                            "effectMeasure",
                            "numberOfSubjects",
                            "testStatistic"
                        )
                    } else if (inherits(.self, "SimulationResultsSurvival")) {
                        params <- c(
                            "effectMeasure",
                            "analysisTime",
                            "numberOfSubjects",
                            "eventsPerStage1",
                            "eventsPerStage2",
                            "eventsPerStage",
                            "testStatistic",
                            "logRankStatistic",
                            "hazardRatioEstimateLR"
                        )
                    } else if (inherits(.self, "SimulationResultsMultiArmMeans") ||
                            inherits(.self, "SimulationResultsMultiArmRates")) {
                        params <- c(
                            "effectMeasure",
                            "subjectsActiveArm",
                            "testStatistic",
                            "conditionalCriticalValue",
                            "rejectPerStage",
                            "successStop",
                            "futilityPerStage"
                        )
                    } else if (inherits(.self, "SimulationResultsEnrichmentMeans") ||
                            inherits(.self, "SimulationResultsEnrichmentRates")) {
                        params <- c(
                            "effectMeasure",
                            "subjectsPopulation",
                            "testStatistic",
                            "conditionalCriticalValue",
                            "rejectPerStage",
                            "successStop",
                            "futilityPerStage"
                        )
                    } else if (inherits(.self, "SimulationResultsMultiArmSurvival") ||
                            inherits(.self, "SimulationResultsEnrichmentSurvival")) {
                        params <- c(
                            "effectMeasure",
                            "numberOfEvents",
                            "singleNumberOfEventsPerStage",
                            "testStatistic",
                            "conditionalCriticalValue",
                            "rejectPerStage",
                            "successStop",
                            "futilityPerStage"
                        )
                    }
                    # only show conditional power when it was actually generated
                    if (!is.null(.self[["conditionalPowerAchieved"]]) &&
                            !all(is.na(conditionalPowerAchieved)) &&
                            any(!is.na(conditionalPowerAchieved)) &&
                            any(na.omit(conditionalPowerAchieved) != 0)) {
                        params <- c(params, "conditionalPowerAchieved")
                    }
                    stages <- sort(unique(.self$.data$stageNumber))
                    variedParameterName1 <- .getVariedParameterName(1)
                    variedParameterName2 <- .getVariedParameterName(2)
                    parameterValues1 <- .getVariedParameterValues(variedParameterName1)
                    parameterValues2 <- .getVariedParameterValues(variedParameterName2)
                    # one statistics line per parameter x varied value(s) x stage
                    for (parameterName in params) {
                        paramCaption <- .parameterNames[[parameterName]]
                        if (is.null(paramCaption)) {
                            paramCaption <- paste0("%", parameterName, "%")
                        }
                        for (parameterValue1 in parameterValues1) {
                            for (parameterValue2 in parameterValues2) {
                                for (stage in stages) {
                                    if (length(parameterValues1) > 1) {
                                        .catStatisticsLine(
                                            stage = stage,
                                            parameterName = parameterName,
                                            paramCaption = paramCaption,
                                            parameterValue1 = parameterValue1,
                                            variedParameterName1 = variedParameterName1,
                                            parameterValue2 = parameterValue2,
                                            variedParameterName2 = variedParameterName2,
                                            consoleOutputEnabled = consoleOutputEnabled
                                        )
                                    } else {
                                        # single varied parameter: shift the second one into slot 1
                                        .catStatisticsLine(
                                            stage = stage,
                                            parameterName = parameterName,
                                            paramCaption = paramCaption,
                                            parameterValue1 = parameterValue2,
                                            variedParameterName1 = variedParameterName2,
                                            consoleOutputEnabled = consoleOutputEnabled
                                        )
                                    }
                                }
                            }
                            # the control arm column is shown once per active-arm listing
                            if (parameterName == "subjectsActiveArm" && variedParameterName2 == "armNumber") {
                                parameterName2 <- "subjectsControlArm"
                                paramCaption2 <- .parameterNames[[parameterName2]]
                                if (is.null(paramCaption2)) {
                                    paramCaption2 <- paste0("%", parameterName2, "%")
                                }
                                for (stage in stages) {
                                    .catStatisticsLine(
                                        stage = stage,
                                        parameterName = parameterName2,
                                        paramCaption = paramCaption2,
                                        parameterValue1 = parameterValue1,
                                        variedParameterName1 = variedParameterName1,
                                        parameterValue2 = unique(parameterValues2),
                                        variedParameterName2 = variedParameterName2,
                                        consoleOutputEnabled = consoleOutputEnabled
                                    )
                                }
                            }
                        }
                    }
                    .cat("\n", consoleOutputEnabled = consoleOutputEnabled)
                }

                twoGroupsEnabled <- !inherits(.self, "SimulationResultsMeans")
                multiArmSurvivalEnabled <- inherits(.self, "SimulationResultsMultiArmSurvival")
                enrichmentEnabled <- grepl("SimulationResultsEnrichment", .getClassName(.self))

                if (!is.null(performanceScore)) {
                    performanceScore$.showParametersOfOneGroup(
                        performanceScore$.getGeneratedParameters(), "Performance",
                        orderByParameterName = FALSE,
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                    performanceScore$.showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
                }

                # legend explaining the index notations used above
                if (.design$kMax > 1 || twoGroupsEnabled || multiArmSurvivalEnabled) {
                    .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                    if (multiArmSurvivalEnabled) {
                        .cat(" (i): values of treatment arm i compared to control\n", consoleOutputEnabled = consoleOutputEnabled)
                        .cat(" {j}: values of treatment arm j\n", consoleOutputEnabled = consoleOutputEnabled)
                    } else if (enrichmentEnabled) {
                        matrixName <- .getSimulationEnrichmentEffectMatrixName(.self)
                        if (nrow(.self$effectList[[matrixName]]) > 1) {
                            .cat(" (i): results of situation i\n", consoleOutputEnabled = consoleOutputEnabled)
                        }
                    } else if (twoGroupsEnabled) {
                        .cat(" (i): values of treatment arm i\n", consoleOutputEnabled = consoleOutputEnabled)
                    }
                    if (.design$kMax > 1) {
                        .cat(" [k]: values at stage k\n", consoleOutputEnabled = consoleOutputEnabled)
                    }
                    if (enrichmentEnabled) {
                        if (length(.self$effectList$subGroups) > 1) {
                            .cat(paste0(" S[i]: population i\n"), consoleOutputEnabled = consoleOutputEnabled)
                        }
                        .cat(paste0(" F: full population\n"), consoleOutputEnabled = consoleOutputEnabled)
                        if (length(.self$effectList$subGroups) > 1) {
                            .cat(paste0(" R: remaining population\n"), consoleOutputEnabled = consoleOutputEnabled)
                        }
                    }
                    .cat("\n", consoleOutputEnabled = consoleOutputEnabled)
                }
            }
        },
        # Returns the name of the varied parameter column in .data:
        # number = 1 -> the effect scale parameter (depends on the subclass),
        # number = 2 -> "armNumber" for multi-arm results, NA otherwise.
        .getVariedParameterName = function(number = 1) {
            if (number == 2) {
                if (!inherits(.self, "SimulationResultsMeans") &&
                        !inherits(.self, "SimulationResultsRates") &&
                        !inherits(.self, "SimulationResultsSurvival") &&
                        grepl("MultiArm", .getClassName(.self))) {
                    return("armNumber")
                }
                return(NA_character_)
            }
            variedParameterName1 <- NA_character_
            if (inherits(.self, "SimulationResultsMeans")) {
                variedParameterName1 <- "alternative"
            } else if (inherits(.self, "SimulationResultsRates") ||
                    inherits(.self, "SimulationResultsSurvival")) {
                variedParameterName1 <- "pi1"
            } else if (grepl("MultiArm", .getClassName(.self))) {
                if (inherits(.self, "SimulationResultsMultiArmMeans")) {
                    variedParameterName1 <- "muMax"
                } else if (inherits(.self, "SimulationResultsMultiArmRates")) {
                    variedParameterName1 <- "piMax"
                } else if (inherits(.self, "SimulationResultsMultiArmSurvival")) {
                    variedParameterName1 <- "omegaMax"
                }
            }
            return(variedParameterName1)
        },
        # Returns the distinct values of the given varied parameter in .data
        # (NA_real_ when no parameter name is available).
        .getVariedParameterValues = function(variedParameterName) {
            if (is.na(variedParameterName)) {
                return(NA_real_)
            }
            parameterValues <- .self$.data[[variedParameterName]]
            if
(is.null(parameterValues)) { return(NA_real_) } parameterValues <- unique(parameterValues) if (length(parameterValues) > 1 && !any(is.na(parameterValues))) { parameterValues <- sort(parameterValues) } return(parameterValues) }, .getVariedParameterValueString = function(variedParameterName, parameterValue) { if (variedParameterName %in% c("armNumber")) { return(paste0(" (", parameterValue[1], ")")) } variedParameterName <- sub("Max$", "_max", variedParameterName) return(paste0(", ", variedParameterName, " = ", round(parameterValue[1], 4))) }, .catStatisticsLine = function(..., stage, parameterName, paramCaption, parameterValue1, variedParameterName1, parameterValue2 = NA_real_, variedParameterName2 = NA_character_, consoleOutputEnabled = TRUE) { if (stage == 1 && parameterName == "conditionalPowerAchieved") { return(invisible()) } postfix <- "" if (!is.na(parameterValue1)) { if (!all(is.na(parameterValue2))) { postfix <- paste0(postfix, .getVariedParameterValueString( variedParameterName1, parameterValue1 )) if (parameterName != "subjectsControlArm") { postfix <- paste0(postfix, .getVariedParameterValueString( variedParameterName2, parameterValue2 )) } paramValue <- .self$.data[[parameterName]][ .self$.data$stageNumber == stage & .self$.data[[variedParameterName1]] == parameterValue1 & .self$.data[[variedParameterName2]] %in% parameterValue2 ] } else { postfix <- paste0(postfix, .getVariedParameterValueString( variedParameterName1, parameterValue1 )) paramValue <- .self$.data[[parameterName]][ .self$.data$stageNumber == stage & .self$.data[[variedParameterName1]] == parameterValue1 ] } } else { paramValue <- .self$.data[[parameterName]][ .self$.data$stageNumber == stage ] } if (.design$kMax > 1) { postfix <- paste0(postfix, " [", stage, "]") } if (!consoleOutputEnabled) { paramCaption <- paste0("*", paramCaption, "*") } variableNameFormatted <- .getFormattedVariableName( name = paramCaption, n = .getNChar(), prefix = "", postfix = postfix ) if (!is.null(paramValue)) 
{ paramValue <- stats::na.omit(paramValue) if (length(paramValue) > 0 && is.numeric(paramValue)) { paramValueFormatted <- paste0( "median [range]: ", round(stats::median(paramValue), 3), " [", paste(round(base::range(paramValue), 3), collapse = " - "), "]; ", "mean +/-sd: ", round(base::mean(paramValue), 3), " +/-", round(stats::sd(paramValue), 3) ) } else { paramValueFormatted <- "median [range]: NA [NA - NA]; mean +/sd: NA +/-NA" } output <- paste(variableNameFormatted, paramValueFormatted, "\n") if (!grepl(": median \\[range\\]: NA \\[NA - NA\\]", output)) { .cat(output, consoleOutputEnabled = consoleOutputEnabled) } } }, .toString = function(startWithUpperCase = FALSE) { s <- "simulation of" if (grepl("MultiArm", .getClassName(.self)) && !is.null(.self[["activeArms"]]) && .self$activeArms > 1) { s <- paste(s, "multi-arm") } if (grepl("Enrichment", .getClassName(.self)) && !is.null(.self[["populations"]]) && .self$populations > 1) { s <- paste(s, "enrichment") } if (inherits(.self, "SimulationResultsBaseMeans")) { s <- paste(s, "means") } else if (inherits(.self, "SimulationResultsBaseRates")) { s <- paste(s, "rates") } else if (inherits(.self, "SimulationResultsBaseSurvival")) { s <- paste(s, "survival data") } else { s <- paste(s, "results") } if (.design$kMax > 1) { if (.isTrialDesignGroupSequential(.design)) { s <- paste(s, "(group sequential design)") } else if (.isTrialDesignInverseNormal(.design)) { s <- paste(s, "(inverse normal combination test design)") } else if (.isTrialDesignFisher(.design)) { s <- paste(s, "(Fisher's combination test design)") } else if (.isTrialDesignConditionalDunnett(.design)) { s <- paste(s, "(conditional Dunnett design)") } } else { s <- paste(s, "(fixed sample size design)") } return(ifelse(startWithUpperCase, .firstCharacterToUpperCase(s), s)) }, .getParametersToShow = function() { parametersToShow <- .getVisibleFieldNames() y <- c( "eventsPerStage", "overallEventsPerStage", "iterations", "overallReject", # base 
"rejectAtLeastOne", "rejectPerStage", "rejectedArmsPerStage", "rejectedPopulationsPerStage" ) if (.design$kMax > 2) { y <- c(y, "futilityStop") } y <- c( y, "futilityPerStage", "earlyStop", # base "successPerStage", "selectedArms", "selectedPopulations", "numberOfActiveArms", "numberOfPopulations", "expectedNumberOfSubjects", "expectedNumberOfEvents", "sampleSizes", "singleNumberOfEventsPerStage", "conditionalPowerAchieved" # base ) parametersToShow <- c(parametersToShow[!(parametersToShow %in% y)], y[y %in% parametersToShow]) return(parametersToShow) }, .isSampleSizeObject = function() { return(FALSE) }, getRawDataResults = function(maxNumberOfIterations = NA_integer_) { return(.getSimulationParametersFromRawData(.self$.data, variantName = .getVariedParameterName(), maxNumberOfIterations = maxNumberOfIterations )) } ) ) SimulationResultsBaseMeans <- setRefClass("SimulationResultsBaseMeans", contains = "SimulationResults", fields = list( stDev = "numeric", plannedSubjects = "numeric", minNumberOfSubjectsPerStage = "numeric", maxNumberOfSubjectsPerStage = "numeric", thetaH1 = "numeric", stDevH1 = "numeric", calcSubjectsFunction = "ANY", expectedNumberOfSubjects = "numeric" ), methods = list( initialize = function(design, ...) { callSuper(design = design, ...) generatedParams <- c( "iterations", "expectedNumberOfSubjects", "sampleSizes", "overallReject", "rejectPerStage", "futilityPerStage", "earlyStop" ) if (design$kMax > 2) { generatedParams <- c(generatedParams, "futilityStop") } for (generatedParam in generatedParams) { .setParameterType(generatedParam, C_PARAM_GENERATED) } } ) ) #' #' @name SimulationResultsMeans #' #' @title #' Class for Simulation Results Means #' #' @description #' A class for simulation results means. 
#'
#' @template field_maxNumberOfIterations
#' @template field_seed
#' @template field_allocationRatioPlanned
#' @template field_conditionalPower
#' @template field_iterations
#' @template field_futilityPerStage
#' @template field_futilityStop
#' @template field_stDev
#' @template field_plannedSubjects
#' @template field_minNumberOfSubjectsPerStage
#' @template field_maxNumberOfSubjectsPerStage
#' @template field_thetaH1
#' @template field_stDevH1
#' @template field_calcSubjectsFunction
#' @template field_expectedNumberOfSubjects
#' @template field_meanRatio
#' @template field_thetaH0
#' @template field_normalApproximation
#' @template field_alternative
#' @template field_groups
#' @template field_directionUpper
#' @template field_effect
#' @template field_earlyStop
#' @template field_sampleSizes
#' @template field_overallReject
#' @template field_rejectPerStage
#' @template field_conditionalPowerAchieved
#'
#' @details
#' Use \code{\link[=getSimulationMeans]{getSimulationMeans()}} to create an object of this type.
#'
#' \code{SimulationResultsMeans} is the basic class for
#' \itemize{
#'   \item \code{\link{SimulationResultsMeans}},
#'   \item \code{\link{SimulationResultsMultiArmMeans}}, and
#'   \item \code{\link{SimulationResultsEnrichmentMeans}}.
#' }
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SimulationResultsMeans <- setRefClass("SimulationResultsMeans",
    contains = "SimulationResultsBaseMeans",
    # Fields specific to the classical (non-multi-arm, non-enrichment) means
    # design; the common means fields (stDev, plannedSubjects, thetaH1, ...)
    # are inherited from SimulationResultsBaseMeans.
    fields = list(
        meanRatio = "logical",
        thetaH0 = "numeric",
        normalApproximation = "logical",
        alternative = "numeric",
        groups = "integer",
        directionUpper = "logical",
        effect = "numeric",
        # Result fields filled by the simulation; most of them are flagged as
        # C_PARAM_GENERATED by the SimulationResultsBaseMeans initializer.
        earlyStop = "numeric",
        sampleSizes = "matrix",
        overallReject = "numeric", # = rejectedArmsPerStage in multi-arm
        rejectPerStage = "matrix",
        conditionalPowerAchieved = "matrix"
    ),
    methods = list(
        # Delegates entirely to the SimulationResultsBaseMeans initializer,
        # which also sets the parameter types of the generated result fields.
        initialize = function(design, ...) {
            callSuper(design = design, ...)
        }
    )
)

#'
#' @name SimulationResultsMultiArmMeans
#'
#' @title
#' Class for Simulation Results Multi-Arm Means
#'
#' @description
#' A class for simulation results means in multi-arm designs.
#' #' @template field_maxNumberOfIterations #' @template field_seed #' @template field_allocationRatioPlanned #' @template field_conditionalPower #' @template field_iterations #' @template field_futilityPerStage #' @template field_futilityStop #' @template field_stDev #' @template field_plannedSubjects #' @template field_minNumberOfSubjectsPerStage #' @template field_maxNumberOfSubjectsPerStage #' @template field_thetaH1 #' @template field_stDevH1 #' @template field_calcSubjectsFunction #' @template field_expectedNumberOfSubjects #' @template field_activeArms #' @template field_effectMatrix #' @template field_typeOfShape #' @template field_muMaxVector #' @template field_gED50 #' @template field_slope #' @template field_intersectionTest #' @template field_adaptations #' @template field_typeOfSelection #' @template field_effectMeasure #' @template field_successCriterion #' @template field_epsilonValue #' @template field_rValue #' @template field_threshold #' @template field_selectArmsFunction #' @template field_earlyStop #' @template field_selectedArms #' @template field_numberOfActiveArms #' @template field_rejectAtLeastOne #' @template field_rejectedArmsPerStage #' @template field_successPerStage #' @template field_sampleSizes #' @template field_conditionalPowerAchieved #' #' @details #' Use \code{\link[=getSimulationMultiArmMeans]{getSimulationMultiArmMeans()}} to create an object of this type. 
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SimulationResultsMultiArmMeans <- setRefClass("SimulationResultsMultiArmMeans",
    contains = "SimulationResultsBaseMeans",
    # Multi-arm specific configuration (arm selection, effect shape) and
    # per-arm result fields; common means fields come from the base class.
    fields = list(
        activeArms = "integer",
        effectMatrix = "matrix",
        typeOfShape = "character",
        muMaxVector = "numeric",
        gED50 = "numeric",
        slope = "numeric",
        intersectionTest = "character",
        adaptations = "logical",
        typeOfSelection = "character",
        effectMeasure = "character",
        successCriterion = "character",
        epsilonValue = "numeric",
        rValue = "numeric",
        threshold = "numeric",
        selectArmsFunction = "function",
        earlyStop = "matrix",
        selectedArms = "array",
        numberOfActiveArms = "matrix",
        rejectAtLeastOne = "numeric",
        rejectedArmsPerStage = "array",
        successPerStage = "matrix",
        sampleSizes = "array",
        conditionalPowerAchieved = "matrix"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # Additionally flag the multi-arm specific result fields as
            # generated parameters (the base class already flags the common ones).
            for (generatedParam in c(
                "rejectAtLeastOne", "selectedArms", "numberOfActiveArms",
                "rejectedArmsPerStage", "successPerStage"
            )) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
        }
    )
)

# Common base for all rates simulation result classes
# (SimulationResultsRates, ...MultiArmRates, ...EnrichmentRates).
SimulationResultsBaseRates <- setRefClass("SimulationResultsBaseRates",
    contains = "SimulationResults",
    fields = list(
        directionUpper = "logical",
        plannedSubjects = "numeric",
        minNumberOfSubjectsPerStage = "numeric",
        maxNumberOfSubjectsPerStage = "numeric",
        calcSubjectsFunction = "ANY",
        expectedNumberOfSubjects = "numeric"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # Result parameters produced by every rates simulation.
            generatedParams <- c(
                "iterations",
                "expectedNumberOfSubjects",
                "sampleSizes",
                "overallReject",
                "rejectPerStage",
                "futilityPerStage",
                "earlyStop"
            )
            # "futilityStop" is only reported for designs with kMax > 2.
            if (design$kMax > 2) {
                generatedParams <- c(generatedParams, "futilityStop")
            }
            for (generatedParam in generatedParams) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
        }
    )
)

#'
#' @name SimulationResultsRates
#'
#' @title
#' Class for Simulation Results Rates
#'
#' @description
#' A class for simulation results rates.
#'
#' @template field_maxNumberOfIterations
#' @template field_seed
#' @template field_allocationRatioPlanned
#' @template field_conditionalPower
#' @template field_iterations
#' @template field_futilityPerStage
#' @template field_futilityStop
#' @template field_directionUpper
#' @template field_plannedSubjects
#' @template field_maxNumberOfSubjects
#' @template field_calcSubjectsFunction
#' @template field_expectedNumberOfSubjects
#' @template field_riskRatio
#' @template field_thetaH0
#' @template field_normalApproximation
#' @template field_pi1
#' @template field_pi2
#' @template field_groups
#' @template field_pi1H1
#' @template field_pi2H1
#' @template field_effect
#' @template field_earlyStop
#' @template field_sampleSizes
#' @template field_overallReject
#' @template field_rejectPerStage
#' @template field_conditionalPowerAchieved
#'
#'
#' @details
#' Use \code{\link[=getSimulationRates]{getSimulationRates()}} to create an object of this type.
#'
#' \code{SimulationResultsRates} is the basic class for
#' \itemize{
#'   \item \code{\link{SimulationResultsRates}},
#'   \item \code{\link{SimulationResultsMultiArmRates}}, and
#'   \item \code{\link{SimulationResultsEnrichmentRates}}.
#' }
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SimulationResultsRates <- setRefClass("SimulationResultsRates",
    contains = "SimulationResultsBaseRates",
    # Fields specific to the classical (non-multi-arm, non-enrichment) rates
    # design; common rates fields are inherited from SimulationResultsBaseRates.
    fields = list(
        riskRatio = "logical",
        thetaH0 = "numeric",
        normalApproximation = "logical",
        pi1 = "numeric",
        pi2 = "numeric",
        groups = "integer",
        # NOTE(review): directionUpper is already declared in
        # SimulationResultsBaseRates; confirm this re-declaration is intentional.
        directionUpper = "logical",
        pi1H1 = "numeric",
        pi2H1 = "numeric",
        effect = "numeric",
        earlyStop = "numeric",
        sampleSizes = "matrix",
        overallReject = "numeric",
        rejectPerStage = "matrix",
        conditionalPowerAchieved = "matrix"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # NOTE(review): "eventsNotAchieved", "analysisTime" and
            # "studyDuration" are not fields of this class (they appear on the
            # survival result classes) — confirm they are intentionally marked
            # as generated here.
            generatedParams <- c(
                "effect",
                "iterations",
                "sampleSizes",
                "eventsNotAchieved",
                "expectedNumberOfSubjects",
                "overallReject",
                "rejectPerStage",
                "futilityPerStage",
                "earlyStop",
                "analysisTime",
                "studyDuration"
            )
            # "futilityStop" is only reported for designs with kMax > 2.
            if (design$kMax > 2) {
                generatedParams <- c(generatedParams, "futilityStop")
            }
            for (generatedParam in generatedParams) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
        }
    )
)

#'
#' @name SimulationResultsMultiArmRates
#'
#' @title
#' Class for Simulation Results Multi-Arm Rates
#'
#' @description
#' A class for simulation results rates in multi-arm designs.
#' #' @template field_maxNumberOfIterations #' @template field_seed #' @template field_allocationRatioPlanned #' @template field_conditionalPower #' @template field_iterations #' @template field_futilityPerStage #' @template field_futilityStop #' @template field_directionUpper #' @template field_plannedSubjects #' @template field_maxNumberOfSubjects #' @template field_calcSubjectsFunction #' @template field_expectedNumberOfSubjects #' @template field_activeArms #' @template field_effectMatrix #' @template field_typeOfShape #' @template field_piMaxVector #' @template field_piControl #' @template field_piH1 #' @template field_piControlH1 #' @template field_gED50 #' @template field_slope #' @template field_intersectionTest #' @template field_adaptations #' @template field_typeOfSelection #' @template field_effectMeasure #' @template field_successCriterion #' @template field_epsilonValue #' @template field_rValue #' @template field_threshold #' @template field_selectArmsFunction #' @template field_earlyStop #' @template field_selectedArms #' @template field_numberOfActiveArms #' @template field_rejectAtLeastOne #' @template field_rejectedArmsPerStage #' @template field_successPerStage #' @template field_sampleSizes #' @template field_conditionalPowerAchieved #' #' #' @details #' Use \code{\link[=getSimulationMultiArmRates]{getSimulationMultiArmRates()}} to create an object of this type. 
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SimulationResultsMultiArmRates <- setRefClass("SimulationResultsMultiArmRates",
    contains = "SimulationResultsBaseRates",
    # Multi-arm specific configuration (arm selection, effect shape) and
    # per-arm result fields; common rates fields come from the base class.
    fields = list(
        activeArms = "integer",
        effectMatrix = "matrix",
        typeOfShape = "character",
        piMaxVector = "numeric",
        piControl = "numeric",
        # NOTE(review): the class documentation references the templates
        # field_piH1/field_piControlH1 while this field is named
        # piTreatmentsH1 — confirm field name and documentation agree.
        piTreatmentsH1 = "numeric",
        piControlH1 = "numeric",
        gED50 = "numeric",
        slope = "numeric",
        intersectionTest = "character",
        adaptations = "logical",
        typeOfSelection = "character",
        effectMeasure = "character",
        successCriterion = "character",
        epsilonValue = "numeric",
        rValue = "numeric",
        threshold = "numeric",
        selectArmsFunction = "function",
        earlyStop = "matrix",
        selectedArms = "array",
        numberOfActiveArms = "matrix",
        rejectAtLeastOne = "numeric",
        rejectedArmsPerStage = "array",
        successPerStage = "matrix",
        sampleSizes = "array",
        conditionalPowerAchieved = "matrix"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # Additionally flag the multi-arm specific result fields as
            # generated parameters (the base class already flags the common ones).
            for (generatedParam in c(
                "rejectAtLeastOne", "selectedArms", "numberOfActiveArms",
                "rejectedArmsPerStage", "successPerStage"
            )) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
        }
    )
)

# Common base for all survival simulation result classes
# (SimulationResultsSurvival, ...MultiArmSurvival, ...EnrichmentSurvival).
SimulationResultsBaseSurvival <- setRefClass("SimulationResultsBaseSurvival",
    contains = "SimulationResults",
    fields = list(
        directionUpper = "logical",
        plannedEvents = "numeric",
        minNumberOfEventsPerStage = "numeric",
        maxNumberOfEventsPerStage = "numeric",
        thetaH1 = "numeric",
        calcEventsFunction = "ANY",
        expectedNumberOfEvents = "numeric"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # Result parameters produced by every survival simulation.
            generatedParams <- c(
                "iterations",
                "expectedNumberOfEvents",
                "eventsPerStage",
                "overallReject",
                "rejectPerStage",
                "futilityPerStage",
                "earlyStop"
            )
            # "futilityStop" is only reported for designs with kMax > 2.
            if (design$kMax > 2) {
                generatedParams <- c(generatedParams, "futilityStop")
            }
            for (generatedParam in generatedParams) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
        }
    )
)

#'
#' @name SimulationResultsSurvival
#'
#' @title
#' Class for Simulation Results Survival
#'
#' @description
#' A class for simulation results survival.
#'
#' @template field_maxNumberOfIterations
#' @template field_seed
#' @template field_allocationRatioPlanned
#' @template field_conditionalPower
#' @template field_iterations
#' @template field_futilityPerStage
#' @template field_futilityStop
#' @template field_directionUpper
#' @template field_plannedEvents
#' @template field_minNumberOfEventsPerStage
#' @template field_maxNumberOfEventsPerStage
#' @template field_thetaH1
#' @template field_calcEventsFunction
#' @template field_expectedNumberOfEvents
#' @template field_pi1_survival
#' @template field_pi2_survival
#' @template field_median1
#' @template field_median2
#' @template field_maxNumberOfSubjects
#' @template field_accrualTime
#' @template field_accrualIntensity
#' @template field_dropoutRate1
#' @template field_dropoutRate2
#' @template field_dropoutTime
#' @template field_eventTime
#' @template field_thetaH0
#' @template field_allocation1
#' @template field_allocation2
#' @template field_kappa
#' @template field_piecewiseSurvivalTime
#' @template field_lambda1
#' @template field_lambda2
#' @template field_earlyStop
#' @template field_hazardRatio
#' @template field_studyDuration
#' @template field_eventsNotAchieved
#' @template field_numberOfSubjects
#' @template field_numberOfSubjects1
#' @template field_numberOfSubjects2
#' @template field_eventsPerStage
#' @template field_overallEventsPerStage
#' @template field_expectedNumberOfSubjects
#' @template field_rejectPerStage
#' @template field_overallReject
#' @template field_conditionalPowerAchieved
#'
#' @details
#' Use \code{\link[=getSimulationSurvival]{getSimulationSurvival()}} to create an object of this type.
#'
#' \code{SimulationResultsSurvival} is the basic class for
#' \itemize{
#'   \item \code{\link{SimulationResultsSurvival}},
#'   \item \code{\link{SimulationResultsMultiArmSurvival}}, and
#'   \item \code{\link{SimulationResultsEnrichmentSurvival}}.
#' }
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SimulationResultsSurvival <- setRefClass("SimulationResultsSurvival",
    contains = "SimulationResultsBaseSurvival",
    fields = list(
        # Internal helper objects (package classes).
        .piecewiseSurvivalTime = "PiecewiseSurvivalTime",
        .accrualTime = "AccrualTime",
        # Design/assumption fields for the classical survival simulation.
        pi1 = "numeric",
        pi2 = "numeric",
        median1 = "numeric",
        median2 = "numeric",
        maxNumberOfSubjects = "numeric",
        accrualTime = "numeric",
        accrualIntensity = "numeric",
        dropoutRate1 = "numeric",
        dropoutRate2 = "numeric",
        dropoutTime = "numeric",
        eventTime = "numeric",
        thetaH0 = "numeric",
        allocation1 = "numeric",
        allocation2 = "numeric",
        kappa = "numeric",
        piecewiseSurvivalTime = "numeric",
        lambda1 = "numeric",
        lambda2 = "numeric",
        # Result fields filled by the simulation.
        earlyStop = "numeric",
        hazardRatio = "numeric",
        analysisTime = "matrix",
        studyDuration = "numeric",
        eventsNotAchieved = "matrix",
        numberOfSubjects = "matrix",
        numberOfSubjects1 = "matrix",
        numberOfSubjects2 = "matrix",
        eventsPerStage = "matrix",
        overallEventsPerStage = "matrix",
        expectedNumberOfSubjects = "numeric",
        rejectPerStage = "matrix",
        overallReject = "numeric",
        conditionalPowerAchieved = "matrix"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # NOTE(review): "singleNumberOfEventsPerStage" is not a field of
            # this class — confirm it is intentionally listed here.
            generatedParams <- c(
                "hazardRatio",
                "iterations",
                "eventsPerStage",
                "singleNumberOfEventsPerStage",
                "expectedNumberOfEvents",
                "eventsNotAchieved",
                "numberOfSubjects",
                "expectedNumberOfSubjects",
                "overallReject",
                "rejectPerStage",
                "futilityPerStage",
                "earlyStop",
                "analysisTime",
                "studyDuration",
                "allocationRatioPlanned"
            )
            # "futilityStop" is only reported for designs with kMax > 2.
            if (design$kMax > 2) {
                generatedParams <- c(generatedParams, "futilityStop")
            }
            for (generatedParam in generatedParams) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
            # Per-group subject counts and medians are not applicable by default.
            .setParameterType("numberOfSubjects1", C_PARAM_NOT_APPLICABLE)
            .setParameterType("numberOfSubjects2", C_PARAM_NOT_APPLICABLE)
            .setParameterType("median1", C_PARAM_NOT_APPLICABLE)
            .setParameterType("median2", C_PARAM_NOT_APPLICABLE)
        }
    )
)

#'
#' @name SimulationResultsMultiArmSurvival
#'
#' @title
#' Class for Simulation Results Multi-Arm Survival
#'
#' @description
#' A class for simulation results survival in multi-arm designs.
#'
#' @template field_maxNumberOfIterations
#' @template field_seed
#' @template field_allocationRatioPlanned
#' @template field_conditionalPower
#' @template field_iterations
#' @template field_futilityPerStage
#' @template field_futilityStop
#' @template field_directionUpper
#' @template field_plannedEvents
#' @template field_minNumberOfEventsPerStage
#' @template field_maxNumberOfEventsPerStage
#' @template field_expectedNumberOfEvents
#' @template field_activeArms
#' @template field_effectMatrix
#' @template field_typeOfShape
#' @template field_omegaMaxVector
#' @template field_gED50
#' @template field_slope
#' @template field_intersectionTest
#' @template field_adaptations
#' @template field_epsilonValue
#' @template field_rValue
#' @template field_threshold
#' @template field_selectArmsFunction
#' @template field_correlationComputation
#' @template field_earlyStop
#' @template field_selectedArms
#' @template field_numberOfActiveArms
#' @template field_rejectAtLeastOne
#' @template field_rejectedArmsPerStage
#' @template field_successPerStage
#' @template field_eventsPerStage
#' @template field_singleNumberOfEventsPerStage
#' @template field_conditionalPowerAchieved
#'
#' @details
#' Use \code{\link[=getSimulationMultiArmSurvival]{getSimulationMultiArmSurvival()}} to create an object of this type.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SimulationResultsMultiArmSurvival <- setRefClass("SimulationResultsMultiArmSurvival",
    contains = "SimulationResultsBaseSurvival",
    # Multi-arm specific configuration (arm selection, effect shape) and
    # per-arm result fields; common survival fields come from the base class.
    fields = list(
        activeArms = "integer",
        effectMatrix = "matrix",
        typeOfShape = "character",
        omegaMaxVector = "numeric",
        gED50 = "numeric",
        slope = "numeric",
        intersectionTest = "character",
        adaptations = "logical",
        typeOfSelection = "character",
        effectMeasure = "character",
        successCriterion = "character",
        epsilonValue = "numeric",
        rValue = "numeric",
        threshold = "numeric",
        selectArmsFunction = "function",
        correlationComputation = "character",
        earlyStop = "matrix",
        selectedArms = "array",
        numberOfActiveArms = "matrix",
        rejectAtLeastOne = "numeric",
        rejectedArmsPerStage = "array",
        successPerStage = "matrix",
        eventsPerStage = "array",
        singleNumberOfEventsPerStage = "array",
        conditionalPowerAchieved = "matrix"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # Additionally flag the multi-arm specific result fields as
            # generated parameters (the base class already flags the common ones).
            for (generatedParam in c(
                "rejectAtLeastOne", "selectedArms", "numberOfActiveArms",
                "rejectedArmsPerStage", "successPerStage"
            )) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
        }
    )
)

#'
#' @name SimulationResultsEnrichmentMeans
#'
#' @title
#' Class for Simulation Results Enrichment Means
#'
#' @description
#' A class for simulation results means in enrichment designs.
#'
#' @template field_maxNumberOfIterations
#' @template field_seed
#' @template field_allocationRatioPlanned
#' @template field_conditionalPower
#' @template field_iterations
#' @template field_futilityPerStage
#' @template field_futilityStop
#' @template field_stDev
#' @template field_plannedSubjects
#' @template field_minNumberOfSubjectsPerStage
#' @template field_maxNumberOfSubjectsPerStage
#' @template field_thetaH1
#' @template field_stDevH1
#' @template field_calcSubjectsFunction
#' @template field_expectedNumberOfSubjects
#' @template field_populations
#' @template field_effectList
#' @template field_intersectionTest
#' @template field_stratifiedAnalysis
#' @template field_adaptations
#' @template field_typeOfSelection
#' @template field_effectMeasure
#' @template field_successCriterion
#' @template field_epsilonValue
#' @template field_rValue
#' @template field_threshold
#' @template field_selectPopulationsFunction
#' @template field_earlyStop
#' @template field_selectedPopulations
#' @template field_numberOfPopulations
#' @template field_rejectAtLeastOne
#' @template field_rejectedPopulationsPerStage
#' @template field_successPerStage
#' @template field_sampleSizes
#' @template field_conditionalPowerAchieved
#'
#' @details
#' Use \code{\link[=getSimulationEnrichmentMeans]{getSimulationEnrichmentMeans()}} to create an object of this type.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#' @include class_simulation_results.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SimulationResultsEnrichmentMeans <- setRefClass("SimulationResultsEnrichmentMeans",
    contains = "SimulationResultsBaseMeans",
    # Enrichment specific configuration (population selection) and per-population
    # result fields; common means fields come from SimulationResultsBaseMeans.
    fields = list(
        populations = "integer",
        effectList = "list",
        intersectionTest = "character",
        stratifiedAnalysis = "logical",
        adaptations = "logical",
        typeOfSelection = "character",
        effectMeasure = "character",
        successCriterion = "character",
        epsilonValue = "numeric",
        rValue = "numeric",
        threshold = "numeric",
        selectPopulationsFunction = "function",
        earlyStop = "matrix",
        selectedPopulations = "array",
        numberOfPopulations = "matrix",
        rejectAtLeastOne = "numeric",
        rejectedPopulationsPerStage = "array",
        successPerStage = "matrix",
        sampleSizes = "array",
        conditionalPowerAchieved = "matrix"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # Additionally flag the enrichment specific result fields as
            # generated parameters (the base class already flags the common ones).
            for (generatedParam in c(
                "rejectAtLeastOne", "selectedPopulations", "numberOfPopulations",
                "rejectedPopulationsPerStage", "successPerStage"
            )) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
        }
    )
)

#'
#' @name SimulationResultsEnrichmentRates
#'
#' @title
#' Class for Simulation Results Enrichment Rates
#'
#' @description
#' A class for simulation results rates in enrichment designs.
#'
#' @template field_maxNumberOfIterations
#' @template field_seed
#' @template field_allocationRatioPlanned
#' @template field_conditionalPower
#' @template field_iterations
#' @template field_futilityPerStage
#' @template field_futilityStop
#' @template field_directionUpper
#' @template field_plannedSubjects
#' @template field_minNumberOfSubjectsPerStage
#' @template field_maxNumberOfSubjectsPerStage
#' @template field_calcSubjectsFunction
#' @template field_expectedNumberOfSubjects
#' @template field_populations
#' @template field_effectList
#' @template field_intersectionTest
#' @template field_stratifiedAnalysis
#' @template field_adaptations
#' @template field_piTreatmentH1
#' @template field_piControlH1
#' @template field_typeOfSelection
#' @template field_effectMeasure
#' @template field_successCriterion
#' @template field_epsilonValue
#' @template field_rValue
#' @template field_threshold
#' @template field_selectPopulationsFunction
#' @template field_earlyStop
#' @template field_selectedPopulations
#' @template field_numberOfPopulations
#' @template field_rejectAtLeastOne
#' @template field_rejectedPopulationsPerStage
#' @template field_successPerStage
#' @template field_sampleSizes
#' @template field_conditionalPowerAchieved
#'
#' @details
#' Use \code{\link[=getSimulationEnrichmentRates]{getSimulationEnrichmentRates()}} to create an object of this type.
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#' @include class_simulation_results.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SimulationResultsEnrichmentRates <- setRefClass("SimulationResultsEnrichmentRates",
    contains = "SimulationResultsBaseRates",
    # Enrichment specific configuration (population selection) and per-population
    # result fields; common rates fields come from SimulationResultsBaseRates.
    fields = list(
        populations = "integer",
        effectList = "list",
        intersectionTest = "character",
        stratifiedAnalysis = "logical",
        adaptations = "logical",
        piTreatmentH1 = "numeric",
        piControlH1 = "numeric",
        typeOfSelection = "character",
        effectMeasure = "character",
        successCriterion = "character",
        epsilonValue = "numeric",
        rValue = "numeric",
        threshold = "numeric",
        selectPopulationsFunction = "function",
        earlyStop = "matrix",
        selectedPopulations = "array",
        numberOfPopulations = "matrix",
        rejectAtLeastOne = "numeric",
        rejectedPopulationsPerStage = "array",
        successPerStage = "matrix",
        sampleSizes = "array",
        conditionalPowerAchieved = "matrix"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # Additionally flag the enrichment specific result fields as
            # generated parameters (the base class already flags the common ones).
            for (generatedParam in c(
                "rejectAtLeastOne", "selectedPopulations", "numberOfPopulations",
                "rejectedPopulationsPerStage", "successPerStage"
            )) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
        }
    )
)

#'
#' @name SimulationResultsEnrichmentSurvival
#'
#' @title
#' Class for Simulation Results Enrichment Survival
#'
#' @description
#' A class for simulation results survival in enrichment designs.
#' #' @template field_maxNumberOfIterations #' @template field_seed #' @template field_allocationRatioPlanned #' @template field_conditionalPower #' @template field_iterations #' @template field_futilityPerStage #' @template field_futilityStop #' @template field_directionUpper #' @template field_plannedSubjects #' @template field_minNumberOfSubjectsPerStage #' @template field_maxNumberOfSubjectsPerStage #' @template field_thetaH1_survival #' @template field_calcEventsFunction #' @template field_expectedNumberOfEvents #' @template field_populations #' @template field_effectList #' @template field_intersectionTest #' @template field_stratifiedAnalysis #' @template field_adaptations #' @template field_typeOfSelection #' @template field_effectMeasure #' @template field_successCriterion #' @template field_epsilonValue #' @template field_rValue #' @template field_threshold #' @template field_selectPopulationsFunction #' @template field_correlationComputation #' @template field_earlyStop #' @template field_selectedPopulations #' @template field_numberOfPopulations #' @template field_rejectAtLeastOne #' @template field_rejectedPopulationsPerStage #' @template field_successPerStage #' @template field_eventsPerStage #' @template field_singleNumberOfEventsPerStage #' @template field_conditionalPowerAchieved #' #' @details #' Use \code{\link[=getSimulationEnrichmentSurvival]{getSimulationEnrichmentSurvival()}} to create an object of this type. 
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include class_design.R
#' @include f_core_constants.R
#' @include class_time.R
#' @include f_simulation_base_survival.R
#' @include class_simulation_results.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
# Reference class holding the results of an enrichment-design survival simulation.
# Inherits the base survival-simulation fields from SimulationResultsBaseSurvival
# and adds the enrichment-specific input parameters and per-population results.
SimulationResultsEnrichmentSurvival <- setRefClass("SimulationResultsEnrichmentSurvival",
    contains = "SimulationResultsBaseSurvival",
    fields = list(
        populations = "integer",
        effectList = "list",
        intersectionTest = "character",
        stratifiedAnalysis = "logical",
        adaptations = "logical",
        typeOfSelection = "character",
        effectMeasure = "character",
        successCriterion = "character",
        epsilonValue = "numeric",
        rValue = "numeric",
        threshold = "numeric",
        selectPopulationsFunction = "function",
        correlationComputation = "character",
        earlyStop = "matrix",
        selectedPopulations = "array",
        numberOfPopulations = "matrix",
        rejectAtLeastOne = "numeric",
        rejectedPopulationsPerStage = "array",
        successPerStage = "matrix",
        eventsPerStage = "array",
        singleNumberOfEventsPerStage = "array",
        conditionalPowerAchieved = "matrix"
    ),
    methods = list(
        initialize = function(design, ...) {
            callSuper(design = design, ...)
            # Mark the simulation-derived result fields as "generated" so they
            # are distinguished from user-defined input parameters.
            for (generatedParam in c(
                "rejectAtLeastOne", "selectedPopulations", "numberOfPopulations",
                "rejectedPopulationsPerStage", "successPerStage"
            )) {
                .setParameterType(generatedParam, C_PARAM_GENERATED)
            }
        }
    )
)

# Stops with an informative error unless the simulation results contain a varied
# effect parameter of length > 1 ('alternative' for means, 'pi1' for rates,
# 'hazardRatio' for survival), which the requested plot type needs for its x-axis.
# For survival results it additionally rejects piecewise-survival results whose
# 'hazardRatio' length differs from 'overallReject' (only plot types 13/14 apply there).
.assertIsValidVariedParameterVectorForSimulationResultsPlotting <- function(simulationResults, plotType) {
    if (inherits(simulationResults, "SimulationResultsMeans")) {
        if (is.null(simulationResults$alternative) ||
                any(is.na(simulationResults$alternative)) ||
                length(simulationResults$alternative) <= 1) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "plot type ", plotType,
                " is only available if 'alternative' with length > 1 is defined"
            )
        }
    } else if (inherits(simulationResults, "SimulationResultsRates")) {
        if (is.null(simulationResults$pi1) ||
                any(is.na(simulationResults$pi1)) ||
                length(simulationResults$pi1) <= 1) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "plot type ", plotType,
                " is only available if 'pi1' with length > 1 is defined"
            )
        }
    } else if (inherits(simulationResults, "SimulationResultsSurvival")) {
        if (is.null(simulationResults$hazardRatio) ||
                any(is.na(simulationResults$hazardRatio)) ||
                length(simulationResults$hazardRatio) <= 1) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "plot type ", plotType,
                " is only available if 'hazardRatio' with length > 1 is defined or derived"
            )
        }
        if (length(simulationResults$hazardRatio) != length(simulationResults$overallReject)) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "plot type ", plotType,
                " is not available for piecewise survival (only type 13 and 14)"
            )
        }
    }
}

# Returns the name of the parameter used on the x-axis of a simulation results
# plot, or — when 'showSource' is not FALSE — the R source expression (as a
# string) that extracts those x values from the named results object.
.getSimulationPlotXAxisParameterName <- function(simulationResults,
        showSource = FALSE, simulationResultsName = NA_character_) {
    if (grepl("SimulationResultsEnrichment", .getClassName(simulationResults))) {
        effectDataList <- .getSimulationEnrichmentEffectData(simulationResults)
        if (ncol(effectDataList$effectData) == 1) {
            if (!isFALSE(showSource)) {
                return(paste0(
                    simulationResultsName, "$effectList$",
                    effectDataList$effectMatrixName, "[, 1]"
                ))
            }
            # Singular form of the effect matrix name, e.g. "piTreatments" -> "piTreatment"
            return(sub("s$",
        "", effectDataList$effectMatrixName))
        }
        if (!isFALSE(showSource)) {
            numberOfSituations <- nrow(simulationResults$effectList[[effectDataList$effectMatrixName]])
            return(paste0("c(1:", numberOfSituations, ")"))
        }
        return("situation")
    }
    survivalEnabled <- grepl("Survival", .getClassName(simulationResults))
    # NOTE(review): 'meansEnabled' (and arguably 'survivalEnabled', which is
    # re-computed via grepl below) appears unused in this function — confirm and clean up.
    meansEnabled <- grepl("Means", .getClassName(simulationResults))
    if (grepl("MultiArm", .getClassName(simulationResults))) {
        if (!isFALSE(showSource)) {
            gMax <- nrow(simulationResults$effectMatrix)
            return(paste0(simulationResultsName, "$effectMatrix[", gMax, ", ]"))
        }
        return("effectMatrix")
    }
    if (grepl("Survival", .getClassName(simulationResults))) {
        return("hazardRatio")
    }
    return("effect")
}

# Returns the x-axis label for a simulation results plot: the (singularized)
# effect parameter caption for single-column enrichment effect data, "Situation"
# for multi-column enrichment data, the user-supplied 'xlab' when given,
# "Effect" for non-multi-arm results, and otherwise a caption depending on
# whether the effect matrix was user defined.
.getSimulationPlotXAxisLabel <- function(simulationResults, xlab = NULL) {
    if (grepl("SimulationResultsEnrichment", .getClassName(simulationResults))) {
        effectDataList <- .getSimulationEnrichmentEffectData(simulationResults)
        if (ncol(effectDataList$effectData) == 1) {
            xLabel <- simulationResults$.parameterNames[[effectDataList$effectMatrixName]]
            return(sub("s$", "", xLabel))
        }
        return("Situation")
    }
    multiArmEnabled <- grepl("MultiArm", .getClassName(simulationResults))
    userDefinedEffectMatrix <- multiArmEnabled &&
        simulationResults$.getParameterType("effectMatrix") == C_PARAM_USER_DEFINED
    if (!is.null(xlab) && !is.na(xlab)) {
        return(xlab)
    }
    if (!multiArmEnabled) {
        return("Effect")
    }
    return(ifelse(userDefinedEffectMatrix, "Effect Matrix Row", "Maximum Effect"))
}

# Builds the long-format data frame (categories / xValues / yValues) for the
# requested power and stopping probability parameters, plus the parameter name
# vectors used for plotting and for the "show source" output. 'earlyStop' is
# expanded into one category per stage row of the earlyStop matrix.
.getPowerAndStoppingProbabilities <- function(simulationResults, xValues, parameters) {
    yParameterNames <- c()
    if ("expectedNumberOfEvents" %in% parameters) {
        yParameterNames <- c(yParameterNames, "expectedNumberOfEvents")
    }
    if ("expectedNumberOfSubjects" %in% parameters) {
        yParameterNames <- c(yParameterNames, "expectedNumberOfSubjects")
    }
    if ("rejectAtLeastOne" %in% parameters) {
        yParameterNames <- c(yParameterNames, "rejectAtLeastOne")
    }
    if ("futilityStop" %in% parameters) {
        yParameterNames <- c(yParameterNames,
            "futilityStop")
    }
    yParameterNamesSrc <- yParameterNames
    data <- NULL
    # One block of rows per simple parameter, labeled with its display caption.
    for (yParameterName in yParameterNames) {
        category <- simulationResults$.parameterNames[[yParameterName]]
        part <- data.frame(
            categories = rep(category, length(xValues)),
            xValues = xValues,
            yValues = simulationResults[[yParameterName]]
        )
        if (is.null(data)) {
            data <- part
        } else {
            data <- rbind(data, part)
        }
    }
    if ("earlyStop" %in% parameters) {
        yParameterNames <- c(yParameterNames, "earlyStop")
        maxEarlyStoppingStages <- nrow(simulationResults$earlyStop)
        for (k in 1:maxEarlyStoppingStages) {
            category <- "Early stop"
            if (maxEarlyStoppingStages > 1) {
                category <- paste0(category, ", stage ", k)
            }
            part <- data.frame(
                categories = rep(category, ncol(simulationResults$earlyStop)),
                xValues = xValues,
                yValues = simulationResults$earlyStop[k, ]
            )
            data <- rbind(data, part)
            yParameterNamesSrc <- c(yParameterNamesSrc, paste0("earlyStop[", k, ", ]"))
        }
    }
    return(list(
        data = data,
        yParameterNames = yParameterNames,
        yParameterNamesSrc = yParameterNamesSrc
    ))
}

# Internal workhorse behind plot.SimulationResults(): dispatches on 'type'
# (1..14) to build the appropriate plot for means / rates / survival,
# multi-arm, and enrichment simulation results; when 'showSource' is active it
# returns the R source command(s) reproducing the plotted data instead.
.plotSimulationResults <- function(simulationResults, designMaster,
        type = 5L,
        main = NA_character_,
        xlab = NA_character_,
        ylab = NA_character_,
        palette = "Set1",
        theta = seq(-1, 1, 0.02),
        plotPointsEnabled = NA,
        legendPosition = NA_integer_,
        showSource = FALSE,
        simulationResultsName = NA_character_,
        plotSettings = NULL, ...)
{
    # --- Argument validation and common setup ---
    .assertGgplotIsInstalled()
    .assertIsSimulationResults(simulationResults)
    .assertIsValidLegendPosition(legendPosition)
    .assertIsSingleInteger(type, "type", naAllowed = FALSE, validateType = FALSE)
    theta <- .assertIsValidThetaRange(thetaRange = theta)
    if (is.null(plotSettings)) {
        plotSettings <- simulationResults$.plotSettings
    }
    # Result-class feature flags derived from the class name.
    survivalEnabled <- grepl("Survival", .getClassName(simulationResults))
    # NOTE(review): 'meansEnabled' appears unused in the remainder of this function — confirm.
    meansEnabled <- grepl("Means", .getClassName(simulationResults))
    multiArmEnabled <- grepl("MultiArm", .getClassName(simulationResults))
    enrichmentEnabled <- grepl("Enrichment", .getClassName(simulationResults))
    userDefinedEffectMatrix <- multiArmEnabled &&
        simulationResults$.getParameterType("effectMatrix") == C_PARAM_USER_DEFINED
    # gMax: number of active arms (multi-arm) or populations (enrichment).
    gMax <- NA_integer_
    if (multiArmEnabled || enrichmentEnabled) {
        gMax <- ifelse(multiArmEnabled,
            simulationResults$activeArms,
            simulationResults$populations
        )
    }
    if (survivalEnabled) {
        nMax <- simulationResults$expectedNumberOfEvents[1] # use first value for plotting
    } else {
        nMax <- simulationResults$expectedNumberOfSubjects[1] # use first value for plotting
    }
    # Plot types 1-3 exist only for multi-arm/enrichment results; 10-14 only for
    # plain (non-multi-arm, non-enrichment) survival results.
    if (type %in% c(1:3) && !multiArmEnabled && !enrichmentEnabled) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'type' (", type,
            ") is not available for non-multi-arm/non-enrichment simulation results (type must be > 3)"
        )
    }
    if ((!survivalEnabled || multiArmEnabled || enrichmentEnabled) && type %in% c(10:14)) {
        if (multiArmEnabled || enrichmentEnabled) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'type' (", type,
                ") is only available for non-multi-arm/non-enrichment survival simulation results"
            )
        } else {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'type' (", type, ") is only available for survival simulation results"
            )
        }
    }
    variedParameters <- logical(0)
    if (is.na(plotPointsEnabled)) {
        plotPointsEnabled <- userDefinedEffectMatrix
    }
    showSourceHint <- ""
    discreteXAxis <- FALSE
    effectData <- NULL
    xValues <- NA_integer_
    # Determine the x values and the source expression describing them.
    if (multiArmEnabled) {
        effectData <- simulationResults$effectMatrix
        effectDataParamName <- paste0("effectMatrix", "[", gMax, ", ]")
        xParameterNameSrc <- paste0(simulationResultsName, "$", effectDataParamName)
        xValues <- effectData[gMax, ]
    } else {
        if (enrichmentEnabled) {
            effectDataList <- .getSimulationEnrichmentEffectData(simulationResults)
            xValues <- effectDataList$xValues
            discreteXAxis <- effectDataList$discreteXAxis
            if (length(xValues) <= 1) {
                # NOTE(review): message typos — "2 ore more ... specifed" should
                # read "2 or more ... specified"; fix the string in a follow-up.
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "2 ore more situations must be specifed in ",
                    sQuote(paste0("effectList$", effectDataList$effectMatrixName))
                )
            }
        }
        xParameterNameSrc <- .getSimulationPlotXAxisParameterName(simulationResults,
            showSource = showSource, simulationResultsName = simulationResultsName
        )
    }
    # Captions: enrichment plots talk about "Populations", multi-arm about "Arms".
    armCaption <- ifelse(enrichmentEnabled, "Population", "Arm")
    armsCaption <- paste0(armCaption, "s")
    srcCmd <- NULL
    if (type == 1) { # Multi-arm, Overall Success
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = "Overall Success")
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        # Overall success = success probabilities summed over all stages.
        data <- data.frame(
            xValues = xValues,
            yValues = colSums(simulationResults$successPerStage)
        )
        if (userDefinedEffectMatrix) {
            data$xValues <- 1:nrow(data)
        }
        legendPosition <- ifelse(is.na(legendPosition), C_POSITION_LEFT_CENTER, legendPosition)
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterNameSrc,
            yParameterNames = paste0("colSums(", simulationResultsName, "$successPerStage)"),
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
        if (!is.null(srcCmd)) {
            if (.isSpecialPlotShowSourceArgument(showSource)) {
                return(invisible(srcCmd))
            }
            return(srcCmd)
        }
        return(.plotDataFrame(data,
            mainTitle = main,
            xlab = NA_character_,
            ylab = NA_character_,
            xAxisLabel = .getSimulationPlotXAxisLabel(simulationResults),
            yAxisLabel1 = "Overall Success",
            yAxisLabel2 = NA_character_,
            plotPointsEnabled = plotPointsEnabled,
            legendTitle = NA_character_,
            legendPosition = legendPosition,
            sided = designMaster$sided,
            palette = palette,
            plotSettings = plotSettings,
            discreteXAxis = discreteXAxis
        ))
    } else if (type == 2) { # Multi-arm, Success per Stage
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = "Success per Stage")
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        yParameterNamesSrc <- c()
        data <- NULL
        # One category (curve) per stage when the design has more than one stage.
        if (designMaster$kMax > 1) {
            for (k in 1:designMaster$kMax) {
                part <- data.frame(
                    categories = rep(k, length(xValues)),
                    xValues = xValues,
                    yValues = simulationResults$successPerStage[k, ]
                )
                if (userDefinedEffectMatrix) {
                    part$xValues <- 1:nrow(part)
                }
                if (is.null(data)) {
                    data <- part
                } else {
                    data <- rbind(data, part)
                }
                yParameterNamesSrc <- c(yParameterNamesSrc, paste0("successPerStage[", k, ", ]"))
            }
        } else {
            data <- data.frame(
                xValues = xValues,
                yValues = simulationResults$successPerStage[1, ]
            )
            if (userDefinedEffectMatrix) {
                data$xValues <- 1:nrow(data)
            }
            yParameterNamesSrc <- c(yParameterNamesSrc, "successPerStage[1, ]")
        }
        legendPosition <- ifelse(is.na(legendPosition), C_POSITION_LEFT_TOP, legendPosition)
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterNameSrc,
            yParameterNames = yParameterNamesSrc,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
        if (!is.null(srcCmd)) {
            if (.isSpecialPlotShowSourceArgument(showSource)) {
                return(invisible(srcCmd))
            }
            return(srcCmd)
        }
        return(.plotDataFrame(data,
            mainTitle = main,
            xlab = NA_character_,
            ylab = NA_character_,
            xAxisLabel = .getSimulationPlotXAxisLabel(simulationResults),
            yAxisLabel1 = "Success",
            yAxisLabel2 = NA_character_,
            plotPointsEnabled = plotPointsEnabled,
            legendTitle = "Stage",
            legendPosition = legendPosition,
            sided = designMaster$sided,
            palette = palette,
            plotSettings = plotSettings,
            discreteXAxis = discreteXAxis
        ))
    } else if (type == 3) { # Multi-arm, Selected Arms/Populations per Stage
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = paste0("Selected ", armsCaption, " per Stage"))
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        selectedDataParamName <- ifelse(multiArmEnabled, "selectedArms", "selectedPopulations")
        selectedData <- simulationResults[[selectedDataParamName]]
        yParameterNamesSrc <- c()
        data <- NULL
        if (designMaster$kMax > 1) {
            # Selection only happens from stage 2 onwards, hence k starts at 2.
            for (g in 1:gMax) {
                for (k in 2:designMaster$kMax) {
                    stages <- rep(k, length(xValues))
                    populationCaption <- g
                    if (enrichmentEnabled) {
                        # Last index is the full population "F"; others are subsets "S1", "S2", ...
                        populationCaption <- ifelse(g == gMax, "F", paste0("S", g))
                    }
                    part <- data.frame(
                        categories = ifelse(designMaster$kMax > 2,
                            paste0(populationCaption, ", ", stages),
                            populationCaption
                        ),
                        xValues = xValues,
                        yValues = selectedData[k, , g]
                    )
                    if (userDefinedEffectMatrix) {
                        part$xValues <- 1:nrow(part)
                    }
                    if (is.null(data)) {
                        data <- part
                    } else {
                        data <- rbind(data, part)
                    }
                    yParameterNamesSrc <- c(yParameterNamesSrc,
                        paste0(selectedDataParamName, "[", k, ", , ", g, "]"))
                }
            }
        } else {
            for (g in 1:gMax) {
                part <- data.frame(
                    categories = g,
                    xValues = xValues,
                    yValues = selectedData[1, , g]
                )
                if (userDefinedEffectMatrix) {
                    # NOTE(review): likely a bug — this assigns to 'data' (still NULL on the
                    # first iteration) instead of 'part'; the analogous type 4 branch uses
                    # 'part$xValues <- 1:nrow(part)'. Confirm and fix.
                    data$xValues <- 1:nrow(data)
                }
                if (is.null(data)) {
                    data <- part
                } else {
                    data <- rbind(data, part)
                }
                yParameterNamesSrc <- c(yParameterNamesSrc,
                    paste0(selectedDataParamName, "[1, , ", g, "]"))
            }
        }
        legendPosition <- ifelse(is.na(legendPosition), C_POSITION_LEFT_TOP, legendPosition)
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterNameSrc,
            yParameterNames = yParameterNamesSrc,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
        if (!is.null(srcCmd)) {
            if (.isSpecialPlotShowSourceArgument(showSource)) {
                return(invisible(srcCmd))
            }
            return(srcCmd)
        }
        legendTitle <- ifelse(gMax > 1,
            ifelse(designMaster$kMax > 2, paste0(armCaption, ", Stage"), armCaption),
            ifelse(designMaster$kMax > 2, "Stage", armCaption)
        )
        return(.plotDataFrame(data,
            mainTitle = main,
            xlab = NA_character_,
            ylab = NA_character_,
            xAxisLabel = .getSimulationPlotXAxisLabel(simulationResults),
            yAxisLabel1 = paste0("Selected ", armsCaption),
            yAxisLabel2 = NA_character_,
            plotPointsEnabled = plotPointsEnabled,
            legendTitle = legendTitle,
            legendPosition = legendPosition,
            sided = designMaster$sided,
            palette = palette,
            plotSettings = plotSettings,
            discreteXAxis = discreteXAxis
        ))
    } else if (type == 4) { # Multi-arm, Rejected Arms/Populations per Stage
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = ifelse(!multiArmEnabled,
                "Reject per Stage",
                ifelse(designMaster$kMax > 1,
                    paste0("Rejected ", armsCaption, " per Stage"),
                    paste0("Rejected ", armsCaption)
                )
            ))
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        yParameterNamesSrc <- c()
        data <- NULL
        if (multiArmEnabled || enrichmentEnabled) {
            # Per-arm (or per-population) rejection probabilities by stage.
            rejectedDataParamName <- ifelse(multiArmEnabled,
                "rejectedArmsPerStage", "rejectedPopulationsPerStage")
            rejectedData <- simulationResults[[rejectedDataParamName]]
            if (designMaster$kMax > 1) {
                for (g in 1:gMax) {
                    for (k in 1:designMaster$kMax) {
                        stages <- rep(k, length(xValues))
                        populationCaption <- g
                        if (enrichmentEnabled) {
                            # Last index is the full population "F"; others are "S1", "S2", ...
                            populationCaption <- ifelse(g == gMax, "F", paste0("S", g))
                        }
                        part <- data.frame(
                            categories = ifelse(gMax > 1,
                                paste0(populationCaption, ", ", stages), stages),
                            xValues = xValues,
                            yValues = rejectedData[k, , g]
                        )
                        if (userDefinedEffectMatrix) {
                            part$xValues <- 1:nrow(part)
                        }
                        if (is.null(data)) {
                            data <- part
                        } else {
                            data <- rbind(data, part)
                        }
                        yParameterNamesSrc <- c(yParameterNamesSrc,
                            paste0(rejectedDataParamName, "[", k, ", , ", g, "]"))
                    }
                }
            } else {
                for (g in 1:gMax) {
                    part <- data.frame(
                        categories = g,
                        xValues = xValues,
                        yValues = rejectedData[1, , g]
                    )
                    if (userDefinedEffectMatrix) {
                        part$xValues <- 1:nrow(part)
                    }
                    if (is.null(data)) {
                        data <- part
                    } else {
                        data <- rbind(data, part)
                    }
                    yParameterNamesSrc <- c(yParameterNamesSrc,
                        paste0(rejectedDataParamName, "[1, , ", g, "]"))
                }
            }
        } else {
            # Plain designs: plot 'rejectPerStage' per stage.
            xParameterName <- .getSimulationPlotXAxisParameterName(simulationResults)
            if (designMaster$kMax > 1) {
                for (k in 1:designMaster$kMax) {
                    part <- data.frame(
                        categories = k,
                        xValues = simulationResults[[xParameterName]],
                        yValues = simulationResults$rejectPerStage[k, ]
                    )
                    if (userDefinedEffectMatrix) {
                        part$xValues <- 1:nrow(part)
                    }
                    if (is.null(data)) {
                        data <- part
                    } else {
                        data <- rbind(data, part)
                    }
                    yParameterNamesSrc <- c(yParameterNamesSrc,
                        paste0("rejectPerStage[", k, ", ]"))
                }
            } else {
                data <- data.frame(
                    xValues = simulationResults[[xParameterName]],
                    yValues = simulationResults$rejectPerStage[1, ]
                )
                if (userDefinedEffectMatrix) {
                    data$xValues <- 1:nrow(data)
                }
                yParameterNamesSrc <- c(yParameterNamesSrc, "rejectPerStage[1, ]")
            }
        }
        legendPosition <- ifelse(is.na(legendPosition), C_POSITION_LEFT_TOP, legendPosition)
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterNameSrc,
            yParameterNames = yParameterNamesSrc,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
        if (!is.null(srcCmd)) {
            if (.isSpecialPlotShowSourceArgument(showSource)) {
                return(invisible(srcCmd))
            }
            return(srcCmd)
        }
        palette <- NULL
        legendTitle <- "Stage"
        if (multiArmEnabled) {
            legendTitle <- ifelse(designMaster$kMax > 1, paste0(armCaption, ", Stage"), armCaption)
        } else if (enrichmentEnabled) {
            legendTitle <- ifelse(gMax > 1, paste0(armCaption, ", Stage"), "Stage")
        }
        yAxisLabel1 <- ifelse(.isMultiArmSimulationResults(simulationResults),
            paste0("Rejected ", armsCaption),
            "Rejection Probability"
        )
        return(.plotDataFrame(data,
            mainTitle = main,
            xlab = NA_character_,
            ylab = NA_character_,
            xAxisLabel = .getSimulationPlotXAxisLabel(simulationResults),
            yAxisLabel1 = yAxisLabel1,
            yAxisLabel2 = NA_character_,
            plotPointsEnabled = plotPointsEnabled,
            legendTitle = legendTitle,
            legendPosition
            = legendPosition,
            sided = designMaster$sided,
            palette = palette,
            plotSettings = plotSettings,
            discreteXAxis = discreteXAxis
        ))
    } else if (type == 5) { # Power and Stopping Probabilities
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = ifelse(designMaster$kMax == 1,
                "Overall Power",
                "Overall Power and Early Stopping"
            ))
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        xParameterName <- .getSimulationPlotXAxisParameterName(simulationResults)
        if ((multiArmEnabled || enrichmentEnabled) && designMaster$kMax > 1) {
            # Multi-arm/enrichment with interim stages: long-format data built by helper.
            powerAndStoppingProbabilities <- .getPowerAndStoppingProbabilities(simulationResults,
                xValues = xValues,
                parameters = c("rejectAtLeastOne", "futilityStop", "earlyStop")
            )
            data <- powerAndStoppingProbabilities$data
            yParameterNames <- powerAndStoppingProbabilities$yParameterNames
            yParameterNamesSrc <- powerAndStoppingProbabilities$yParameterNamesSrc
        } else {
            yParameterNames <- ifelse(multiArmEnabled || enrichmentEnabled,
                "rejectAtLeastOne", "overallReject")
            if (designMaster$kMax > 1) {
                if (!multiArmEnabled && !enrichmentEnabled) {
                    yParameterNames <- c(yParameterNames, "earlyStop")
                }
                yParameterNames <- c(yParameterNames, "futilityStop")
            }
            yParameterNamesSrc <- yParameterNames
        }
        xlab <- .getSimulationPlotXAxisLabel(simulationResults, xlab)
        ylab <- ifelse(is.na(ylab), "", ylab)
        legendPosition <- ifelse(is.na(legendPosition), C_POSITION_LEFT_TOP, legendPosition)
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterNameSrc,
            yParameterNames = yParameterNamesSrc,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
        if (!is.null(srcCmd)) {
            if (.isSpecialPlotShowSourceArgument(showSource)) {
                return(invisible(srcCmd))
            }
            return(srcCmd)
        }
        if ((multiArmEnabled || enrichmentEnabled) && designMaster$kMax > 1) {
            return(.plotDataFrame(data,
                mainTitle = main,
                xlab = xlab,
                ylab = ylab,
                xAxisLabel =
                    .getSimulationPlotXAxisLabel(simulationResults),
                yAxisLabel1 = NA_character_,
                yAxisLabel2 = NA_character_,
                plotPointsEnabled = plotPointsEnabled,
                legendTitle = NA_character_,
                legendPosition = legendPosition,
                sided = designMaster$sided,
                palette = palette,
                plotSettings = plotSettings,
                discreteXAxis = discreteXAxis
            ))
        } else {
            # Only force ylim = c(0, 1) when the caller did not pass an own 'ylim' via '...'.
            if (is.null(list(...)[["ylim"]])) {
                ylim <- c(0, 1)
                return(.plotParameterSet(
                    parameterSet = simulationResults,
                    designMaster = designMaster,
                    xParameterName = xParameterName,
                    yParameterNames = yParameterNames,
                    mainTitle = main,
                    xlab = xlab,
                    ylab = ylab,
                    palette = palette,
                    theta = theta,
                    nMax = nMax,
                    plotPointsEnabled = plotPointsEnabled,
                    legendPosition = legendPosition,
                    variedParameters = variedParameters,
                    qnormAlphaLineEnabled = FALSE,
                    yAxisScalingEnabled = FALSE,
                    plotSettings = plotSettings,
                    ylim = ylim # , ...
                )) # ratioEnabled = TRUE
            } else {
                return(.plotParameterSet(
                    parameterSet = simulationResults,
                    designMaster = designMaster,
                    xParameterName = xParameterName,
                    yParameterNames = yParameterNames,
                    mainTitle = main,
                    xlab = xlab,
                    ylab = ylab,
                    palette = palette,
                    theta = theta,
                    nMax = nMax,
                    plotPointsEnabled = plotPointsEnabled,
                    legendPosition = legendPosition,
                    variedParameters = variedParameters,
                    qnormAlphaLineEnabled = FALSE,
                    yAxisScalingEnabled = FALSE,
                    plotSettings = plotSettings # , ...
                ))
            }
        }
    } else if (type == 6) { # Average Sample Size / Average Event Number
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            titlePart <- paste0("Expected ",
                ifelse(survivalEnabled, "Number of Events", "Number of Subjects"))
            main <- PlotSubTitleItems(title = paste0(
                titlePart,
                ifelse(designMaster$kMax == 1, "", paste0(
                    " and Power",
                    ifelse(multiArmEnabled, "", " / Early Stop")
                ))
            ))
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        xParameterName <- .getSimulationPlotXAxisParameterName(simulationResults)
        yParameterNames <- ifelse(survivalEnabled,
            "expectedNumberOfEvents", "expectedNumberOfSubjects")
        if (designMaster$kMax > 1) {
            if (multiArmEnabled || enrichmentEnabled) {
                yParameterNames <- c(yParameterNames, "rejectAtLeastOne")
            } else {
                yParameterNames <- c(yParameterNames, "overallReject")
            }
            yParameterNames <- c(yParameterNames, "earlyStop")
        }
        xlab <- .getSimulationPlotXAxisLabel(simulationResults, xlab)
        legendPosition <- ifelse(is.na(legendPosition), C_POSITION_LEFT_CENTER, legendPosition)
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterNameSrc,
            yParameterNames = yParameterNames,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
    } else if (type == 7) { # Overall Power
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = "Overall Power")
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        xParameterName <- .getSimulationPlotXAxisParameterName(simulationResults)
        yParameterNames <- ifelse(multiArmEnabled || enrichmentEnabled,
            "rejectAtLeastOne", "overallReject")
        xlab <- .getSimulationPlotXAxisLabel(simulationResults, xlab)
        legendPosition <- ifelse(is.na(legendPosition), C_POSITION_RIGHT_CENTER, legendPosition)
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterNameSrc,
            yParameterNames = yParameterNames,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
    } else if (type == 8) { # Overall Early Stopping (and Futility Stopping, if any)
        if (designMaster$kMax == 1) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "plot type 8 (Early Stopping) is not available for 'kMax' = 1")
        }
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        # Only show futility stopping if any non-zero futility probability exists.
        futilityStopEnabled <- !is.null(simulationResults[["futilityStop"]]) &&
            !all(na.omit(simulationResults$futilityStop) == 0)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = paste0(
                "Overall Early Stopping",
                ifelse(futilityStopEnabled, " and Futility Stopping", "")
            ))
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        xParameterName <- .getSimulationPlotXAxisParameterName(simulationResults)
        yParameterNames <- c("earlyStop")
        if (futilityStopEnabled) {
            yParameterNames <- c(yParameterNames, "futilityStop")
        }
        xlab <- .getSimulationPlotXAxisLabel(simulationResults, xlab)
        legendPosition <- ifelse(is.na(legendPosition), C_POSITION_LEFT_CENTER, legendPosition)
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterNameSrc,
            yParameterNames = yParameterNames,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
    } else if (type == 9) { # Expected Number of Events / Subjects
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = ifelse(survivalEnabled,
                "Expected Number of Events",
                "Expected Number of Subjects"
            ))
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        xParameterName <- .getSimulationPlotXAxisParameterName(simulationResults)
        yParameterNames <- ifelse(survivalEnabled,
            "expectedNumberOfEvents", "expectedNumberOfSubjects")
        xlab <- .getSimulationPlotXAxisLabel(simulationResults, xlab)
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterNameSrc,
            yParameterNames = yParameterNames,
            hint = showSourceHint,
            nMax = nMax,
            type
                = type,
            showSource = showSource
        )
    } else if (type == 10) { # Study Duration
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = "Study Duration")
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        xParameterName <- "hazardRatio"
        yParameterNames <- "studyDuration"
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterName,
            yParameterNames = yParameterNames,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
    } else if (type == 11) { # Expected Number of Subjects (survival only)
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = "Expected Number of Subjects")
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        xParameterName <- "hazardRatio"
        yParameterNames <- "expectedNumberOfSubjects"
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterName,
            yParameterNames = yParameterNames,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
    } else if (type == 12) { # Analysis Time
        .assertIsValidVariedParameterVectorForSimulationResultsPlotting(simulationResults, type)
        if (is.na(main)) {
            main <- PlotSubTitleItems(title = "Analysis Time")
            .addPlotSubTitleItems(simulationResults, designMaster, main, type)
        }
        xParameterName <- "hazardRatio"
        yParameterNames <- "analysisTime"
        yParameterNamesSrc <- c()
        for (i in 1:nrow(simulationResults[["analysisTime"]])) {
            yParameterNamesSrc <- c(yParameterNamesSrc, paste0("analysisTime[", i, ", ]"))
        }
        # One curve per stage: analysis time over the hazard ratio.
        data <- NULL
        for (k in 1:designMaster$kMax) {
            part <- data.frame(
                categories = rep(k, length(simulationResults$hazardRatio)),
                xValues = simulationResults$hazardRatio,
                yValues = simulationResults$analysisTime[k, ]
            )
            if (is.null(data)) {
                data <- part
            } else {
                data <- rbind(data, part)
            }
        }
        if (is.na(legendPosition)) {
            legendPosition <-
                C_POSITION_LEFT_CENTER
        }
        srcCmd <- .showPlotSourceInformation(
            objectName = simulationResultsName,
            xParameterName = xParameterName,
            yParameterNames = yParameterNamesSrc,
            hint = showSourceHint,
            nMax = nMax,
            type = type,
            showSource = showSource
        )
        if (!is.null(srcCmd)) {
            if (.isSpecialPlotShowSourceArgument(showSource)) {
                return(invisible(srcCmd))
            }
            return(srcCmd)
        }
        return(.plotDataFrame(data,
            mainTitle = main,
            xlab = NA_character_,
            ylab = NA_character_,
            xAxisLabel = "Hazard Ratio",
            yAxisLabel1 = "Analysis Time",
            yAxisLabel2 = NA_character_,
            plotPointsEnabled = TRUE,
            legendTitle = "Stage",
            legendPosition = legendPosition,
            sided = designMaster$sided,
            plotSettings = plotSettings,
            discreteXAxis = discreteXAxis
        ))
    } else if (type == 13 || type == 14) { # Cumulative Distribution Function / Survival function
        return(.plotSurvivalFunction(simulationResults,
            designMaster = designMaster,
            type = type,
            main = main,
            xlab = xlab,
            ylab = ylab,
            palette = palette,
            legendPosition = legendPosition,
            designPlanName = simulationResultsName,
            showSource = showSource,
            plotSettings = plotSettings
        ))
    } else {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'type' (", type, ") is not allowed; must be 5, 6, ..., 14")
    }
    # Fall-through for types 6-11: either return the source command or the plot.
    if (!is.null(srcCmd)) {
        if (.isSpecialPlotShowSourceArgument(showSource)) {
            return(invisible(srcCmd))
        }
        return(srcCmd)
    }
    return(.plotParameterSet(
        parameterSet = simulationResults,
        designMaster = designMaster,
        xParameterName = xParameterName,
        yParameterNames = yParameterNames,
        mainTitle = main,
        xlab = xlab,
        ylab = ylab,
        palette = palette,
        theta = theta,
        nMax = nMax,
        plotPointsEnabled = plotPointsEnabled,
        legendPosition = legendPosition,
        variedParameters = variedParameters,
        qnormAlphaLineEnabled = (type != 2),
        ratioEnabled = TRUE,
        plotSettings = plotSettings # , ...
    ))
}

#'
#' @title
#' Simulation Results Plotting
#'
#' @param x The simulation results, obtained from \cr
#' \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}.
#' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function).
#' @param main The main title.
#' @param xlab The x-axis label.
#' @param ylab The y-axis label.
#' @inheritParams param_palette
#' @inheritParams param_theta
#' @inheritParams param_plotPointsEnabled
#' @inheritParams param_showSource
#' @inheritParams param_plotSettings
#' @inheritParams param_legendPosition
#' @inheritParams param_grid
#' @param type The plot type (default = \code{1}). The following plot types are available:
#' \itemize{
#' \item \code{1}: creates an 'Overall Success' plot (multi-arm and enrichment only)
#' \item \code{2}: creates a 'Success per Stage' plot (multi-arm and enrichment only)
#' \item \code{3}: creates a 'Selected Arms per Stage' plot (multi-arm and enrichment only)
#' \item \code{4}: creates a 'Reject per Stage' or 'Rejected Arms per Stage' plot
#' \item \code{5}: creates an 'Overall Power and Early Stopping' plot
#' \item \code{6}: creates an 'Expected Number of Subjects and Power / Early Stop' or
#' 'Expected Number of Events and Power / Early Stop' plot
#' \item \code{7}: creates an 'Overall Power' plot
#' \item \code{8}: creates an 'Overall Early Stopping' plot
#' \item \code{9}: creates an 'Expected Sample Size' or 'Expected Number of Events' plot
#' \item \code{10}: creates a 'Study Duration' plot (non-multi-arm and non-enrichment survival only)
#' \item \code{11}: creates an 'Expected Number of Subjects' plot (non-multi-arm and non-enrichment survival only)
#' \item \code{12}: creates an 'Analysis Times' plot (non-multi-arm and non-enrichment survival only)
#' \item \code{13}: creates a 'Cumulative Distribution Function' plot (non-multi-arm and non-enrichment survival only)
#' \item \code{14}: creates a 'Survival Function' plot (non-multi-arm and non-enrichment survival only)
#' \item \code{"all"}: creates all available plots and returns them as a grid plot or list
#' }
#' @inheritParams param_three_dots_plot
#'
#' @description
#' Plots simulation results.
#'
#' @details
#' Generic function to plot all kinds of simulation results.
#'
#' @template return_object_ggplot
#'
#' @examples
#' \dontrun{
#' results <- getSimulationMeans(
#'     alternative = 0:4, stDev = 5,
#'     plannedSubjects = 40, maxNumberOfIterations = 1000
#' )
#' plot(results, type = 5)
#' }
#'
#' @export
#'
plot.SimulationResults <- function(x, y, ..., main = NA_character_,
        xlab = NA_character_, ylab = NA_character_, type = 1L, palette = "Set1",
        theta = seq(-1, 1, 0.01), plotPointsEnabled = NA,
        legendPosition = NA_integer_, showSource = FALSE,
        grid = 1, plotSettings = NULL) {
    # capture the unevaluated 'x' argument so that its name (as written by the
    # caller) can be embedded in the generated plot source code hint
    fCall <- match.call(expand.dots = FALSE)
    simulationResultsName <- deparse(fCall$x)
    .assertGgplotIsInstalled()
    .assertIsSingleInteger(grid, "grid", validateType = FALSE)
    # 'type' may be a single number, a vector, or "all"; resolve to numeric type IDs
    typeNumbers <- .getPlotTypeNumber(type, x)
    if (is.null(plotSettings)) {
        plotSettings <- .getGridPlotSettings(x, typeNumbers, grid)
    }
    p <- NULL
    plotList <- list()
    # create one plot per requested type; when several types are requested the
    # plots are collected (keyed by their captions) for a grid/list result
    for (typeNumber in typeNumbers) {
        p <- .plotSimulationResults(
            simulationResults = x, designMaster = x$.design,
            main = main, xlab = xlab, ylab = ylab, type = typeNumber,
            palette = palette, theta = theta, plotPointsEnabled = plotPointsEnabled,
            legendPosition = .getGridLegendPosition(legendPosition, typeNumbers, grid),
            showSource = showSource, simulationResultsName = simulationResultsName,
            plotSettings = plotSettings, ...
        )
        .printPlotShowSourceSeparator(showSource, typeNumber, typeNumbers)
        if (length(typeNumbers) > 1) {
            caption <- .getPlotCaption(x, typeNumber, stopIfNotFound = TRUE)
            plotList[[caption]] <- p
        }
    }
    # single plot: return it directly (invisibly when only source code was shown)
    if (length(typeNumbers) == 1) {
        if (.isSpecialPlotShowSourceArgument(showSource)) {
            return(invisible(p))
        }
        return(p)
    }
    if (.isSpecialPlotShowSourceArgument(showSource)) {
        return(invisible(plotList))
    }
    return(.createPlotResultObject(plotList, grid))
}

#'
#' @title
#' Print Simulation Results
#'
#' @description
#' \code{print} prints its \code{SimulationResults} argument and returns it invisibly (via \code{invisible(x)}).
#'
#' @param x The \code{\link{SimulationResults}} object to print.
#' @param showStatistics If \code{TRUE}, additional statistics will be included in the output;
#' the value is passed through to \code{x$show()} or \code{x$.catMarkdownText()} (default is \code{FALSE}).
#' @param markdown If \code{TRUE}, the object \code{x} will be printed using markdown syntax;
#' normal representation will be used otherwise (default is \code{FALSE})
#' @inheritParams param_three_dots
#'
#' @details
#' Prints the parameters and results of a \code{SimulationResults} object.
#'
#' @export
#'
#' @keywords internal
#'
print.SimulationResults <- function(x, ..., showStatistics = FALSE, markdown = FALSE) {
    if (markdown) {
        x$.catMarkdownText(showStatistics = showStatistics)
        return(invisible(x))
    }
    x$show(showStatistics = showStatistics)
    invisible(x)
}

#'
#' @title
#' Get Simulation Data
#'
#' @description
#' Returns the aggregated simulation data.
#'
#' @param x A \code{\link{SimulationResults}} object created by \code{\link[=getSimulationMeans]{getSimulationMeans()}},\cr
#' \code{\link[=getSimulationRates]{getSimulationRates()}}, \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}, \code{\link[=getSimulationMultiArmMeans]{getSimulationMultiArmMeans()}},\cr
#' \code{\link[=getSimulationMultiArmRates]{getSimulationMultiArmRates()}}, or \code{\link[=getSimulationMultiArmSurvival]{getSimulationMultiArmSurvival()}}.
#'
#' @details
#' This function can be used to get the aggregated simulated data from a simulation results
#' object, for example, obtained by \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}.
#' In this case, the data frame contains the following columns:
#' \enumerate{
#' \item \code{iterationNumber}: The number of the simulation iteration.
#' \item \code{stageNumber}: The stage.
#' \item \code{pi1}: The assumed or derived event rate in the treatment group.
#' \item \code{pi2}: The assumed or derived event rate in the control group.
#' \item \code{hazardRatio}: The hazard ratio under consideration (if available).
#' \item \code{analysisTime}: The analysis time.
#' \item \code{numberOfSubjects}: The number of subjects under consideration when the
#' (interim) analysis takes place.
#' \item \code{eventsPerStage1}: The observed number of events per stage
#' in treatment group 1.
#' \item \code{eventsPerStage2}: The observed number of events per stage
#' in treatment group 2.
#' \item \code{eventsPerStage}: The observed number of events per stage
#' in both treatment groups.
#' \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise.
#' \item \code{eventsNotAchieved}: 1 if number of events could not be reached with
#' observed number of subjects, 0 otherwise.
#' \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise.
#' \item \code{testStatistic}: The test statistic that is used for the test decision,
#' depends on which design was chosen (group sequential, inverse normal,
#' or Fisher combination test).
#' \item \code{logRankStatistic}: Z-score statistic which corresponds to a one-sided
#' log-rank test at considered stage.
#' \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for
#' selected sample size and effect. The effect is either estimated from the data or can be
#' user defined with \code{thetaH1} or \code{pi1H1} and \code{pi2H1}.
#' \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise.
#' \item \code{hazardRatioEstimateLR}: The estimated hazard ratio, derived from the
#' log-rank statistic.
#' }
#' A subset of variables is provided for \code{\link[=getSimulationMeans]{getSimulationMeans()}}, \code{\link[=getSimulationRates]{getSimulationRates()}}, \code{\link[=getSimulationMultiArmMeans]{getSimulationMultiArmMeans()}},\cr
#' \code{\link[=getSimulationMultiArmRates]{getSimulationMultiArmRates()}}, or \code{\link[=getSimulationMultiArmSurvival]{getSimulationMultiArmSurvival()}}.
#'
#' @template return_dataframe
#'
#' @examples
#' results <- getSimulationSurvival(
#'     pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, eventTime = 12,
#'     accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200,
#'     maxNumberOfIterations = 50
#' )
#' data <- getData(results)
#' head(data)
#' dim(data)
#'
#' @export
#'
getData <- function(x) {
    if (!inherits(x, "SimulationResults")) { # or 'Dataset'
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'x' must be a 'SimulationResults' object; for example, use getSimulationMeans() to create one"
        )
    }
    return(x$.data)
}

#' @rdname getData
#' @export
getData.SimulationResults <- function(x) {
    return(x$.data)
}

# Aggregates the raw survival data of a single simulation iteration
# (optionally restricted to the rows matching the given 'pi1' value) into a
# one-row data.frame with event counts per treatment group.
# Returns NULL if 'pi1' is specified and no matching rows exist.
.getAggregatedDataByIterationNumber <- function(rawData, iterationNumber, pi1 = NA_real_) {
    if (!is.na(pi1)) {
        if (is.null(rawData[["pi1"]])) {
            # bug fix: error message read "does not contains"
            stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'rawData' does not contain a 'pi1' column")
        }
        subData <- rawData[rawData$iterationNumber == iterationNumber & rawData$pi1 == pi1, ]
        if (nrow(subData) == 0) {
            return(NULL)
        }
    } else {
        subData <- rawData[rawData$iterationNumber == iterationNumber, ]
    }

    eventsPerStage1 <- sum(subData$event[subData$treatmentGroup == 1])
    eventsPerStage2 <- sum(subData$event[subData$treatmentGroup == 2])

    result <- data.frame(
        iterationNumber = iterationNumber,
        pi1 = pi1,
        stageNumber = subData$stopStage[1],
        analysisTime = max(subData$observationTime),
        numberOfSubjects = nrow(subData),
        eventsPerStage1 = eventsPerStage1,
        eventsPerStage2 = eventsPerStage2,
        eventsPerStage = eventsPerStage1 + eventsPerStage2
    )
    if (is.na(pi1)) {
        # no 'pi1' was requested: drop the all-NA helper column again
        result <- result[, colnames(result) != "pi1"]
    }
    return(result)
}

# Aggregates the complete raw survival data to one row per iteration
# (and per 'pi1' value, if a 'pi1' column is present).
# Rows are collected in a list and bound once at the end instead of growing
# the data.frame with rbind() inside the loop (avoids O(n^2) copying).
# Returns NULL when no aggregated rows result (same as before).
.getAggregatedData <- function(rawData) {
    iterationNumbers <- sort(unique(rawData$iterationNumber))
    pi1Vec <- rawData[["pi1"]]
    if (!is.null(pi1Vec)) {
        pi1Vec <- sort(unique(na.omit(rawData$pi1)))
    }

    rows <- list()
    if (!is.null(pi1Vec) && length(pi1Vec) > 0) {
        for (iterationNumber in iterationNumbers) {
            for (pi1 in pi1Vec) {
                row <- .getAggregatedDataByIterationNumber(rawData, iterationNumber, pi1)
                if (!is.null(row)) {
                    rows[[length(rows) + 1]] <- row
                }
            }
        }
    } else {
        for (iterationNumber in iterationNumbers) {
            row <- .getAggregatedDataByIterationNumber(rawData, iterationNumber)
            if (!is.null(row)) {
                rows[[length(rows) + 1]] <- row
            }
        }
    }
    if (length(rows) == 0) {
        return(NULL)
    }
    return(do.call(rbind, rows))
}

#'
#' @title
#' Get Simulation Raw Data for Survival
#'
#' @description
#' Returns the raw survival data which was generated for simulation.
#'
#' @param x A \code{\link{SimulationResults}} object created by \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}.
#' @param aggregate Logical. If \code{TRUE} the raw data will be aggregated similar to
#' the result of \code{\link[=getData]{getData()}}, default is \code{FALSE}.
#'
#' @details
#' This function works only if \code{\link[=getSimulationSurvival]{getSimulationSurvival()}} was called with a \cr
#' \code{maxNumberOfRawDatasetsPerStage} > 0 (default is \code{0}).
#'
#' This function can be used to get the simulated raw data from a simulation results
#' object obtained by \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}. Note that \code{\link[=getSimulationSurvival]{getSimulationSurvival()}}
#' must have been called before with \code{maxNumberOfRawDatasetsPerStage} > 0.
#' The data frame contains the following columns:
#' \enumerate{
#' \item \code{iterationNumber}: The number of the simulation iteration.
#' \item \code{stopStage}: The stage of stopping.
#' \item \code{subjectId}: The subject id (increasing number 1, 2, 3, ...)
#' \item \code{accrualTime}: The accrual time, i.e., the time when the subject entered the trial.
#' \item \code{treatmentGroup}: The treatment group number (1 or 2).
#' \item \code{survivalTime}: The survival time of the subject.
#' \item \code{dropoutTime}: The dropout time of the subject (may be \code{NA}).
#' \item \code{observationTime}: The specific observation time.
#' \item \code{timeUnderObservation}: The time under observation is defined as follows:\cr
#' if (event == TRUE) {\cr
#' timeUnderObservation <- survivalTime;\cr
#' } else if (dropoutEvent == TRUE) {\cr
#' timeUnderObservation <- dropoutTime;\cr
#' } else {\cr
#' timeUnderObservation <- observationTime - accrualTime;\cr
#' }
#' \item \code{event}: \code{TRUE} if an event occurred; \code{FALSE} otherwise.
#' \item \code{dropoutEvent}: \code{TRUE} if a dropout event occurred; \code{FALSE} otherwise.
#' } #' #' @template return_dataframe #' #' @examples #' \dontrun{ #' results <- getSimulationSurvival( #' pi1 = seq(0.3, 0.6, 0.1), pi2 = 0.3, eventTime = 12, #' accrualTime = 24, plannedEvents = 40, maxNumberOfSubjects = 200, #' maxNumberOfIterations = 50, maxNumberOfRawDatasetsPerStage = 5 #' ) #' rawData <- getRawData(results) #' head(rawData) #' dim(rawData) #' } #' #' @export #' getRawData <- function(x, aggregate = FALSE) { if (!inherits(x, "SimulationResultsSurvival")) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'x' must be a 'SimulationResultsSurvival' object; use getSimulationSurvival() to create one" ) } rawData <- x$.rawData if (is.null(rawData) || ncol(rawData) == 0 || nrow(rawData) == 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "simulation results contain no raw data; ", "choose a 'maxNumberOfRawDatasetsPerStage' > 0, e.g., ", "getSimulationSurvival(..., maxNumberOfRawDatasetsPerStage = 1)" ) } if (!aggregate) { return(rawData) } return(.getAggregatedData(rawData)) } rpact/R/f_analysis_enrichment_means.R0000644000176200001440000017465214445307575017473 0ustar liggesusers## | ## | *Analysis of means in enrichment designs with adaptive test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_logger.R NULL .calcMeansVariancesTestStatistics <- function(dataInput, subset, stage, thetaH0, stratifiedAnalysis, varianceOption) { 
.assertIsSingleInteger(stage, "stage") .assertIsSingleNumber(thetaH0, "thetaH0") .assertIsSingleLogical(stratifiedAnalysis, "stratifiedAnalysis") .assertIsSingleCharacter(varianceOption, "varianceOption") n <- rep(NA_real_, 2) on <- rep(NA_real_, 2) m <- rep(NA_real_, 2) om <- rep(NA_real_, 2) v <- rep(NA_real_, 2) ov <- rep(NA_real_, 2) for (i in 1:2) { m[i] <- sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = i) * dataInput$getMeans(stage = stage, subset = subset, group = i), na.rm = TRUE) / sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = i), na.rm = TRUE) # calculate residual variance from full population (only if gMax = 2) if (length(subset) == 1 && subset == "S1" && varianceOption == "pooledFromFull") { if (dataInput$isStratified()) { v[i] <- sum((dataInput$getSampleSizes(stage = stage, subset = c("S1", "R"), group = i) - 1) * dataInput$getStDev(stage = stage, subset = c("S1", "R"), group = i)^2, na.rm = TRUE) / (sum(dataInput$getSampleSizes(stage = stage, subset = c("S1", "R"), group = i) - 1, na.rm = TRUE)) n[i] <- sum(dataInput$getSampleSizes(stage = stage, subset = c("S1", "R"), group = i), na.rm = TRUE) } else { if (is.na(dataInput$getSampleSizes(stage = stage, subset = c("F"), group = i))) { v[i] <- dataInput$getStDev(stage = stage, subset = c("S1"), group = i)^2 n[i] <- dataInput$getSampleSizes(stage = stage, subset = c("S1"), group = i) } else { v[i] <- dataInput$getStDev(stage = stage, subset = c("F"), group = i)^2 n[i] <- dataInput$getSampleSizes(stage = stage, subset = c("F"), group = i) } } } else if (varianceOption == "pooledFromFull") { v[i] <- sum((dataInput$getSampleSizes(stage = stage, subset = subset, group = i) - 1) * dataInput$getStDev(stage = stage, subset = subset, group = i)^2 / sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = i) - 1, na.rm = TRUE)) n[i] <- sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = i), na.rm = TRUE) } else { v[i] <- 
sum((dataInput$getSampleSizes(stage = stage, subset = subset, group = i) - 1) * dataInput$getStDev(stage = stage, subset = subset, group = i)^2 + dataInput$getSampleSizes(stage = stage, subset = subset, group = i) * (dataInput$getMeans(stage = stage, subset = subset, group = i) - m[i])^2, na.rm = TRUE) / (sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = i), na.rm = TRUE) - 1) n[i] <- sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = i), na.rm = TRUE) } # calculation for overall data on[i] <- sum(dataInput$getOverallSampleSizes(stage = stage, subset = subset, group = i), na.rm = TRUE) om[i] <- sum(dataInput$getOverallSampleSizes(stage = stage, subset = subset, group = i) * dataInput$getOverallMeans(stage = stage, subset = subset, group = i), na.rm = TRUE) / on[i] ov[i] <- sum((dataInput$getOverallSampleSizes(stage = stage, subset = subset, group = i) - 1) * dataInput$getOverallStDev(stage = stage, subset = subset, group = i)^2 + dataInput$getOverallSampleSizes(stage = stage, subset = subset, group = i) * (dataInput$getOverallMeans(stage = stage, subset = subset, group = i) - om[i])^2, na.rm = TRUE) / (sum(dataInput$getOverallSampleSizes(stage = stage, subset = subset, group = i), na.rm = TRUE) - 1) } df <- NA_real_ if (stratifiedAnalysis) { weights <- dataInput$getSampleSizes(stage = stage, subset = subset, group = 1) * dataInput$getSampleSizes(stage = stage, subset = subset, group = 2) / (dataInput$getSampleSizes(stage = stage, subset = subset, group = 1) + dataInput$getSampleSizes(stage = stage, subset = subset, group = 2)) if (varianceOption == "pooledFromFull") { pv <- ((n[1] - 1) * v[1] + (n[2] - 1) * v[2]) / (n[1] + n[2] - 2) testStatistics <- sum( (dataInput$getMeans(stage = stage, subset = subset, group = 1) - dataInput$getMeans(stage = stage, subset = subset, group = 2) - thetaH0) * weights, na.rm = TRUE ) / sqrt(sum(pv * weights, na.rm = TRUE)) } else if (varianceOption == "pooled") { pv <- 
((dataInput$getSampleSizes(stage = stage, subset = subset, group = 1) - 1) * dataInput$getStDevs(stage = stage, subset = subset, group = 1)^2 + (dataInput$getSampleSizes(stage = stage, subset = subset, group = 2) - 1) * dataInput$getStDevs(stage = stage, subset = subset, group = 2)^2) / (dataInput$getSampleSizes(stage = stage, subset = subset, group = 1) + dataInput$getSampleSizes(stage = stage, subset = subset, group = 2) - 2) testStatistics <- sum( (dataInput$getMeans(stage = stage, subset = subset, group = 1) - dataInput$getMeans(stage = stage, subset = subset, group = 2) - thetaH0) * weights, na.rm = TRUE ) / sqrt(sum(pv * weights, na.rm = TRUE)) } else { pv <- dataInput$getStDevs(stage = stage, subset = subset, group = 1)^2 / dataInput$getSampleSizes(stage = stage, subset = subset, group = 1) + dataInput$getStDevs(stage = stage, subset = subset, group = 2)^2 / dataInput$getSampleSizes(stage = stage, subset = subset, group = 2) testStatistics <- sum( (dataInput$getMeans(stage = stage, subset = subset, group = 1) - dataInput$getMeans(stage = stage, subset = subset, group = 2) - thetaH0) * weights, na.rm = TRUE ) / sqrt(sum(pv * weights^2, na.rm = TRUE)) } df <- sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = 1), na.rm = TRUE) + sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = 2), na.rm = TRUE) - length(dataInput$getSampleSizes(stage = stage, subset = subset, group = 1)) - length(dataInput$getSampleSizes(stage = stage, subset = subset, group = 2)) } # non-stratified analysis else { if (varianceOption == "pooledFromFull") { pv <- ((n[1] - 1) * v[1] + (n[2] - 1) * v[2]) / (n[1] + n[2] - 2) testStatistics <- (m[1] - m[2] - thetaH0) / sqrt(pv * (1 / sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = 1), na.rm = TRUE) + 1 / sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = 2), na.rm = TRUE))) df <- n[1] + n[2] - length(dataInput$getSampleSizes(stage = stage, subset = subset, group = 1)) - 
length(dataInput$getSampleSizes(stage = stage, subset = subset, group = 2)) } else if (varianceOption == "pooled") { pv <- ((n[1] - 1) * v[1] + (n[2] - 1) * v[2]) / (n[1] + n[2] - 2) testStatistics <- (m[1] - m[2] - thetaH0) / sqrt(pv * (1 / n[1] + 1 / n[2])) df <- n[1] + n[2] - 2 } else { testStatistics <- (m[1] - m[2] - thetaH0) / sqrt(v[1] / n[1] + v[2] / n[2]) u <- v[1] / n[1] / (v[1] / n[1] + v[2] / n[2]) df <- 1 / (u^2 / (n[1] - 1) + (1 - u)^2 / (n[2] - 1)) } } testStatistics[is.nan(testStatistics)] <- NA_real_ if (any(is.nan(om))) { om <- rep(NA_real_, 2) ov <- rep(NA_real_, 2) } # consider the case n[1] = n[2] = 0 df[!is.na(df) & df <= 0] <- NA_real_ ov[!is.na(ov) & ov <= 0] <- NA_real_ if ("R" %in% subset && is.na(dataInput$getSampleSizes(stage = stage, subset = "R", group = 1)) || ("S1" %in% subset) && is.na(dataInput$getSampleSizes(stage = stage, subset = "S1", group = 1)) || ("S2" %in% subset) && is.na(dataInput$getSampleSizes(stage = stage, subset = "S2", group = 1)) || ("S3" %in% subset) && is.na(dataInput$getSampleSizes(stage = stage, subset = "S3", group = 1)) || ("S4" %in% subset) && is.na(dataInput$getSampleSizes(stage = stage, subset = "S4", group = 1)) ) { n <- rep(NA_real_, 2) m <- rep(NA_real_, 2) v <- rep(NA_real_, 2) on <- rep(NA_real_, 2) om <- rep(NA_real_, 2) ov <- rep(NA_real_, 2) df <- NA_real_ testStatistics <- NA_real_ } return(list( populationNs = n, populationMeans = m, overallMeans = om, overallStDevs = sqrt(((on[1] - 1) * ov[1] + (on[2] - 1) * ov[2]) / (on[1] + on[2] - 2)), overallSampleSizes1 = on[1], overallSampleSizes2 = on[2], df = df, testStatistics = testStatistics )) } .getStageResultsMeansEnrichment <- function(..., design, dataInput, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, varianceOption = C_VARIANCE_OPTION_ENRICHMENT_DEFAULT, intersectionTest = 
C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, calculateSingleStepAdjusted = FALSE, userFunctionCallEnabled = FALSE) { .assertIsTrialDesign(design) .assertIsDatasetMeans(dataInput) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .assertIsValidDirectionUpper(directionUpper, design$sided) .assertIsSingleLogical(normalApproximation, "normalApproximation") .assertIsValidVarianceOptionEnrichment(design, varianceOption) .assertIsValidIntersectionTestEnrichment(design, intersectionTest) .warnInCaseOfUnknownArguments( functionName = ".getStageResultsMeansEnrichment", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ... ) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) kMax <- design$kMax if (dataInput$isStratified()) { gMax <- log(length(levels(factor(dataInput$subsets))), 2) + 1 } else { gMax <- length(levels(factor(dataInput$subsets))) } .assertIsValidIntersectionTestEnrichment(design, intersectionTest) if ((gMax > 2) && intersectionTest == "SpiessensDebois") { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "gMax (", gMax, ") > 2: Spiessens & Debois intersection test test can only be used for one subset" ) } if (varianceOption == "pooledFromFull") { if (gMax > 2) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "gMax (", gMax, ") > 2: varianceOption 'pooledFromFull' can only be used for one subset" ) } } if (intersectionTest == "SpiessensDebois" && varianceOption != "pooledFromFull" && !normalApproximation) { stop("Spiessens & Depois t test can only be performed with pooled ", "residual (stratified) variance from full population, select 'varianceOption' = \"pooledFromFull\"", call. = FALSE ) } if (intersectionTest == "SpiessensDebois" && !stratifiedAnalysis && !normalApproximation) { stop("Spiessens & Depois t test can only be performed with pooled ", "residual (stratified) variance from full population, select 'stratifiedAnalysis' = TRUE", call. 
= FALSE ) } if (dataInput$isStratified() && (gMax > 4)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "gMax (", gMax, ") > 4: Stratified analysis not implemented" ) } stageResults <- StageResultsEnrichmentMeans( design = design, dataInput = dataInput, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER), normalApproximation = normalApproximation, directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, stage = stage ) .setValueAndParameterType( stageResults, "intersectionTest", intersectionTest, C_INTERSECTION_TEST_ENRICHMENT_DEFAULT ) .setValueAndParameterType( stageResults, "stratifiedAnalysis", stratifiedAnalysis, C_STRATIFIED_ANALYSIS_DEFAULT ) effectSizes <- matrix(NA_real_, nrow = gMax, ncol = kMax) means1 <- matrix(NA_real_, nrow = gMax, ncol = kMax) means2 <- matrix(NA_real_, nrow = gMax, ncol = kMax) stDevs1 <- matrix(NA_real_, nrow = gMax, ncol = kMax) stDevs2 <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallSampleSizes1 <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallSampleSizes2 <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallStDevs <- matrix(NA_real_, nrow = gMax, ncol = kMax) testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax) separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax) dimnames(testStatistics) <- list( paste("population ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = "") ) dimnames(separatePValues) <- list( paste("population ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = "") ) subsets <- .createSubsetsByGMax(gMax = gMax, stratifiedInput = dataInput$isStratified(), subsetIdPrefix = "S") for (k in 1:stage) { for (population in 1:gMax) { subset <- subsets[[population]] results <- .calcMeansVariancesTestStatistics(dataInput, subset, k, thetaH0, stratifiedAnalysis, varianceOption) effectSizes[population, k] <- results$overallMeans[1] - 
results$overallMeans[2] testStatistics[population, k] <- results$testStatistics if (normalApproximation) { separatePValues[population, k] <- 1 - stats::pnorm(testStatistics[population, k]) } else { separatePValues[population, k] <- 1 - stats::pt(testStatistics[population, k], results$df) } overallSampleSizes1[population, k] <- results$overallSampleSizes1 overallSampleSizes2[population, k] <- results$overallSampleSizes2 overallStDevs[population, k] <- results$overallStDevs if (!directionUpper) { separatePValues[population, k] <- 1 - separatePValues[population, k] } } } .setWeightsToStageResults(design, stageResults) # Calculation of single stage adjusted p-Values and overall test statistics # for determination of RCIs if (calculateSingleStepAdjusted) { singleStepAdjustedPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax) combInverseNormal <- matrix(NA_real_, nrow = gMax, ncol = kMax) combFisher <- matrix(NA_real_, nrow = gMax, ncol = kMax) if (.isTrialDesignInverseNormal(design)) { weightsInverseNormal <- stageResults$weightsInverseNormal } else if (.isTrialDesignFisher(design)) { weightsFisher <- stageResults$weightsFisher } for (k in 1:stage) { selected <- sum(!is.na(separatePValues[, k])) for (population in 1:gMax) { if ((intersectionTest == "Bonferroni") || (intersectionTest == "Simes")) { singleStepAdjustedPValues[population, k] <- min(1, separatePValues[population, k] * selected) } else if (intersectionTest == "Sidak") { singleStepAdjustedPValues[population, k] <- 1 - (1 - separatePValues[population, k])^selected } else if (intersectionTest == "SpiessensDebois") { if (!is.na(testStatistics[population, k])) { df <- NA_real_ if (!normalApproximation) { if (dataInput$isStratified()) { df <- sum(dataInput$getSampleSizes(stage = k) - 1, na.rm = TRUE) } else { if (selected == 2) { df <- sum(dataInput$getSampleSizes(stage = k, subset = "F") - 2, na.rm = TRUE) } else { df <- sum(dataInput$getSampleSizes(stage = k, subset = "S1") - 2, na.rm = TRUE) } } } sigma <- 1 
if (selected == 2) { if (dataInput$isStratified()) { sigma <- matrix(rep(sqrt(sum(dataInput$getSampleSizes(stage = k, subset = "S1")) / sum(dataInput$getSampleSizes(stage = k))), 4), nrow = 2) } else { sigma <- matrix(rep(sqrt(sum(dataInput$getSampleSizes(stage = k, subset = "S1")) / sum(dataInput$getSampleSizes(stage = k, subset = "F"))), 4), nrow = 2) } diag(sigma) <- 1 } singleStepAdjustedPValues[population, k] <- 1 - .getMultivariateDistribution( type = ifelse(normalApproximation, "normal", "t"), upper = ifelse(directionUpper, testStatistics[population, k], -testStatistics[population, k] ), sigma = sigma, df = df ) } } if (.isTrialDesignInverseNormal(design)) { combInverseNormal[population, k] <- (weightsInverseNormal[1:k] %*% .getOneMinusQNorm(singleStepAdjustedPValues[population, 1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) } else if (.isTrialDesignFisher(design)) { combFisher[population, k] <- prod(singleStepAdjustedPValues[ population, 1:k ]^weightsFisher[1:k]) } } } stageResults$overallTestStatistics <- overallTestStatistics stageResults$effectSizes <- effectSizes stageResults$overallStDevs <- overallStDevs stageResults$testStatistics <- testStatistics stageResults$separatePValues <- separatePValues stageResults$singleStepAdjustedPValues <- singleStepAdjustedPValues stageResults$.setParameterType("singleStepAdjustedPValues", C_PARAM_GENERATED) if (.isTrialDesignFisher(design)) { stageResults$combFisher <- combFisher stageResults$.setParameterType("combFisher", C_PARAM_GENERATED) } else if (.isTrialDesignInverseNormal(design)) { stageResults$combInverseNormal <- combInverseNormal stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED) } } else { stageResults$overallTestStatistics <- overallTestStatistics stageResults$effectSizes <- effectSizes stageResults$overallStDevs <- overallStDevs stageResults$.overallSampleSizes1 <- overallSampleSizes1 stageResults$.overallSampleSizes2 <- overallSampleSizes2 stageResults$testStatistics <- 
testStatistics stageResults$separatePValues <- separatePValues } return(stageResults) } .getAnalysisResultsMeansEnrichment <- function(..., design, dataInput) { if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsMeansInverseNormalEnrichment(design = design, dataInput = dataInput, ...)) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsMeansFisherEnrichment(design = design, dataInput = dataInput, ...)) } .stopWithWrongDesignMessageEnrichment(design) } .getAnalysisResultsMeansInverseNormalEnrichment <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, varianceOption = C_VARIANCE_OPTION_ENRICHMENT_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, assumedStDevs = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsMeansInverseNormalEnrichment", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ... 
) results <- AnalysisResultsEnrichmentInverseNormal(design = design, dataInput = dataInput) results <- .getAnalysisResultsMeansEnrichmentAll( results = results, design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, thetaH0 = thetaH0, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance ) return(results) } .getAnalysisResultsMeansFisherEnrichment <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, varianceOption = C_VARIANCE_OPTION_ENRICHMENT_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, assumedStDevs = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsMeansFisherEnrichment", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) results <- AnalysisResultsEnrichmentFisher(design = design, dataInput = dataInput) .setValueAndParameterType(results, "iterations", as.integer(iterations), C_ITERATIONS_DEFAULT) .setValueAndParameterType(results, "seed", seed, NA_real_) results <- .getAnalysisResultsMeansEnrichmentAll( results = results, design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, thetaH0 = thetaH0, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance, iterations = iterations, seed = seed ) return(results) } .getAnalysisResultsMeansEnrichmentAll <- function(..., results, design, dataInput, intersectionTest, stage, directionUpper, normalApproximation, stratifiedAnalysis, varianceOption, thetaH0, thetaH1, assumedStDevs, nPlanned, allocationRatioPlanned, tolerance, iterations, seed) { startTime <- Sys.time() stageResults <- .getStageResultsMeansEnrichment( design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, userFunctionCallEnabled = TRUE ) .logProgress("Stage results calculated", startTime = startTime) normalApproximation <- stageResults$normalApproximation intersectionTest <- stageResults$intersectionTest results$.setStageResults(stageResults) thetaH1 <- .assertIsValidThetaH1ForEnrichment(thetaH1, stageResults, stage, results = results) assumedStDevs <- .assertIsValidAssumedStDevForMultiHypotheses( assumedStDevs, stageResults, stage, results = results ) .setValueAndParameterType( results, "intersectionTest", intersectionTest, C_INTERSECTION_TEST_ENRICHMENT_DEFAULT ) .setValueAndParameterType(results, 
"directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT) .setValueAndParameterType( results, "normalApproximation", normalApproximation, C_NORMAL_APPROXIMATION_MEANS_DEFAULT ) .setValueAndParameterType(results, "stratifiedAnalysis", stratifiedAnalysis, C_STRATIFIED_ANALYSIS_DEFAULT) .setValueAndParameterType(results, "varianceOption", varianceOption, C_VARIANCE_OPTION_ENRICHMENT_DEFAULT) .setValueAndParameterType(results, "thetaH0", thetaH0, C_THETA_H0_MEANS_DEFAULT) .setConditionalPowerArguments(results, dataInput, nPlanned, allocationRatioPlanned) .setNPlannedAndThetaH1AndAssumedStDevs(results, nPlanned, thetaH1, assumedStDevs) startTime <- Sys.time() results$.closedTestResults <- getClosedCombinationTestResults(stageResults = stageResults) .logProgress("Closed test calculated", startTime = startTime) if (design$kMax > 1) { # conditional power startTime <- Sys.time() if (.isTrialDesignFisher(design)) { results$.conditionalPowerResults <- .getConditionalPowerMeansEnrichment( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, iterations = iterations, seed = seed ) .synchronizeIterationsAndSeed(results) } else { results$.conditionalPowerResults <- .getConditionalPowerMeansEnrichment( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDevs = assumedStDevs ) results$conditionalPower <- results$.conditionalPowerResults$conditionalPower results$.setParameterType("conditionalPower", C_PARAM_GENERATED) } results$thetaH1 <- matrix(results$.conditionalPowerResults$thetaH1, ncol = 1) results$assumedStDevs <- matrix(results$.conditionalPowerResults$assumedStDevs, ncol = 1) .logProgress("Conditional power calculated", startTime = startTime) # CRP - conditional rejection probabilities startTime <- Sys.time() results$conditionalRejectionProbabilities <- 
.getConditionalRejectionProbabilitiesEnrichment( stageResults = stageResults, stage = stage ) results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_GENERATED) .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime) } else { results$.setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE) results$.setParameterType("conditionalPowerSimulated", C_PARAM_NOT_APPLICABLE) results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_NOT_APPLICABLE) } # RCI - repeated confidence interval repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsMeansEnrichment( design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, tolerance = tolerance ) gMax <- stageResults$getGMax() results$repeatedConfidenceIntervalLowerBounds <- matrix(rep(NA_real_, gMax * design$kMax), nrow = gMax, ncol = design$kMax) results$repeatedConfidenceIntervalUpperBounds <- results$repeatedConfidenceIntervalLowerBounds for (k in 1:design$kMax) { for (population in 1:gMax) { results$repeatedConfidenceIntervalLowerBounds[population, k] <- repeatedConfidenceIntervals[population, 1, k] results$repeatedConfidenceIntervalUpperBounds[population, k] <- repeatedConfidenceIntervals[population, 2, k] } } results$.setParameterType("repeatedConfidenceIntervalLowerBounds", C_PARAM_GENERATED) results$.setParameterType("repeatedConfidenceIntervalUpperBounds", C_PARAM_GENERATED) # repeated p-value results$repeatedPValues <- .getRepeatedPValuesEnrichment(stageResults = stageResults, tolerance = tolerance) results$.setParameterType("repeatedPValues", C_PARAM_GENERATED) if (stratifiedAnalysis && !dataInput$isStratified()) { message("Standard deviations from full (and sub-populations) need to be stratified estimates") } return(results) } .getRootThetaMeansEnrichment <- function(..., design, dataInput, 
population, stage, directionUpper, normalApproximation, stratifiedAnalysis, varianceOption, intersectionTest, thetaLow, thetaUp, firstParameterName, secondValue, tolerance) { result <- .getOneDimensionalRoot( function(theta) { stageResults <- .getStageResultsMeansEnrichment( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, intersectionTest = intersectionTest, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, calculateSingleStepAdjusted = TRUE ) firstValue <- stageResults[[firstParameterName]][population, stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- .getOneMinusQNorm(firstValue) } return(firstValue - secondValue) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance, callingFunctionInformation = ".getRootThetaMeansEnrichment" ) return(result) } .getUpperLowerThetaMeansEnrichment <- function(..., design, dataInput, theta, population, stage, directionUpper, normalApproximation, stratifiedAnalysis, varianceOption, conditionFunction, intersectionTest, firstParameterName, secondValue) { stageResults <- .getStageResultsMeansEnrichment( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, intersectionTest = intersectionTest, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, calculateSingleStepAdjusted = TRUE ) firstValue <- stageResults[[firstParameterName]][population, stage] maxSearchIterations <- 30 while (conditionFunction(secondValue, firstValue)) { theta <- 2 * theta stageResults <- .getStageResultsMeansEnrichment( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, intersectionTest = intersectionTest, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, calculateSingleStepAdjusted = TRUE 
) firstValue <- stageResults[[firstParameterName]][population, stage] maxSearchIterations <- maxSearchIterations - 1 if (maxSearchIterations < 0) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, sprintf( paste0( "failed to find theta (k = %s, firstValue = %s, ", "secondValue = %s, levels(firstValue) = %s, theta = %s)" ), stage, stageResults[[firstParameterName]][population, stage], secondValue, firstValue, theta ) ) } } return(theta) } .getRepeatedConfidenceIntervalsMeansEnrichmentAll <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, varianceOption = C_VARIANCE_OPTION_ENRICHMENT_DEFAULT, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) { .assertIsValidIntersectionTestEnrichment(design, intersectionTest) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) stageResults <- .getStageResultsMeansEnrichment( design = design, dataInput = dataInput, stage = stage, thetaH0 = 0, directionUpper = directionUpper, intersectionTest = intersectionTest, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, calculateSingleStepAdjusted = FALSE ) gMax <- stageResults$getGMax() repeatedConfidenceIntervals <- array(NA_real_, dim = c(gMax, 2, design$kMax)) # Repeated confidence intervals when using combination tests if (.isTrialDesignFisher(design)) { bounds <- design$alpha0Vec border <- C_ALPHA_0_VEC_DEFAULT criticalValues <- design$criticalValues conditionFunction <- .isFirstValueSmallerThanSecondValue } else if (.isTrialDesignInverseNormal(design)) { bounds <- design$futilityBounds border <- C_FUTILITY_BOUNDS_DEFAULT criticalValues <- design$criticalValues criticalValues[is.infinite(criticalValues) & criticalValues > 0] <- C_QNORM_MAXIMUM criticalValues[is.infinite(criticalValues) & 
criticalValues < 0] <- C_QNORM_MINIMUM conditionFunction <- .isFirstValueGreaterThanSecondValue } # Necessary for adjustment for binding futility boundaries futilityCorr <- rep(NA_real_, design$kMax) stages <- (1:stage) for (k in stages) { startTime <- Sys.time() for (population in 1:gMax) { if (!is.na(stageResults$testStatistics[population, k]) && criticalValues[k] < C_QNORM_MAXIMUM) { # finding maximum upper and minimum lower bounds for RCIs thetaLow <- .getUpperLowerThetaMeansEnrichment( design = design, dataInput = dataInput, theta = -1, population = population, stage = k, directionUpper = TRUE, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, conditionFunction = conditionFunction, intersectionTest = intersectionTest, firstParameterName = firstParameterName, secondValue = criticalValues[k] ) thetaUp <- .getUpperLowerThetaMeansEnrichment( design = design, dataInput = dataInput, theta = 1, population = population, stage = k, directionUpper = FALSE, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, conditionFunction = conditionFunction, intersectionTest = intersectionTest, firstParameterName = firstParameterName, secondValue = criticalValues[k] ) # finding upper and lower RCI limits through root function repeatedConfidenceIntervals[population, 1, k] <- .getRootThetaMeansEnrichment( design = design, dataInput = dataInput, population = population, stage = k, directionUpper = TRUE, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, thetaLow = thetaLow, thetaUp = thetaUp, intersectionTest = intersectionTest, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance ) repeatedConfidenceIntervals[population, 2, k] <- .getRootThetaMeansEnrichment( design = design, dataInput = dataInput, population = population, stage = k, directionUpper 
= FALSE, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, thetaLow = thetaLow, thetaUp = thetaUp, intersectionTest = intersectionTest, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance ) # adjustment for binding futility bounds if (k > 1 && !is.na(bounds[k - 1]) && conditionFunction(bounds[k - 1], border) && design$bindingFutility) { parameterName <- ifelse(.isTrialDesignFisher(design), "singleStepAdjustedPValues", firstParameterName ) # Calculate new lower and upper bounds if (directionUpper) { thetaLow <- .getUpperLowerThetaMeansEnrichment( design = design, dataInput = dataInput, theta = -1, population = population, stage = k - 1, directionUpper = TRUE, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, conditionFunction = conditionFunction, intersectionTest = intersectionTest, firstParameterName = parameterName, secondValue = bounds[k - 1] ) } else { thetaUp <- .getUpperLowerThetaMeansEnrichment( design = design, dataInput = dataInput, theta = 1, population = population, stage = k - 1, directionUpper = FALSE, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, conditionFunction = conditionFunction, intersectionTest = intersectionTest, firstParameterName = parameterName, secondValue = bounds[k - 1] ) } futilityCorr[k] <- .getRootThetaMeansEnrichment( design = design, dataInput = dataInput, population = population, stage = k - 1, directionUpper = directionUpper, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, thetaLow = thetaLow, thetaUp = thetaUp, intersectionTest = intersectionTest, firstParameterName = parameterName, secondValue = bounds[k - 1], tolerance = tolerance ) if (directionUpper) { repeatedConfidenceIntervals[population, 1, k] <- min( 
min(futilityCorr[2:k]), repeatedConfidenceIntervals[population, 1, k] ) } else { repeatedConfidenceIntervals[population, 2, k] <- max( max(futilityCorr[2:k]), repeatedConfidenceIntervals[population, 2, k] ) } } if (!is.na(repeatedConfidenceIntervals[population, 1, k]) && !is.na(repeatedConfidenceIntervals[population, 2, k]) && repeatedConfidenceIntervals[population, 1, k] > repeatedConfidenceIntervals[population, 2, k]) { repeatedConfidenceIntervals[population, , k] <- rep(NA_real_, 2) } } } .logProgress("Repeated confidence intervals for stage %s calculated", startTime = startTime, k) } return(repeatedConfidenceIntervals) } #' #' RCIs based on inverse normal combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsMeansEnrichmentInverseNormal <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, varianceOption = C_VARIANCE_OPTION_ENRICHMENT_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsMeansEnrichmentInverseNormal", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ... ) return(.getRepeatedConfidenceIntervalsMeansEnrichmentAll( design = design, dataInput = dataInput, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, directionUpper = directionUpper, intersectionTest = intersectionTest, tolerance = tolerance, firstParameterName = "combInverseNormal", ... 
)) } #' #' RCIs based on Fisher's combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsMeansEnrichmentFisher <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, varianceOption = C_VARIANCE_OPTION_ENRICHMENT_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsMeansEnrichmentFisher", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ... ) return(.getRepeatedConfidenceIntervalsMeansEnrichmentAll( design = design, dataInput = dataInput, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, varianceOption = varianceOption, directionUpper = directionUpper, intersectionTest = intersectionTest, tolerance = tolerance, firstParameterName = "combFisher", ... )) } #' #' Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Means #' #' @noRd #' .getRepeatedConfidenceIntervalsMeansEnrichment <- function(..., design) { if (.isTrialDesignInverseNormal(design)) { return(.getRepeatedConfidenceIntervalsMeansEnrichmentInverseNormal(design = design, ...)) } if (.isTrialDesignFisher(design)) { return(.getRepeatedConfidenceIntervalsMeansEnrichmentFisher(design = design, ...)) } .stopWithWrongDesignMessageEnrichment(design) } #' #' Calculation of conditional power for Means #' #' @noRd #' .getConditionalPowerMeansEnrichment <- function(..., stageResults, stage = stageResults$stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaH1 = NA_real_, assumedStDevs = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { design <- stageResults$.design gMax <- stageResults$getGMax() kMax <- design$kMax stDevsH1 <- .getOptionalArgument("stDevsH1", ...) 
if (!is.null(stDevsH1) && !is.na(stDevsH1)) { if (!is.na(assumedStDevs)) { warning(sQuote("assumedStDevs"), " will be ignored because ", sQuote("stDevsH1"), " is defined", call. = FALSE) } assumedStDevs <- stDevsH1 } results <- ConditionalPowerResultsEnrichmentMeans( .design = design, .stageResults = stageResults, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned ) if (any(is.na(nPlanned))) { return(results) } .assertIsValidStage(stage, kMax) if (stage == kMax) { .logDebug( "Conditional power will be calculated only for subsequent stages ", "(stage = ", stage, ", kMax = ", kMax, ")" ) return(results) } if (!.isValidNPlanned(nPlanned = nPlanned, kMax = kMax, stage = stage)) { return(results) } .assertIsValidNPlanned(nPlanned, kMax, stage) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) .setValueAndParameterType(results, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT) assumedStDevs <- .assertIsValidAssumedStDevForMultiHypotheses( assumedStDevs, stageResults, stage, results = results ) .assertIsValidAssumedStDevs(assumedStDevs, gMax) thetaH1 <- .assertIsValidThetaH1ForEnrichment(thetaH1, stageResults, stage, results = results) if (length(thetaH1) != 1 && length(thetaH1) != gMax) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sprintf(paste0( "length of 'thetaH1' (%s) ", "must be equal to 'gMax' (%s) or 1" ), .arrayToString(thetaH1), gMax) ) } if (length(assumedStDevs) == 1) { results$assumedStDevs <- rep(assumedStDevs, gMax) results$.setParameterType("assumedStDevs", C_PARAM_GENERATED) } else { if (any(is.na(assumedStDevs[!is.na(stageResults$testStatistics[, stage])]))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "any of 'assumedStDevs' not correctly specified" ) } } if (length(thetaH1) > 1) { if 
(any(is.na(thetaH1[!is.na(stageResults$testStatistics[, stage])]))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "any of 'thetaH1' not correctly specified" ) } } if (.isTrialDesignInverseNormal(design)) { return(.getConditionalPowerMeansEnrichmentInverseNormal( results = results, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, ... )) } else if (.isTrialDesignFisher(design)) { return(.getConditionalPowerMeansEnrichmentFisher( results = results, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDevs = assumedStDevs, iterations = iterations, seed = seed, ... )) } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of TrialDesignInverseNormal or TrialDesignFisher" ) } #' #' Calculation of conditional power based on inverse normal method #' #' @noRd #' .getConditionalPowerMeansEnrichmentInverseNormal <- function(..., results, stageResults, stage, allocationRatioPlanned, nPlanned, thetaH1, assumedStDevs) { design <- stageResults$.design .assertIsTrialDesignInverseNormal(design) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerMeansEnrichmentInverseNormal", ignore = c("stage", "design", "stDevsH1"), ... 
) kMax <- design$kMax gMax <- stageResults$getGMax() weights <- .getWeightsInverseNormal(design) informationRates <- design$informationRates nPlanned <- c(rep(NA_real_, stage), nPlanned) nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned .setValueAndParameterType( results, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT ) if (length(thetaH1) == 1) { thetaH1 <- rep(thetaH1, gMax) results$.setParameterType("thetaH1", C_PARAM_GENERATED) } else { results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE) } results$.setParameterType("assumedStDevs", C_PARAM_DEFAULT_VALUE) if (stageResults$directionUpper) { standardizedEffect <- (thetaH1 - stageResults$thetaH0) / assumedStDevs } else { standardizedEffect <- -(thetaH1 - stageResults$thetaH0) / assumedStDevs } ctr <- .performClosedCombinationTest(stageResults = stageResults) criticalValues <- design$criticalValues for (population in 1:gMax) { if (!is.na(ctr$separatePValues[population, stage])) { # shifted decision region for use in getGroupSeqProbs # Inverse Normal Method shiftedDecisionRegionUpper <- criticalValues[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - min(ctr$overallAdjustedTestStatistics[ctr$indices[, population] == 1, stage], na.rm = TRUE) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - standardizedEffect[population] * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - min(ctr$overallAdjustedTestStatistics[ctr$indices[, population] == 1, stage], na.rm = TRUE) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 
1):(kMax - 1)]^2)) - standardizedEffect[population] * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - informationRates[stage]) decisionMatrix <- matrix(c( shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper ), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities( decisionMatrix = decisionMatrix, informationRates = scaledInformation ) results$conditionalPower[population, (stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned results$nPlanned <- nPlanned results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$thetaH1 <- thetaH1 results$assumedStDevs <- assumedStDevs return(results) } #' #' Calculation of conditional power based on Fisher's combination test #' #' @noRd #' .getConditionalPowerMeansEnrichmentFisher <- function(..., results, stageResults, stage, allocationRatioPlanned, nPlanned, thetaH1, assumedStDevs, iterations, seed) { design <- stageResults$.design .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerMeansEnrichmentFisher", ignore = c("stage", "design", "stDevsH1"), ... 
) kMax <- design$kMax gMax <- stageResults$getGMax() criticalValues <- design$criticalValues weightsFisher <- .getWeightsFisher(design) results$conditionalPower <- matrix(NA_real_, nrow = gMax, ncol = kMax) results$iterations <- as.integer(iterations) results$.setParameterType("iterations", C_PARAM_USER_DEFINED) results$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) results$seed <- .setSeed(seed) results$simulated <- FALSE results$.setParameterType("simulated", C_PARAM_DEFAULT_VALUE) .setValueAndParameterType( results, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT ) if (length(thetaH1) == 1) { thetaH1 <- rep(thetaH1, gMax) results$.setParameterType("thetaH1", C_PARAM_GENERATED) } else { results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE) } results$.setParameterType("assumedStDevs", C_PARAM_DEFAULT_VALUE) if (stageResults$directionUpper) { standardizedEffect <- (thetaH1 - stageResults$thetaH0) / assumedStDevs } else { standardizedEffect <- -(thetaH1 - stageResults$thetaH0) / assumedStDevs } nPlanned <- c(rep(NA_real_, stage), nPlanned) nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned ctr <- .performClosedCombinationTest(stageResults = stageResults) for (population in 1:gMax) { if (!is.na(ctr$separatePValues[population, stage])) { if (gMax == 1) { pValues <- ctr$adjustedStageWisePValues[ctr$indices[, population] == 1, ][1:stage] } else { pValues <- ctr$adjustedStageWisePValues[ctr$indices[, population] == 1, ][which.max( ctr$overallAdjustedTestStatistics[ctr$indices[, population] == 1, stage] ), 1:stage] } if (stage < kMax - 1) { for (k in (stage + 1):kMax) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueConditionalPowerFisher( kMax = kMax, alpha0Vec = design$alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, pValues = pValues, currentKMax = k, thetaH1 = standardizedEffect[population], stage = stage, 
nPlanned = nPlanned ) } results$conditionalPower[population, k] <- reject / iterations } results$simulated <- TRUE results$.setParameterType("simulated", C_PARAM_GENERATED) } else if (stage == kMax - 1) { divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)]) result <- 1 - (criticalValues[kMax] / divisor)^(1 / weightsFisher[kMax]) if (result <= 0 || result >= 1) { warning("Calculation not possible: could not calculate conditional power for stage ", kMax, call. = FALSE ) results$conditionalPower[population, kMax] <- NA_real_ } else { results$conditionalPower[population, kMax] <- 1 - stats::pnorm(.getQNorm(result) - standardizedEffect[population] * sqrt(nPlanned[kMax])) } } } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned results$nPlanned <- nPlanned results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$thetaH1 <- thetaH1 results$assumedStDevs <- assumedStDevs return(results) } #' #' Calculation of conditional power and likelihood values for plotting the graph #' #' @noRd #' .getConditionalPowerLikelihoodMeansEnrichment <- function(..., stageResults, stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaRange, assumedStDevs = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .associatedArgumentsAreDefined(nPlanned = nPlanned, thetaRange = thetaRange) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) design <- stageResults$.design kMax <- design$kMax gMax <- stageResults$getGMax() intersectionTest <- stageResults$intersectionTest assumedStDevs <- .assertIsValidAssumedStDevForMultiHypotheses(assumedStDevs, stageResults, stage) if (length(assumedStDevs) == 1) { assumedStDevs <- rep(assumedStDevs, gMax) } thetaRange <- .assertIsValidThetaRange(thetaRange = thetaRange) populations <- 
numeric(gMax * length(thetaRange)) effectValues <- numeric(gMax * length(thetaRange)) condPowerValues <- numeric(gMax * length(thetaRange)) likelihoodValues <- numeric(gMax * length(thetaRange)) stdErr <- stageResults$overallStDevs[stage] * sqrt(1 / stageResults$.overallSampleSizes1[, stage] + 1 / stageResults$.overallSampleSizes2[, stage]) results <- ConditionalPowerResultsEnrichmentMeans( .design = design, .stageResults = stageResults, assumedStDevs = assumedStDevs, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned ) j <- 1 for (i in seq(along = thetaRange)) { for (population in 1:gMax) { populations[j] <- population effectValues[j] <- thetaRange[i] if (.isTrialDesignInverseNormal(design)) { condPowerValues[j] <- .getConditionalPowerMeansEnrichmentInverseNormal( results = results, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDevs = assumedStDevs )$conditionalPower[population, kMax] } else if (.isTrialDesignFisher(design)) { condPowerValues[j] <- .getConditionalPowerMeansEnrichmentFisher( results = results, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDevs = assumedStDevs, iterations = iterations, seed = seed )$conditionalPower[population, kMax] } likelihoodValues[j] <- stats::dnorm( thetaRange[i], stageResults$effectSizes[population, stage], stdErr[population] ) / stats::dnorm(0, 0, stdErr[population]) j <- j + 1 } } subtitle <- paste0( "Intersection test = ", intersectionTest, ", stage = ", stage, ", # of remaining subjects = ", sum(nPlanned), ", sd = ", .formatSubTitleValue(assumedStDevs, "assumedStDevs"), ", allocation ratio = ", .formatSubTitleValue(allocationRatioPlanned, "allocationRatioPlanned") ) return(list( populations = populations, xValues = effectValues, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main 
= C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, xlab = "Effect size", ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD, sub = subtitle )) } rpact/R/f_analysis_base_means.R0000644000176200001440000023113214450463134016222 0ustar liggesusers## | ## | *Analysis of means with group sequential and combination test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7147 $ ## | Last changed: $Date: 2023-07-03 08:10:31 +0200 (Mo, 03 Jul 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_logger.R NULL .getAnalysisResultsMeans <- function(..., design, dataInput) { if (.isTrialDesignGroupSequential(design)) { return(.getAnalysisResultsMeansGroupSequential( design = design, dataInput = dataInput, ... )) } if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsMeansInverseNormal( design = design, dataInput = dataInput, ... )) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsMeansFisher( design = design, dataInput = dataInput, ... 
)) } .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE) } .getAnalysisResultsMeansInverseNormal <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, assumedStDev = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsMeansInverseNormal", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) results <- AnalysisResultsInverseNormal(design = design, dataInput = dataInput) .getAnalysisResultsMeansAll( results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, assumedStDev = assumedStDev, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance ) return(results) } .getAnalysisResultsMeansGroupSequential <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, assumedStDev = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsMeansGroupSequential", ignore = 
c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), c("stage", "stDevH1")), ... ) results <- AnalysisResultsGroupSequential(design = design, dataInput = dataInput) stDevH1 <- .getOptionalArgument("stDevH1", ...) if (!is.null(stDevH1)) { .assertIsSingleNumber(assumedStDev, "assumedStDev", naAllowed = TRUE) if (!is.na(assumedStDev)) { if (!identical(assumedStDev, stDevH1)) { stop(C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "either 'assumedStDev' or 'stDevH1' must be defined") } } assumedStDev <- stDevH1 } .getAnalysisResultsMeansAll( results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, assumedStDev = assumedStDev, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance ) return(results) } .getAnalysisResultsMeansFisher <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, thetaH0 = C_THETA_H0_MEANS_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, assumedStDev = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsMeansFisher", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) results <- AnalysisResultsFisher(design = design, dataInput = dataInput) .setValueAndParameterType(results, "iterations", as.integer(iterations), C_ITERATIONS_DEFAULT) .setValueAndParameterType(results, "seed", seed, NA_real_) .getAnalysisResultsMeansAll( results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, assumedStDev = assumedStDev, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance, iterations = iterations, seed = seed ) return(results) } #' #' The following parameters will be taken from 'design': #' stages, informationRates, criticalValues, futilityBounds, alphaSpent, stageLevels #' #' @noRd #' .getAnalysisResultsMeansAll <- function(..., results, design, dataInput, stage, directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaH0, thetaH1, assumedStDev, nPlanned, allocationRatioPlanned, tolerance, iterations, seed) { startTime <- Sys.time() .assertIsValidTolerance(tolerance) stageResults <- .getStageResultsMeans( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) results$.setStageResults(stageResults) .logProgress("Stage results calculated", startTime = startTime) assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage, results = results) thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage, results = results) .setValueAndParameterType(results, "thetaH0", thetaH0, C_THETA_H0_MEANS_DEFAULT) .setValueAndParameterType(results, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT) .setValueAndParameterType( results, "normalApproximation", normalApproximation, C_NORMAL_APPROXIMATION_MEANS_DEFAULT ) if (stageResults$isTwoSampleDataset()) { 
.setValueAndParameterType(results, "equalVariances", equalVariances, C_EQUAL_VARIANCES_DEFAULT) } else { results$.setParameterType("equalVariances", C_PARAM_NOT_APPLICABLE) } .setConditionalPowerArguments(results, dataInput, nPlanned, allocationRatioPlanned) .setNPlannedAndThetaH1AndAssumedStDev(results, nPlanned, thetaH1, assumedStDev) # test actions results$testActions <- getTestActions(stageResults = stageResults) results$.setParameterType("testActions", C_PARAM_GENERATED) if (design$kMax > 1) { # conditional power startTime <- Sys.time() if (.isTrialDesignFisher(design)) { results$.conditionalPowerResults <- .getConditionalPowerMeans( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, assumedStDev = assumedStDev, thetaH1 = thetaH1, iterations = iterations, seed = seed ) .synchronizeIterationsAndSeed(results) } else { results$.conditionalPowerResults <- .getConditionalPowerMeans( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, assumedStDev = assumedStDev, thetaH1 = thetaH1 ) results$conditionalPower <- results$.conditionalPowerResults$conditionalPower results$.setParameterType("conditionalPower", C_PARAM_GENERATED) } .logProgress("Conditional power calculated", startTime = startTime) # CRP - conditional rejection probabilities startTime <- Sys.time() if (.isTrialDesignFisher(design) && isTRUE(.getOptionalArgument("simulateCRP", ...))) { results$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) seed <- results$.conditionalPowerResults$seed crp <- getConditionalRejectionProbabilities( stageResults = stageResults, iterations = iterations, seed = seed ) results$conditionalRejectionProbabilities <- crp$crpFisherSimulated paramTypeSeed <- results$.conditionalPowerResults$.getParameterType("seed") if (paramTypeSeed != C_PARAM_TYPE_UNKNOWN) { results$.setParameterType("seed", paramTypeSeed) } results$seed <- seed } else { 
results$conditionalRejectionProbabilities <- getConditionalRejectionProbabilities(stageResults = stageResults) } results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_GENERATED) .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime) } # RCI - repeated confidence interval startTime <- Sys.time() repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsMeans( design = design, dataInput = dataInput, stage = stage, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance ) results$repeatedConfidenceIntervalLowerBounds <- repeatedConfidenceIntervals[1, ] results$repeatedConfidenceIntervalUpperBounds <- repeatedConfidenceIntervals[2, ] .logProgress("Repeated confidence interval calculated", startTime = startTime) # repeated p-value startTime <- Sys.time() results$repeatedPValues <- getRepeatedPValues( stageResults = stageResults, tolerance = tolerance ) .logProgress("Repeated p-values calculated", startTime = startTime) results$.setParameterType("repeatedConfidenceIntervalLowerBounds", C_PARAM_GENERATED) results$.setParameterType("repeatedConfidenceIntervalUpperBounds", C_PARAM_GENERATED) results$.setParameterType("repeatedPValues", C_PARAM_GENERATED) if (design$kMax > 1) { startTime <- Sys.time() # final p-value finalPValue <- getFinalPValue(stageResults, showWarnings = FALSE) results$finalPValues <- .getVectorWithFinalValueAtFinalStage( kMax = design$kMax, finalValue = finalPValue$pFinal, finalStage = finalPValue$finalStage ) results$finalStage <- finalPValue$finalStage results$.setParameterType("finalPValues", C_PARAM_GENERATED) results$.setParameterType("finalStage", C_PARAM_GENERATED) .logProgress("Final p-value calculated", startTime = startTime) # final confidence interval & median unbiased estimate startTime <- Sys.time() finalConfidenceIntervals <- .getFinalConfidenceIntervalMeans( design = design, dataInput = dataInput, thetaH0 = thetaH0, stage = stage, 
directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance ) if (!is.null(finalConfidenceIntervals)) { finalStage <- finalConfidenceIntervals$finalStage results$finalConfidenceIntervalLowerBounds <- .getVectorWithFinalValueAtFinalStage( kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[1], finalStage = finalStage ) results$finalConfidenceIntervalUpperBounds <- .getVectorWithFinalValueAtFinalStage( kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[2], finalStage = finalStage ) results$medianUnbiasedEstimates <- .getVectorWithFinalValueAtFinalStage( kMax = design$kMax, finalValue = finalConfidenceIntervals$medianUnbiased, finalStage = finalStage ) results$.setParameterType("finalConfidenceIntervalLowerBounds", C_PARAM_GENERATED) results$.setParameterType("finalConfidenceIntervalUpperBounds", C_PARAM_GENERATED) results$.setParameterType("medianUnbiasedEstimates", C_PARAM_GENERATED) .logProgress("Final confidence interval calculated", startTime = startTime) } } return(results) } .getStageResultsMeans <- function(..., design, dataInput, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, stage = NA_integer_, userFunctionCallEnabled = FALSE) { .assertIsDatasetMeans(dataInput = dataInput) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .assertIsValidDirectionUpper(directionUpper, design$sided, userFunctionCallEnabled = userFunctionCallEnabled ) .assertIsSingleLogical(normalApproximation, "normalApproximation") .assertIsSingleLogical(equalVariances, "equalVariances") .warnInCaseOfUnknownArguments( functionName = "getStageResultsMeans", ignore = .getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), ... 
) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design, stage = stage ) effectSizes <- rep(NA_real_, design$kMax) if (dataInput$getNumberOfGroups() == 1) { overallTestStatistics <- c((dataInput$getOverallMeansUpTo(stage) - thetaH0) / dataInput$getOverallStDevsUpTo(stage) * sqrt(dataInput$getOverallSampleSizesUpTo(stage)), rep(NA_real_, design$kMax - stage)) if (normalApproximation) { overallPValues <- 1 - stats::pnorm(overallTestStatistics) } else { overallPValues <- 1 - stats::pt( overallTestStatistics, dataInput$getOverallSampleSizesUpTo(stage) - 1 ) } effectSizes[1:stage] <- dataInput$getOverallMeansUpTo(stage) } if (dataInput$getNumberOfGroups() == 2) { # common variance overallStDevs <- rep(NA_real_, design$kMax) for (k in 1:stage) { overallStDevs[k] <- sqrt(((sum(dataInput$getSampleSizesUpTo(k, 1)) - 1) * dataInput$getOverallStDev(k)^2 + (sum(dataInput$getSampleSizesUpTo(k, 2)) - 1) * dataInput$getOverallStDev(k, 2)^2) / (sum(dataInput$getSampleSizesUpTo(k, 1)) + sum(dataInput$getSampleSizesUpTo(k, 2)) - 2)) } overallSampleSizes1 <- dataInput$getOverallSampleSizesUpTo(stage) overallSampleSizes2 <- dataInput$getOverallSampleSizesUpTo(stage, 2) if (equalVariances) { overallTestStatistics <- c( (dataInput$getOverallMeansUpTo(stage) - dataInput$getOverallMeansUpTo(stage, 2) - thetaH0) / overallStDevs[1:stage] / sqrt(1 / overallSampleSizes1 + 1 / overallSampleSizes2), rep(NA_real_, design$kMax - stage) ) } else { overallTestStatistics <- c( (dataInput$getOverallMeansUpTo(stage) - dataInput$getOverallMeansUpTo(stage, 2) - thetaH0) / (sqrt(dataInput$getOverallStDevsUpTo(stage)^2 / overallSampleSizes1 + dataInput$getOverallStDevsUpTo(stage, 2)^2 / overallSampleSizes2)), rep(NA_real_, design$kMax - stage) ) } if (normalApproximation) { overallPValues <- 1 - stats::pnorm(overallTestStatistics) } else { if (equalVariances) { overallPValues <- 1 - stats::pt( overallTestStatistics, overallSampleSizes1 + overallSampleSizes2 - 2 ) } else { u 
<- dataInput$getOverallStDevsUpTo(stage)^2 / overallSampleSizes1 / (dataInput$getOverallStDevsUpTo(stage)^2 / overallSampleSizes1 + dataInput$getOverallStDevsUpTo(stage, 2)^2 / overallSampleSizes2) overallPValues <- 1 - stats::pt( overallTestStatistics, 1 / (u^2 / (overallSampleSizes1 - 1) + (1 - u)^2 / (overallSampleSizes2 - 1)) ) } } effectSizes[1:stage] <- dataInput$getOverallMeansUpTo(stage) - dataInput$getOverallMeansUpTo(stage, 2) } if (!directionUpper) { overallPValues <- 1 - overallPValues } # calculation of stage-wise test statistics and combination tests testStatistics <- rep(NA_real_, design$kMax) pValues <- rep(NA_real_, design$kMax) combInverseNormal <- rep(NA_real_, design$kMax) combFisher <- rep(NA_real_, design$kMax) weightsInverseNormal <- .getWeightsInverseNormal(design) weightsFisher <- .getWeightsFisher(design) for (k in 1:stage) { if (dataInput$getNumberOfGroups() == 1) { # stage-wise test statistics testStatistics[k] <- (dataInput$getMean(k) - thetaH0) / dataInput$getStDev(k) * sqrt(dataInput$getSampleSize(k)) if (normalApproximation) { # stage-wise p-values pValues[k] <- 1 - stats::pnorm(testStatistics[k]) } else { pValues[k] <- 1 - stats::pt(testStatistics[k], dataInput$getSampleSize(k) - 1) } } if (dataInput$getNumberOfGroups() == 2) { # stage-wise test statistics if (equalVariances) { testStatistics[k] <- (dataInput$getMean(k, 1) - dataInput$getMean(k, 2) - thetaH0) / sqrt(((dataInput$getSampleSize(k, 1) - 1) * dataInput$getStDev(k, 1)^2 + (dataInput$getSampleSize(k, 2) - 1) * dataInput$getStDev(k, 2)^2) / (dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2) - 2)) / sqrt(1 / dataInput$getSampleSize(k, 1) + 1 / dataInput$getSampleSize(k, 2)) } else { testStatistics[k] <- (dataInput$getMean(k, 1) - dataInput$getMean(k, 2) - thetaH0) / sqrt(dataInput$getStDev(k, 1)^2 / dataInput$getSampleSize(k, 1) + dataInput$getStDev(k, 2)^2 / dataInput$getSampleSize(k, 2)) } if (normalApproximation) { # stage-wise p-values pValues[k] <- 1 - 
stats::pnorm(testStatistics[k]) } else { if (equalVariances) { pValues[k] <- 1 - stats::pt( testStatistics[k], dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2) - 2 ) } else { u <- dataInput$getStDev(k, 1)^2 / dataInput$getSampleSize(k, 1) / (dataInput$getStDev(k, 1)^2 / dataInput$getSampleSize(k, 1) + dataInput$getStDev(k, 2)^2 / dataInput$getSampleSize(k, 2)) pValues[k] <- 1 - stats::pt( testStatistics[k], 1 / (u^2 / (dataInput$getSampleSize(k, 1) - 1) + (1 - u)^2 / (dataInput$getSampleSize(k, 2) - 1)) ) } } } if (!directionUpper) { pValues[k] <- 1 - pValues[k] } # inverse normal test combInverseNormal[k] <- (weightsInverseNormal[1:k] %*% .getOneMinusQNorm(pValues[1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) # Fisher combination test combFisher[k] <- prod(pValues[1:k]^weightsFisher[1:k]) } if (dataInput$getNumberOfGroups() == 1) { stageResults <- StageResultsMeans( design = design, dataInput = dataInput, stage = as.integer(stage), overallTestStatistics = .fillWithNAs(overallTestStatistics, design$kMax), overallPValues = .fillWithNAs(overallPValues, design$kMax), overallMeans = .trimAnalysisMeansResultObjectAndFillWithNAs( dataInput$getOverallMeans(), design$kMax ), overallStDevs = .trimAnalysisMeansResultObjectAndFillWithNAs( dataInput$getOverallStDevs(), design$kMax ), overallSampleSizes = .fillWithNAs(dataInput$getOverallSampleSizesUpTo(stage), design$kMax), testStatistics = testStatistics, effectSizes = effectSizes, pValues = pValues, combInverseNormal = combInverseNormal, combFisher = combFisher, weightsFisher = weightsFisher, weightsInverseNormal = weightsInverseNormal, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER), normalApproximation = normalApproximation, equalVariances = equalVariances ) } else if (dataInput$getNumberOfGroups() == 2) { stageResults <- StageResultsMeans( design = design, dataInput = dataInput, stage = as.integer(stage), overallTestStatistics = .fillWithNAs(overallTestStatistics, 
design$kMax), overallPValues = .fillWithNAs(overallPValues, design$kMax), overallMeans1 = .trimAnalysisMeansResultObjectAndFillWithNAs( dataInput$getOverallMeans(group = 1), design$kMax ), overallMeans2 = .trimAnalysisMeansResultObjectAndFillWithNAs( dataInput$getOverallMeans(group = 2), design$kMax ), overallStDevs1 = .trimAnalysisMeansResultObjectAndFillWithNAs( dataInput$getOverallStDevs(group = 1), design$kMax ), overallStDevs2 = .trimAnalysisMeansResultObjectAndFillWithNAs( dataInput$getOverallStDevs(group = 2), design$kMax ), overallStDevs = overallStDevs, # common variance overallSampleSizes1 = .fillWithNAs(dataInput$getOverallSampleSizesUpTo(stage), design$kMax), overallSampleSizes2 = .fillWithNAs(dataInput$getOverallSampleSizesUpTo(stage, 2), design$kMax), effectSizes = effectSizes, testStatistics = testStatistics, pValues = pValues, combInverseNormal = combInverseNormal, combFisher = combFisher, weightsFisher = weightsFisher, weightsInverseNormal = weightsInverseNormal, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER), normalApproximation = normalApproximation, equalVariances = equalVariances ) } if (.isTrialDesignFisher(design)) { stageResults$.setParameterType("combFisher", C_PARAM_GENERATED) stageResults$.setParameterType("weightsFisher", C_PARAM_GENERATED) } else if (.isTrialDesignInverseNormal(design)) { stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED) stageResults$.setParameterType("weightsInverseNormal", C_PARAM_GENERATED) } return(stageResults) } .trimAnalysisMeansResultObjectAndFillWithNAs <- function(x, kMax) { return(.fillWithNAs(.trimAnalysisMeansResultObject(x, kMax), kMax)) } .trimAnalysisMeansResultObject <- function(x, kMax) { if (is.matrix(x)) { if (ncol(x) <= kMax) { return(x) } return(x[, 1:kMax]) } if (length(x) <= kMax) { return(x) } return(x[1:kMax]) } #' #' Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Means #' #' @noRd #' 
.getRepeatedConfidenceIntervalsMeans <- function(design, ...) { if (.isTrialDesignGroupSequential(design)) { return(.getRepeatedConfidenceIntervalsMeansGroupSequential(design = design, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getRepeatedConfidenceIntervalsMeansInverseNormal(design = design, ...)) } if (.isTrialDesignFisher(design)) { return(.getRepeatedConfidenceIntervalsMeansFisher(design = design, ...)) } .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE) } .getRootThetaMeans <- function(..., design, dataInput, stage, directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaLow, thetaUp, firstParameterName, secondValue, tolerance, callingFunctionInformation = NA_character_) { result <- .getOneDimensionalRoot( function(theta) { stageResults <- .getStageResultsMeans( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- .getOneMinusQNorm(firstValue) } return(firstValue - secondValue) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance, callingFunctionInformation = callingFunctionInformation ) return(result) } .getUpperLowerThetaMeans <- function(..., design, dataInput, theta, stage, directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, conditionFunction, firstParameterName, secondValue) { stageResults <- .getStageResultsMeans( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- .getOneMinusQNorm(firstValue) } maxSearchIterations <- 50 while 
(conditionFunction(secondValue, firstValue)) { theta <- 2 * theta stageResults <- .getStageResultsMeans( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- .getOneMinusQNorm(firstValue) } maxSearchIterations <- maxSearchIterations - 1 if (maxSearchIterations < 0) { stop(sprintf( paste0( "Failed to find theta (k = %s, firstValue = %s, ", "secondValue = %s, levels(firstValue) = %s, theta = %s)" ), stage, stageResults[[firstParameterName]][stage], secondValue, firstValue, theta )) } } return(theta) } .getRepeatedConfidenceIntervalsMeansAll <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) { stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) futilityCorr <- rep(NA_real_, design$kMax) # necessary for adjustment for binding futility boundaries criticalValues <- design$criticalValues criticalValues[is.infinite(criticalValues) & criticalValues > 0] <- C_QNORM_MAXIMUM criticalValues[is.infinite(criticalValues) & criticalValues < 0] <- C_QNORM_MINIMUM if (.isTrialDesignFisher(design)) { bounds <- design$alpha0Vec border <- C_ALPHA_0_VEC_DEFAULT conditionFunction <- .isFirstValueSmallerThanSecondValue } else { bounds <- design$futilityBounds border <- C_FUTILITY_BOUNDS_DEFAULT conditionFunction <- .isFirstValueGreaterThanSecondValue } repeatedConfidenceIntervals <- matrix(NA_real_, nrow = 2, ncol = design$kMax) for (k in 1:stage) { startTime <- Sys.time() if (criticalValues[k] < C_QNORM_MAXIMUM) { # finding maximum upper and minimum lower bounds for RCIs thetaLow <- .getUpperLowerThetaMeans( design = design, 
dataInput = dataInput, theta = -1, stage = k, directionUpper = TRUE, normalApproximation = normalApproximation, equalVariances = equalVariances, conditionFunction = conditionFunction, firstParameterName = firstParameterName, secondValue = criticalValues[k] ) thetaUp <- .getUpperLowerThetaMeans( design = design, dataInput = dataInput, theta = 1, stage = k, directionUpper = FALSE, normalApproximation = normalApproximation, equalVariances = equalVariances, conditionFunction = conditionFunction, firstParameterName = firstParameterName, secondValue = criticalValues[k] ) # finding upper and lower RCI limits through root function repeatedConfidenceIntervals[1, k] <- .getRootThetaMeans( design = design, dataInput = dataInput, stage = k, directionUpper = TRUE, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaLow = thetaLow, thetaUp = thetaUp, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance, callingFunctionInformation = paste0("Repeated confidence interval [1, ", k, "]") ) repeatedConfidenceIntervals[2, k] <- .getRootThetaMeans( design = design, dataInput = dataInput, stage = k, directionUpper = FALSE, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaLow = thetaLow, thetaUp = thetaUp, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance, callingFunctionInformation = paste0("Repeated confidence interval [2, ", k, "]") ) # adjustment for binding futility bounds if (k > 1 && !is.na(bounds[k - 1]) && conditionFunction(bounds[k - 1], border) && design$bindingFutility) { parameterName <- ifelse(.isTrialDesignFisher(design), "pValues", firstParameterName) # Calculate new lower and upper bounds if (directionUpper) { thetaLow <- .getUpperLowerThetaMeans( design = design, dataInput = dataInput, theta = -1, stage = k - 1, directionUpper = TRUE, normalApproximation = normalApproximation, equalVariances = equalVariances, 
conditionFunction = conditionFunction, firstParameterName = parameterName, secondValue = bounds[k - 1] ) } else { thetaUp <- .getUpperLowerThetaMeans( design = design, dataInput = dataInput, theta = 1, stage = k - 1, directionUpper = FALSE, normalApproximation = normalApproximation, equalVariances = equalVariances, conditionFunction = conditionFunction, firstParameterName = parameterName, secondValue = bounds[k - 1] ) } futilityCorr[k] <- .getRootThetaMeans( design = design, dataInput = dataInput, stage = k - 1, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, thetaLow = thetaLow, thetaUp = thetaUp, firstParameterName = parameterName, secondValue = bounds[k - 1], tolerance = tolerance, callingFunctionInformation = paste0("Repeated confidence interval, futility correction [", k, "]") ) if (directionUpper) { repeatedConfidenceIntervals[1, k] <- min(min(futilityCorr[2:k]), repeatedConfidenceIntervals[1, k]) } else { repeatedConfidenceIntervals[2, k] <- max(max(futilityCorr[2:k]), repeatedConfidenceIntervals[2, k]) } } if (!is.na(repeatedConfidenceIntervals[1, k]) && !is.na(repeatedConfidenceIntervals[2, k]) && repeatedConfidenceIntervals[1, k] > repeatedConfidenceIntervals[2, k]) { repeatedConfidenceIntervals[, k] <- rep(NA_real_, 2) } } .logProgress("Repeated confidence interval of stage %s calculated", startTime = startTime, k) } return(repeatedConfidenceIntervals) } #' #' RCIs based on group sequential combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsMeansGroupSequential <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsMeansGroupSequential", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE 
), "stage"), ... ) return(.getRepeatedConfidenceIntervalsMeansAll( design = design, dataInput = dataInput, normalApproximation = normalApproximation, equalVariances = equalVariances, directionUpper = directionUpper, tolerance = tolerance, firstParameterName = "overallPValues", ... )) } #' #' RCIs based on inverse normal combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsMeansInverseNormal <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsMeansInverseNormal", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) return(.getRepeatedConfidenceIntervalsMeansAll( design = design, dataInput = dataInput, normalApproximation = normalApproximation, equalVariances = equalVariances, directionUpper = directionUpper, tolerance = tolerance, firstParameterName = "combInverseNormal", ... )) } #' #' RCIs based on Fisher's combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsMeansFisher <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsMeansFisher", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) return(.getRepeatedConfidenceIntervalsMeansAll( design = design, dataInput = dataInput, normalApproximation = normalApproximation, equalVariances = equalVariances, directionUpper = directionUpper, tolerance = tolerance, firstParameterName = "combFisher", ... 
)) } #' #' Calculation of conditional power based on group sequential method #' #' @noRd #' .getConditionalPowerMeansGroupSequential <- function(..., stageResults, stage = stageResults$stage, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, nPlanned = NA_real_, thetaH1 = NA_real_, assumedStDev = NA_real_) { design <- stageResults$.design .assertIsTrialDesignGroupSequential(design) .assertIsValidStage(stage, design$kMax) assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage) thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerMeansGroupSequential", ignore = c("stage", "design", "stageResultsName", "grid", "stDevH1"), ... ) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) weights <- stageResults$weightsInverseNormal informationRates <- design$informationRates nPlanned <- c(rep(NA, stage), nPlanned) if (stage == kMax) { .logDebug( "Conditional power will be calculated only for subsequent stages ", "(stage = ", stage, ", kMax = ", design$kMax, ")" ) return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } criticalValues <- design$criticalValues if (stageResults$isTwoSampleDataset()) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned } if (stageResults$direction == "upper") { thetaH1 <- (thetaH1 - stageResults$thetaH0) / assumedStDev } else { thetaH1 <- -(thetaH1 - stageResults$thetaH0) / assumedStDev } # shifted decision region for use in getGroupSeqProbs # Group Sequential Method shiftedDecisionRegionUpper <- criticalValues[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - .getOneMinusQNorm(stageResults$overallPValues[stage]) * 
sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (design$sided == 2) { shiftedDecisionRegionLower <- -criticalValues[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) } if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - .getOneMinusQNorm(stageResults$overallPValues[stage]) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - informationRates[stage]) if (design$sided == 2) { decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE) } else { decisionMatrix <- matrix(c( shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper ), nrow = 2, byrow = TRUE) } probs <- .getGroupSequentialProbabilities( decisionMatrix = decisionMatrix, informationRates = scaledInformation ) if (design$twoSidedPower) { conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ]) } else { conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } if (stageResults$isTwoSampleDataset()) { nPlanned <- (1 + 
allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
        # (tail of .getConditionalPowerMeansGroupSequential, which begins before this
        # chunk: back-transformation of the "effective" one-sample sizes to total
        # stage-wise sample sizes in the two-sample case)
    }
    return(list(
        nPlanned = nPlanned,
        conditionalPower = conditionalPower
    ))
}

#'
#' Calculation of conditional power based on inverse normal method
#'
#' @noRd
#'
.getConditionalPowerMeansInverseNormal <- function(..., stageResults, stage = stageResults$stage,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        nPlanned = NA_real_, thetaH1 = NA_real_, assumedStDev = NA_real_) {
    # Conditional power for the means endpoint under the inverse normal combination
    # test. Returns a list with the (possibly transformed) planned stage-wise sample
    # sizes and the conditional power for each stage after 'stage'; entries for
    # already-observed stages are NA.
    design <- stageResults$.design
    .assertIsTrialDesignInverseNormal(design)
    .assertIsValidStage(stage, design$kMax)
    assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage)
    thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalPowerMeansInverseNormal",
        ignore = c("stage", "design", "stageResultsName", "grid", "stDevH1"), ...
    )
    kMax <- design$kMax
    conditionalPower <- rep(NA_real_, kMax)
    weights <- stageResults$weightsInverseNormal
    informationRates <- design$informationRates
    # pad with NA for the stages already observed so that indices align with stages
    nPlanned <- c(rep(NA_real_, stage), nPlanned)
    if (stage == kMax) {
        # no subsequent stages exist: return the all-NA conditional power vector
        .logDebug(
            "Conditional power will be calculated only for subsequent stages ",
            "(stage = ", stage, ", kMax = ", design$kMax, ")"
        )
        return(list(
            nPlanned = nPlanned,
            conditionalPower = conditionalPower
        ))
    }
    criticalValuesInverseNormal <- design$criticalValues
    if (stageResults$isTwoSampleDataset()) {
        .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
        .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)
        # convert total stage-wise sizes to "effective" one-sample sizes;
        # inverted again before returning below
        nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned
    }
    # standardized effect, sign-flipped for direction "lower" so that larger
    # values always point towards rejection
    if (stageResults$direction == "upper") {
        thetaH1 <- (thetaH1 - stageResults$thetaH0) / assumedStDev
    } else {
        thetaH1 <- -(thetaH1 - stageResults$thetaH0) / assumedStDev
    }

    # shifted decision region for use in getGroupSeqProbs
    # Inverse Normal Method
    shiftedDecisionRegionUpper <- criticalValuesInverseNormal[(stage + 1):kMax] *
        sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) /
        sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
        c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) /
        sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
        thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) /
        sqrt(cumsum(weights[(stage + 1):kMax]^2))
    if (design$sided == 2) {
        # mirror image of the upper region for two-sided testing
        shiftedDecisionRegionLower <- -criticalValuesInverseNormal[(stage + 1):kMax] *
            sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) /
            sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
            c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) /
            sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
            thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) /
            sqrt(cumsum(weights[(stage + 1):kMax]^2))
    }
    if (stage == kMax - 1) {
        # no interim stages remain before the final analysis, hence no futility bounds
        shiftedFutilityBounds <- c()
    } else {
        shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] *
            sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) /
            sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
            c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) /
            sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
            thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) /
            sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2))
    }

    # scaled information for use in getGroupSeqProbs
    scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) /
        (1 - informationRates[stage])

    if (design$sided == 2) {
        decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE)
    } else {
        decisionMatrix <- matrix(c(
            shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
            shiftedDecisionRegionUpper
        ), nrow = 2, byrow = TRUE)
    }

    probs <- .getGroupSequentialProbabilities(
        decisionMatrix = decisionMatrix, informationRates = scaledInformation
    )
    # cumulative rejection probabilities over the remaining stages; for
    # twoSidedPower both tails (rows 1 and 3) count as rejection
    if (design$twoSidedPower) {
        conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ])
    } else {
        conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ])
    }
    if (stageResults$isTwoSampleDataset()) {
        # back-transform the effective sizes to total stage-wise sample sizes
        nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
    }
    return(list(
        nPlanned = nPlanned,
        conditionalPower = conditionalPower
    ))
}

#'
#' Calculation of conditional power based on Fisher combination test
#'
#' @noRd
#'
.getConditionalPowerMeansFisher <- function(..., stageResults, stage = stageResults$stage,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        nPlanned = NA_real_, thetaH1 = NA_real_, assumedStDev = NA_real_,
        iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    # Conditional power under Fisher's combination test; simulation-based for more
    # than one remaining stage (see body continuing on the following lines).
    design <- stageResults$.design
    .assertIsTrialDesignFisher(design)
    .assertIsValidStage(stage, design$kMax)
    .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE)
    assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage)
    thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalPowerMeansFisher",
        ignore = c("stage", "design", "stageResultsName", "grid", "stDevH1"), ...
) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) seed <- .setSeed(seed) simulated <- FALSE .assertIsValidNPlanned(nPlanned, kMax, stage) nPlanned <- c(rep(NA_real_, stage), nPlanned) if (stageResults$isTwoSampleDataset()) { .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval( allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM ) nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned } if (stageResults$direction == "upper") { thetaH1 <- (thetaH1 - stageResults$thetaH0) / assumedStDev } else { thetaH1 <- -(thetaH1 - stageResults$thetaH0) / assumedStDev } criticalValues <- design$criticalValues weightsFisher <- stageResults$weightsFisher pValues <- stageResults$pValues if (stage < kMax - 1) { for (k in (stage + 1):kMax) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueConditionalPowerFisher( kMax = kMax, alpha0Vec = design$alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, pValues = pValues, currentKMax = k, thetaH1 = thetaH1, stage = stage, nPlanned = nPlanned ) } conditionalPower[k] <- reject / iterations } simulated <- TRUE } else if (stage == kMax - 1) { divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)]) result <- 1 - (criticalValues[kMax] / divisor)^(1 / weightsFisher[kMax]) if (result <= 0 || result >= 1) { warning("Calculation not possible: could not calculate ", "conditional power for stage ", kMax, call. 
= FALSE ) conditionalPower[kMax] <- NA_real_ } else { conditionalPower[kMax] <- 1 - stats::pnorm(.getQNorm(result) - thetaH1 * sqrt(nPlanned[kMax])) } } if (stageResults$isTwoSampleDataset()) { nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned } return(list( nPlanned = nPlanned, conditionalPower = conditionalPower, iterations = as.integer(iterations), seed = seed, simulated = simulated )) } .getConditionalPowerMeans <- function(..., stageResults, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaH1 = NA_real_, assumedStDev = NA_real_) { stDevH1 <- .getOptionalArgument("stDevH1", ...) if (!is.null(stDevH1) && !is.na(stDevH1)) { if (!is.na(assumedStDev)) { warning(sQuote("assumedStDev"), " will be ignored because ", sQuote("stDevH1"), " is defined", call. = FALSE ) } assumedStDev <- stDevH1 } .assertIsSingleNumber(thetaH1, "thetaH1", naAllowed = TRUE) .assertIsSingleNumber(assumedStDev, "assumedStDev", naAllowed = TRUE) design <- stageResults$.design results <- ConditionalPowerResultsMeans( .stageResults = stageResults, .design = design, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDev = assumedStDev ) if (any(is.na(nPlanned))) { return(results) } if (!.isValidNPlanned(nPlanned = nPlanned, kMax = design$kMax, stage = stageResults$stage)) { return(results) } if (.isTrialDesignGroupSequential(design)) { cp <- .getConditionalPowerMeansGroupSequential( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDev = assumedStDev, ... ) } else if (.isTrialDesignInverseNormal(design)) { cp <- .getConditionalPowerMeansInverseNormal( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDev = assumedStDev, ... 
) } else if (.isTrialDesignFisher(design)) { cp <- .getConditionalPowerMeansFisher( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, assumedStDev = assumedStDev, ... ) results$iterations <- cp$iterations results$seed <- cp$seed results$simulated <- cp$simulated .updateParameterTypeOfIterationsAndSeed(results, ...) } else { .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE) } results$nPlanned <- cp$nPlanned results$conditionalPower <- cp$conditionalPower results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$.setParameterType( "allocationRatioPlanned", ifelse(allocationRatioPlanned == C_ALLOCATION_RATIO_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED) ) results$.setParameterType("thetaH1", ifelse(is.na(thetaH1), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) results$.setParameterType("assumedStDev", ifelse(is.na(assumedStDev), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) return(results) } .getConditionalPowerPlotMeans <- function(..., stageResults, stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaRange, assumedStDev = NA_real_) { .associatedArgumentsAreDefined(nPlanned = nPlanned, thetaRange = thetaRange) .assertIsValidAllocationRatioPlanned( allocationRatioPlanned, stageResults$getDataInput()$getNumberOfGroups() ) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerPlotMeans", ignore = c("iterations", "seed", "stageResultsName", "grid"), ... 
) assumedStDev <- .assertIsValidAssumedStDev(assumedStDev, stageResults, stage) thetaRange <- .assertIsValidThetaRange(thetaRange = thetaRange) condPowerValues <- rep(NA, length(thetaRange)) likelihoodValues <- rep(NA, length(thetaRange)) if (stageResults$isOneSampleDataset()) { stdErr <- stageResults$overallStDevs[stage] / sqrt(stageResults$overallSampleSizes[stage]) } else if (stageResults$isTwoSampleDataset()) { stdErr <- stageResults$overallStDevs[stage] * sqrt(1 / stageResults$overallSampleSizes1[stage] + 1 / stageResults$overallSampleSizes2[stage]) } design <- stageResults$.design warningMessages <- c() withCallingHandlers( for (i in seq(along.with = thetaRange)) { if (.isTrialDesignGroupSequential(design)) { condPowerValues[i] <- .getConditionalPowerMeansGroupSequential( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDev = assumedStDev )$conditionalPower[design$kMax] } else if (.isTrialDesignInverseNormal(design)) { condPowerValues[i] <- .getConditionalPowerMeansInverseNormal( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDev = assumedStDev )$conditionalPower[design$kMax] } else if (.isTrialDesignFisher(design)) { condPowerValues[i] <- .getConditionalPowerMeansFisher( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], assumedStDev = assumedStDev )$conditionalPower[design$kMax] } likelihoodValues[i] <- stats::dnorm( thetaRange[i], stageResults$effectSizes[stage], stdErr ) / stats::dnorm(0, 0, stdErr) }, warning = function(w) { m <- w$message if (!(m %in% warningMessages)) { warningMessages <<- c(warningMessages, m) } invokeRestart("muffleWarning") }, error = function(e) { e } ) if (length(warningMessages) > 0) { for (m in warningMessages) { warning(m, call. 
= FALSE) } } if (stageResults$isOneSampleDataset()) { subtitle <- paste0( "Stage = ", stage, ", # of remaining subjects = ", sum(nPlanned), ", sd = ", .formatSubTitleValue(assumedStDev, "assumedStDev") ) } else { subtitle <- paste0( "Stage = ", stage, ", # of remaining subjects = ", sum(nPlanned), ", sd = ", .formatSubTitleValue(assumedStDev, "assumedStDev"), ", allocation ratio = ", .formatSubTitleValue(allocationRatioPlanned, "allocationRatioPlanned") ) } return(list( xValues = thetaRange, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main = C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, xlab = "Effect size", ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD, sub = subtitle )) } #' #' Calculation of final confidence interval #' based on group sequential test without SSR (general case). #' #' @noRd #' .getFinalConfidenceIntervalMeansGroupSequential <- function(..., design, dataInput, stage, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsMeans( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) finalConfidenceIntervalMeansValues <- .getFinalConfidenceIntervalMeansValues( design, dataInput, stageResults, directionUpper, thetaH0, stage, tolerance ) return(list( stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance, finalStage = finalConfidenceIntervalMeansValues$finalStage, medianUnbiasedGeneral = finalConfidenceIntervalMeansValues$medianUnbiasedGeneral, finalConfidenceIntervalGeneral = finalConfidenceIntervalMeansValues$finalConfidenceIntervalGeneral, medianUnbiased = 
finalConfidenceIntervalMeansValues$medianUnbiased, finalConfidenceInterval = finalConfidenceIntervalMeansValues$finalConfidenceInterval )) } .getFinalConfidenceIntervalMeansValues <- function(design, dataInput, stageResults, directionUpper, thetaH0, stage, tolerance) { finalConfidenceIntervalGeneral <- rep(NA_real_, 2) medianUnbiasedGeneral <- NA_real_ if (.isTrialDesignGroupSequential(design)) { designStage <- .getStageGroupSeq(design = design, stageResults = stageResults, stage = stage) } else { designStage <- .getStageInverseNormal(design = design, stageResults = stageResults, stage = stage) } finalStage <- min(designStage, design$kMax) # early stopping or at end of study if (designStage < design$kMax || stage == design$kMax) { if (designStage == 1) { if (.isTrialDesignGroupSequential(design)) { medianUnbiasedGeneral <- .getOneMinusQNorm(stageResults$overallPValues[1]) } else { medianUnbiasedGeneral <- stageResults$combInverseNormal[1] } finalConfidenceIntervalGeneral[1] <- medianUnbiasedGeneral - .getOneMinusQNorm(design$alpha / design$sided) finalConfidenceIntervalGeneral[2] <- medianUnbiasedGeneral + .getOneMinusQNorm(design$alpha / design$sided) if (dataInput$getNumberOfGroups() == 1) { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral / sqrt(stageResults$overallSampleSizes[1]) medianUnbiasedGeneral <- medianUnbiasedGeneral / sqrt(stageResults$overallSampleSizes[1]) } else { finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral * sqrt(1 / stageResults$overallSampleSizes1[finalStage] + 1 / stageResults$overallSampleSizes2[finalStage]) medianUnbiasedGeneral <- medianUnbiasedGeneral * sqrt(1 / stageResults$overallSampleSizes1[finalStage] + 1 / stageResults$overallSampleSizes2[finalStage]) } } else { if (.isTrialDesignInverseNormal(design) && design$kMax > 2 && !.isNoEarlyEfficacy(design)) { message( "Calculation of final confidence interval performed for kMax = ", design$kMax, " (for kMax > 2, it is theoretically shown that it is valid 
only ", "if no sample size change was performed)" ) } firstParameterName <- ifelse(.isTrialDesignGroupSequential(design), "overallPValues", "combInverseNormal" ) finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot( design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = firstParameterName, case = "finalConfidenceIntervalGeneralLower" ) finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot( design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = firstParameterName, case = "finalConfidenceIntervalGeneralUpper" ) medianUnbiasedGeneral <- .getDecisionMatrixRoot( design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = firstParameterName, case = "medianUnbiasedGeneral" ) } } if (designStage > 1 && is.na(finalConfidenceIntervalGeneral[1])) { finalStage <- NA_integer_ } finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ if (!is.na(finalStage)) { if (designStage == 1) { # retransformation if (dataInput$getNumberOfGroups() == 1) { stdErr <- stageResults$overallStDevs[finalStage] / sqrt(stageResults$overallSampleSizes[finalStage]) } else { stdErr <- stageResults$overallStDevs[finalStage] * sqrt(1 / stageResults$overallSampleSizes1[finalStage] + 1 / stageResults$overallSampleSizes2[finalStage]) } value <- .getOneMinusQNorm(design$alpha / design$sided) * stdErr medianUnbiased <- stageResults$effectSizes[1] finalConfidenceInterval[1] <- medianUnbiased - value finalConfidenceInterval[2] <- medianUnbiased + value } else { directionUpperSign <- ifelse(directionUpper, 1, -1) finalConfidenceInterval <- finalConfidenceIntervalGeneral * stageResults$overallStDevs[finalStage] + directionUpperSign * thetaH0 medianUnbiased <- medianUnbiasedGeneral * stageResults$overallStDevs[finalStage] + directionUpperSign * thetaH0 } } if (!directionUpper) { medianUnbiasedGeneral <- -medianUnbiasedGeneral 
finalConfidenceIntervalGeneral <- -finalConfidenceIntervalGeneral if (designStage > 1) { medianUnbiased <- -medianUnbiased finalConfidenceInterval <- -finalConfidenceInterval } } if (!any(is.na(finalConfidenceIntervalGeneral))) { finalConfidenceIntervalGeneral <- sort(finalConfidenceIntervalGeneral) } if (!any(is.na(finalConfidenceInterval))) { finalConfidenceInterval <- sort(finalConfidenceInterval) } return(list( finalStage = finalStage, medianUnbiasedGeneral = medianUnbiasedGeneral, finalConfidenceIntervalGeneral = finalConfidenceIntervalGeneral, medianUnbiased = medianUnbiased, finalConfidenceInterval = finalConfidenceInterval )) } #' #' Calculation of final confidence interval #' based on inverse normal method, only theoretically shown to be valid for kMax <= 2 or no SSR. #' #' @noRd #' .getFinalConfidenceIntervalMeansInverseNormal <- function(..., design, dataInput, stage, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsMeans( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) finalConfidenceIntervalMeansValues <- .getFinalConfidenceIntervalMeansValues( design, dataInput, stageResults, directionUpper, thetaH0, stage, tolerance ) return(list( stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance, finalStage = finalConfidenceIntervalMeansValues$finalStage, medianUnbiasedGeneral = finalConfidenceIntervalMeansValues$medianUnbiasedGeneral, finalConfidenceIntervalGeneral = finalConfidenceIntervalMeansValues$finalConfidenceIntervalGeneral, medianUnbiased = finalConfidenceIntervalMeansValues$medianUnbiased, 
finalConfidenceInterval = finalConfidenceIntervalMeansValues$finalConfidenceInterval )) } .getQFunctionResultBasedOnDataInput <- function(..., design, dataInput, theta, stage, infRate, directionUpper, normalApproximation, equalVariances) { if (dataInput$getNumberOfGroups() == 1) { stageResults <- .getStageResultsMeans( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation ) } if (dataInput$getNumberOfGroups() == 2) { stageResults <- .getStageResultsMeans( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) } return(.getQFunctionResult( design = design, stageResults = stageResults, theta = theta, infRate = infRate )) } #' #' Calculation of final confidence interval #' based on Fisher combination test, only valid for kMax <= 2. #' #' @noRd #' .getFinalConfidenceIntervalMeansFisher <- function(..., design, dataInput, stage, thetaH0 = C_THETA_H0_MEANS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsMeans( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) stageFisher <- .getStageFisher(design = design, stageResults = stageResults, stage = stage) finalStage <- min(stageFisher, design$kMax) finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ # early stopping or at end of study if (stageFisher < design$kMax || stage == design$kMax) { if (dataInput$getNumberOfGroups() == 1) { infRate <- sqrt(stageResults$overallSampleSizes[1]) stderr <- stageResults$overallStDevs[finalStage] / 
sqrt(stageResults$overallSampleSizes[finalStage]) } else { infRate <- 1 / sqrt(1 / stageResults$overallSampleSizes1[1] + 1 / stageResults$overallSampleSizes2[1]) stderr <- stageResults$overallStDevs[finalStage] * sqrt(1 / stageResults$overallSampleSizes1[finalStage] + 1 / stageResults$overallSampleSizes2[finalStage]) } if (stageFisher == 1) { finalConfidenceInterval[1] <- stageResults$effectSizes[1] - .getOneMinusQNorm(design$alpha / design$sided) * stderr finalConfidenceInterval[2] <- stageResults$effectSizes[1] + .getOneMinusQNorm(design$alpha / design$sided) * stderr medianUnbiased <- stageResults$effectSizes[1] } else { maxSearchIterations <- 50 if (design$kMax >= 1) { message( "Calculation of final confidence interval for Fisher's ", "design not implemented yet" ) return(list( finalStage = NA_integer_, medianUnbiased = NA_real_, finalConfidenceInterval = rep(NA_real_, design$kMax) )) } thetaLow <- -1 .getQFunctionResult( design = design, stageResults = stageResults, theta = thetaLow, infRate = infRate ) iteration <- 0 while (iteration <= maxSearchIterations && .getQFunctionResultBasedOnDataInput( design = design, dataInput = dataInput, theta = thetaLow, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) > design$alpha / design$sided) { thetaLow <- 2 * thetaLow iteration <- iteration + 1 if (iteration == maxSearchIterations) { thetaLow <- -1 } } thetaUp <- 1 iteration <- 0 while (iteration <= maxSearchIterations && .getQFunctionResultBasedOnDataInput( design = design, dataInput = dataInput, theta = thetaUp, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) < 1 - design$alpha / design$sided) { thetaUp <- 2 * thetaUp iteration <- iteration + 1 if (iteration == maxSearchIterations) { thetaUp <- 1 } } finalConfidenceInterval[1] <- .getOneDimensionalRoot( function(theta) { 
return(.getQFunctionResultBasedOnDataInput( design = design, dataInput = dataInput, theta = theta, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) - design$alpha / design$sided) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance, callingFunctionInformation = "Final confidence interval Fisher [1]" ) finalConfidenceInterval[2] <- .getOneDimensionalRoot( function(theta) { return(.getQFunctionResultBasedOnDataInput( design = design, dataInput = dataInput, theta = theta, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) - 1 + design$alpha / design$sided) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance, callingFunctionInformation = "Final confidence interval Fisher [2]" ) medianUnbiased <- .getOneDimensionalRoot( function(theta) { return(.getQFunctionResultBasedOnDataInput( design = design, dataInput = dataInput, theta = theta, stage = finalStage, infRate = infRate, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances ) - 0.5) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance, callingFunctionInformation = "Final confidence interval Fisher, median unbiased" ) } if (is.na(finalConfidenceInterval[1])) { finalStage <- NA_integer_ } } return(list( stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance, finalStage = finalStage, medianUnbiased = medianUnbiased, finalConfidenceInterval = finalConfidenceInterval )) } .getFinalConfidenceIntervalMeans <- function(..., design, dataInput, thetaH0 = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_MEANS_DEFAULT, equalVariances = C_EQUAL_VARIANCES_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) 
{ stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .warnInCaseOfUnknownArguments( functionName = "getFinalConfidenceIntervalMeans", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ... ) if (design$kMax == 1) { return(list( finalStage = NA_integer_, medianUnbiasedGeneral = NA_real_, finalConfidenceIntervalGeneral = c(NA_real_, NA_real_), medianUnbiased = NA_real_, finalConfidenceInterval = c(NA_real_) )) } if (is.na(thetaH0)) { thetaH0 <- C_THETA_H0_MEANS_DEFAULT } if (.isTrialDesignGroupSequential(design)) { return(.getFinalConfidenceIntervalMeansGroupSequential( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance )) } if (.isTrialDesignInverseNormal(design)) { return(.getFinalConfidenceIntervalMeansInverseNormal( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance )) } if (.isTrialDesignFisher(design)) { return(.getFinalConfidenceIntervalMeansFisher( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, equalVariances = equalVariances, tolerance = tolerance )) } .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE) } rpact/R/f_simulation_base_means.R0000644000176200001440000005470514445307576016607 0ustar liggesusers## | ## | *Simulation of continuous data with group sequential and combination test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU 
Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

# Default per-stage sample size (re)calculation rule used by getSimulationMeans.
# If no conditional power target is given, the pre-planned stage-wise increment is
# used; otherwise the stage size is solved from the conditional power equation and
# clamped to [minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage].
# NOTE(review): 'sampleSizesPerStage' is accepted for interface compatibility with
# user-supplied calcSubjectsFunction implementations but is not used by this
# default rule.
.getSimulationMeansStageSubjects <- function(..., stage,
        meanRatio, thetaH0, groups, plannedSubjects,
        allocationRatioPlanned,
        minNumberOfSubjectsPerStage,
        maxNumberOfSubjectsPerStage,
        sampleSizesPerStage,
        thetaH1, stDevH1,
        conditionalPower, conditionalCriticalValue) {
    if (is.na(conditionalPower)) {
        # no conditional power target: keep the originally planned increment
        return(plannedSubjects[stage] - plannedSubjects[stage - 1])
    }
    thetaStandardized <- thetaH1 / stDevH1
    mult <- 1
    if (groups == 2) {
        # variance inflation factor for two groups; setting thetaH0 to 1 for the
        # difference test unifies the formula with the mean-ratio test
        # (at allocation ratio 1 and thetaH0 = 1 this gives the familiar factor 4)
        thetaH0 <- ifelse(meanRatio, thetaH0, 1)
        mult <- 1 + 1 / allocationRatioPlanned[stage] +
            thetaH0^2 * (1 + allocationRatioPlanned[stage])
    }
    # stage size solving the conditional power equation; max(0, ...) guards against
    # negative critical values, max(1e-12, ...) against division by a zero effect
    stageSubjects <- (max(0, conditionalCriticalValue + .getQNorm(conditionalPower)))^2 *
        mult / (max(1e-12, thetaStandardized))^2
    # clamp to the user-specified per-stage bounds
    stageSubjects <- min(
        max(minNumberOfSubjectsPerStage[stage], stageSubjects),
        maxNumberOfSubjectsPerStage[stage]
    )
    return(stageSubjects)
}

#' @title
#' Get Simulation Means
#'
#' @description
#' Returns the simulated power, stopping probabilities, conditional power, and expected sample size
#' for testing means in a one or two treatment groups testing situation.
#'
#' @inheritParams param_design_with_default
#' @inheritParams param_groups
#' @param normalApproximation The type of computation of the p-values. Default is \code{TRUE},
#' i.e., normally distributed test statistics are generated.
#' If \code{FALSE}, the t test is used for calculating the p-values,
#' i.e., t distributed test statistics are generated.
#' @param meanRatio If \code{TRUE}, the design characteristics for #' one-sided testing of H0: \code{mu1 / mu2 = thetaH0} are simulated, default is \code{FALSE}. #' @inheritParams param_thetaH0 #' @inheritParams param_alternative_simulation #' @inheritParams param_stDevSimulation #' @inheritParams param_directionUpper #' @inheritParams param_allocationRatioPlanned #' @inheritParams param_plannedSubjects #' @inheritParams param_minNumberOfSubjectsPerStage #' @inheritParams param_maxNumberOfSubjectsPerStage #' @inheritParams param_conditionalPowerSimulation #' @inheritParams param_thetaH1 #' @inheritParams param_stDevH1 #' @inheritParams param_maxNumberOfIterations #' @inheritParams param_calcSubjectsFunction #' @inheritParams param_seed #' @inheritParams param_three_dots #' @inheritParams param_showStatistics #' #' @details #' At given design the function simulates the power, stopping probabilities, conditional power, and expected #' sample size at given number of subjects and parameter configuration. #' Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number #' of subjects in the two treatment groups. #' #' The definition of \code{thetaH1} makes only sense if \code{kMax} > 1 #' and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and #' \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined. #' #' \code{calcSubjectsFunction}\cr #' This function returns the number of subjects at given conditional power and conditional critical value for specified #' testing situation. The function might depend on variables #' \code{stage}, #' \code{meanRatio}, #' \code{thetaH0}, #' \code{groups}, #' \code{plannedSubjects}, #' \code{sampleSizesPerStage}, #' \code{directionUpper}, #' \code{allocationRatioPlanned}, #' \code{minNumberOfSubjectsPerStage}, #' \code{maxNumberOfSubjectsPerStage}, #' \code{conditionalPower}, #' \code{conditionalCriticalValue}, #' \code{thetaH1}, and #' \code{stDevH1}. 
#' The function has to contain the three-dots argument '...' (see examples). #' #' @section Simulation Data: #' The summary statistics "Simulated data" contains the following parameters: median [range]; mean +/-sd\cr #' #' \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable #' the output of the aggregated simulated data.\cr #' #' Example 1: \cr #' \code{simulationResults <- getSimulationMeans(plannedSubjects = 40)} \cr #' \code{simulationResults$show(showStatistics = FALSE)}\cr #' #' Example 2: \cr #' \code{simulationResults <- getSimulationMeans(plannedSubjects = 40)} \cr #' \code{simulationResults$setShowStatistics(FALSE)}\cr #' \code{simulationResults}\cr #' #' \code{\link[=getData]{getData()}} can be used to get the aggregated simulated data from the #' object as \code{\link[base]{data.frame}}. The data frame contains the following columns: #' \enumerate{ #' \item \code{iterationNumber}: The number of the simulation iteration. #' \item \code{stageNumber}: The stage. #' \item \code{alternative}: The alternative hypothesis value. #' \item \code{numberOfSubjects}: The number of subjects under consideration when the #' (interim) analysis takes place. #' \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise. #' \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise. #' \item \code{testStatistic}: The test statistic that is used for the test decision, #' depends on which design was chosen (group sequential, inverse normal, or Fisher's combination test). #' \item \code{testStatisticsPerStage}: The test statistic for each stage if only data from #' the considered stage is taken into account. #' \item \code{effectEstimate}: Overall simulated standardized effect estimate. #' \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise. 
#' \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for
#' selected sample size and effect. The effect is either estimated from the data or can be
#' user defined with \code{thetaH1}.
#' }
#'
#' @template return_object_simulation_results
#' @template how_to_get_help_for_generics
#'
#' @template examples_get_simulation_means
#'
#' @export
#'
getSimulationMeans <- function(design = NULL, ...,
        groups = 2L,
        normalApproximation = TRUE,
        meanRatio = FALSE,
        thetaH0 = ifelse(meanRatio, 1, 0),
        alternative = seq(0, 1, 0.2), # C_ALTERNATIVE_POWER_SIMULATION_DEFAULT
        stDev = 1, # C_STDEV_DEFAULT
        plannedSubjects = NA_real_,
        directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT
        allocationRatioPlanned = NA_real_,
        minNumberOfSubjectsPerStage = NA_real_,
        maxNumberOfSubjectsPerStage = NA_real_,
        conditionalPower = NA_real_,
        thetaH1 = NA_real_,
        stDevH1 = NA_real_,
        maxNumberOfIterations = 1000L, # C_MAX_SIMULATION_ITERATIONS_DEFAULT
        seed = NA_real_,
        calcSubjectsFunction = NULL,
        showStatistics = FALSE) {
    # If no design is given, derive a default simulation design from '...';
    # otherwise validate the user-supplied design.
    if (is.null(design)) {
        design <- .getDefaultDesign(..., type = "simulation")
        .warnInCaseOfUnknownArguments(
            functionName = "getSimulationMeans",
            ignore = c(
                .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE),
                "showStatistics"
            ), ...
        )
    } else {
        .assertIsTrialDesign(design)
        .warnInCaseOfUnknownArguments(functionName = "getSimulationMeans", ignore = c("showStatistics"), ...)
        .warnInCaseOfTwoSidedPowerArgument(...)
    }
    # --- argument validation -------------------------------------------------
    .assertIsSingleLogical(directionUpper, "directionUpper")
    .assertIsSingleNumber(thetaH0, "thetaH0")
    if (meanRatio) {
        # For a ratio of means the null/alternative effects must be positive;
        # also switch to the mean-ratio default alternative if the user kept
        # the difference-of-means default.
        .assertIsInOpenInterval(thetaH0, "thetaH0", 0, NULL, naAllowed = TRUE)
        .assertIsInOpenInterval(thetaH1, "thetaH1", 0, NULL, naAllowed = TRUE)
        if (identical(alternative, C_ALTERNATIVE_POWER_SIMULATION_DEFAULT)) {
            alternative <- C_ALTERNATIVE_POWER_SIMULATION_MEAN_RATIO_DEFAULT
        }
        .assertIsInOpenInterval(alternative, "alternative", 0, NULL, naAllowed = TRUE)
    }
    .assertIsValidGroupsParameter(groups)
    .assertIsNumericVector(alternative, "alternative", naAllowed = FALSE)
    .assertIsNumericVector(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", naAllowed = TRUE)
    .assertIsNumericVector(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", naAllowed = TRUE)
    .assertIsSingleNumber(conditionalPower, "conditionalPower", naAllowed = TRUE)
    .assertIsInOpenInterval(conditionalPower, "conditionalPower", 0, 1, naAllowed = TRUE)
    .assertIsSingleNumber(thetaH1, "thetaH1", naAllowed = TRUE)
    .assertIsSingleNumber(stDevH1, "stDevH1", naAllowed = TRUE)
    .assertIsInOpenInterval(stDevH1, "stDevH1", 0, NULL, naAllowed = TRUE)
    .assertIsNumericVector(allocationRatioPlanned, "allocationRatioPlanned", naAllowed = TRUE)
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM, naAllowed = TRUE)
    .assertIsSinglePositiveInteger(maxNumberOfIterations, "maxNumberOfIterations", validateType = FALSE)
    .assertIsSingleNumber(seed, "seed", naAllowed = TRUE)
    .assertIsValidStandardDeviation(stDev)
    .assertIsSingleLogical(showStatistics, "showStatistics", naAllowed = FALSE)
    .assertIsSingleLogical(normalApproximation, "normalApproximation", naAllowed = FALSE)
    .assertIsValidPlannedSubjectsOrEvents(design, plannedSubjects, parameterName = "plannedSubjects")

    simulationResults <- SimulationResultsMeans(design, showStatistics = showStatistics)

    if (design$sided == 2) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "only one-sided case is implemented for the simulation design"
        )
    }

    # --- allocation ratio handling -------------------------------------------
    if (groups == 1L) {
        # Single-arm: ratio-type parameters are meaningless and are ignored.
        if (isTRUE(meanRatio)) {
            warning("'meanRatio' (", meanRatio, ") will be ignored ",
                "because it is not applicable for 'groups' = 1",
                call. = FALSE
            )
        }
        if (!is.na(allocationRatioPlanned)) {
            warning("'allocationRatioPlanned' (", allocationRatioPlanned,
                ") will be ignored because it is not applicable for 'groups' = 1",
                call. = FALSE
            )
            simulationResults$allocationRatioPlanned <- NA_real_
        }
        simulationResults$.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE)
    } else {
        if (any(is.na(allocationRatioPlanned))) {
            allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT
        }
        # Expand a scalar allocation ratio to one value per stage.
        if (length(allocationRatioPlanned) == 1) {
            allocationRatioPlanned <- rep(allocationRatioPlanned, design$kMax)
        } else if (length(allocationRatioPlanned) != design$kMax) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'allocationRatioPlanned' (", .arrayToString(allocationRatioPlanned), ") ",
                "must have length 1 or ", design$kMax, " (kMax)"
            )
        }
        if (length(unique(allocationRatioPlanned)) == 1) {
            .setValueAndParameterType(
                simulationResults, "allocationRatioPlanned",
                allocationRatioPlanned[1],
                defaultValue = 1
            )
        } else {
            .setValueAndParameterType(
                simulationResults, "allocationRatioPlanned",
                allocationRatioPlanned,
                defaultValue = rep(1, design$kMax)
            )
        }
    }

    # Parameters that only matter for multi-stage (kMax > 1) designs.
    thetaH1 <- .ignoreParameterIfNotUsed(
        "thetaH1", thetaH1, design$kMax > 1,
        "design is fixed ('kMax' = 1)", "Assumed effect"
    )
    stDevH1 <- .ignoreParameterIfNotUsed(
        "stDevH1", stDevH1, design$kMax > 1,
        "design is fixed ('kMax' = 1)", "Assumed effect"
    )
    if (is.na(conditionalPower) && is.null(calcSubjectsFunction) && !is.na(stDevH1)) {
        warning("'stDevH1' will be ignored because neither 'conditionalPower' nor ",
            "'calcSubjectsFunction' is defined",
            call. = FALSE
        )
    }
    conditionalPower <- .ignoreParameterIfNotUsed(
        "conditionalPower",
        conditionalPower, design$kMax > 1, "design is fixed ('kMax' = 1)"
    )
    minNumberOfSubjectsPerStage <- .ignoreParameterIfNotUsed(
        "minNumberOfSubjectsPerStage",
        minNumberOfSubjectsPerStage, design$kMax > 1, "design is fixed ('kMax' = 1)"
    )
    maxNumberOfSubjectsPerStage <- .ignoreParameterIfNotUsed(
        "maxNumberOfSubjectsPerStage",
        maxNumberOfSubjectsPerStage, design$kMax > 1, "design is fixed ('kMax' = 1)"
    )
    minNumberOfSubjectsPerStage <- .assertIsValidNumberOfSubjectsPerStage(minNumberOfSubjectsPerStage,
        "minNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, calcSubjectsFunction, design$kMax,
        endpoint = "means"
    )
    maxNumberOfSubjectsPerStage <- .assertIsValidNumberOfSubjectsPerStage(maxNumberOfSubjectsPerStage,
        "maxNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, calcSubjectsFunction, design$kMax,
        endpoint = "means"
    )
    if (design$kMax > 1) {
        # The exact (t-test based) simulation needs at least 2 subjects per arm per stage.
        if (!normalApproximation) {
            if (!all(is.na(minNumberOfSubjectsPerStage)) &&
                    (any(minNumberOfSubjectsPerStage < groups * 2))) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "minNumberOfSubjectsPerStage not correctly specified"
                )
            }
        }
        if (any(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage < 0) &&
                !all(is.na(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage))) {
            # NOTE: fixed missing opening quote before 'minNumberOfSubjectsPerStage'
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage),
                ") must be not smaller than 'minNumberOfSubjectsPerStage' (",
                .arrayToString(minNumberOfSubjectsPerStage), ")"
            )
        }
        .setValueAndParameterType(
            simulationResults, "minNumberOfSubjectsPerStage",
            minNumberOfSubjectsPerStage, NA_real_
        )
        .setValueAndParameterType(
            simulationResults, "maxNumberOfSubjectsPerStage",
            maxNumberOfSubjectsPerStage, NA_real_
        )
    }
    if (!is.na(conditionalPower) && design$kMax == 1) {
        warning("'conditionalPower' will be ignored for fixed sample design", call. = FALSE)
    }
    if (!is.null(calcSubjectsFunction) && design$kMax == 1) {
        warning("'calcSubjectsFunction' will be ignored for fixed sample design", call. = FALSE)
    }
    # Without a conditional-power target or a custom sample size recalculation
    # function, the per-stage bounds have no effect.
    if (is.na(conditionalPower) && is.null(calcSubjectsFunction)) {
        if (length(minNumberOfSubjectsPerStage) != 1 || !is.na(minNumberOfSubjectsPerStage)) {
            warning("'minNumberOfSubjectsPerStage' (",
                .arrayToString(minNumberOfSubjectsPerStage), ") ",
                "will be ignored because neither 'conditionalPower' nor ",
                "'calcSubjectsFunction' is defined",
                call. = FALSE
            )
            simulationResults$minNumberOfSubjectsPerStage <- NA_real_
        }
        if (length(maxNumberOfSubjectsPerStage) != 1 || !is.na(maxNumberOfSubjectsPerStage)) {
            warning("'maxNumberOfSubjectsPerStage' (",
                .arrayToString(maxNumberOfSubjectsPerStage), ") ",
                "will be ignored because neither 'conditionalPower' nor ",
                "'calcSubjectsFunction' is defined",
                call. = FALSE
            )
            simulationResults$maxNumberOfSubjectsPerStage <- NA_real_
        }
    }
    simulationResults$.setParameterType(
        "calcSubjectsFunction",
        ifelse(design$kMax == 1, C_PARAM_NOT_APPLICABLE,
            ifelse(!is.null(calcSubjectsFunction) && design$kMax > 1,
                C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE
            )
        )
    )

    # --- record parameters on the result object ------------------------------
    effect <- alternative - thetaH0
    simulationResults$effect <- effect
    simulationResults$.setParameterType(
        "effect",
        ifelse(thetaH0 == 0, C_PARAM_NOT_APPLICABLE, C_PARAM_GENERATED)
    )
    .setValueAndParameterType(simulationResults, "normalApproximation", normalApproximation, TRUE)
    .setValueAndParameterType(simulationResults, "meanRatio", meanRatio, FALSE)
    .setValueAndParameterType(simulationResults, "thetaH0", thetaH0, ifelse(meanRatio, 1, 0))
    .setValueAndParameterType(
        simulationResults, "alternative",
        alternative, C_ALTERNATIVE_POWER_SIMULATION_DEFAULT
    )
    .setValueAndParameterType(simulationResults, "stDev", stDev, C_STDEV_DEFAULT)
    .setValueAndParameterType(simulationResults, "groups", as.integer(groups), 2L)
    .setValueAndParameterType(
        simulationResults, "plannedSubjects",
        plannedSubjects, NA_real_
    )
    .setValueAndParameterType(
        simulationResults, "directionUpper",
        directionUpper, C_DIRECTION_UPPER_DEFAULT
    )
    .setValueAndParameterType(simulationResults, "minNumberOfSubjectsPerStage",
        minNumberOfSubjectsPerStage, NA_real_,
        notApplicableIfNA = TRUE
    )
    .setValueAndParameterType(simulationResults, "maxNumberOfSubjectsPerStage",
        maxNumberOfSubjectsPerStage, NA_real_,
        notApplicableIfNA = TRUE
    )
    .setValueAndParameterType(simulationResults, "conditionalPower",
        conditionalPower, NA_real_,
        notApplicableIfNA = TRUE
    )
    .setValueAndParameterType(simulationResults, "thetaH1",
        thetaH1, NA_real_,
        notApplicableIfNA = TRUE
    )
    .setValueAndParameterType(simulationResults, "stDevH1",
        stDevH1, NA_real_,
        notApplicableIfNA = TRUE
    )
    .setValueAndParameterType(
        simulationResults, "maxNumberOfIterations",
        as.integer(maxNumberOfIterations), C_MAX_SIMULATION_ITERATIONS_DEFAULT
    )
    simulationResults$.setParameterType("seed", ifelse(is.na(seed),
        C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
    ))
    simulationResults$seed <- .setSeed(seed)

    # Encode the design class for the C++ simulation kernel.
    if (.isTrialDesignGroupSequential(design)) {
        designNumber <- 1L
    } else if (.isTrialDesignInverseNormal(design)) {
        designNumber <- 2L
    } else if (.isTrialDesignFisher(design)) {
        designNumber <- 3L
    }
    # Fisher designs use alpha0Vec for futility; the others use futilityBounds.
    if (.isTrialDesignFisher(design)) {
        alpha0Vec <- design$alpha0Vec
        futilityBounds <- rep(NA_real_, design$kMax - 1)
    } else {
        alpha0Vec <- rep(NA_real_, design$kMax - 1)
        futilityBounds <- design$futilityBounds
    }
    # Default the assumed standard deviation under H1 to the planning value.
    if (is.na(stDevH1)) {
        stDevH1 <- stDev
    }

    calcSubjectsFunctionList <- .getCalcSubjectsFunction(
        design = design,
        simulationResults = simulationResults,
        calcFunction = calcSubjectsFunction,
        expectedFunction = .getSimulationMeansStageSubjects
    )
    calcSubjectsFunctionType <- calcSubjectsFunctionList$calcSubjectsFunctionType
    calcSubjectsFunctionR <- calcSubjectsFunctionList$calcSubjectsFunctionR
    calcSubjectsFunctionCpp <- calcSubjectsFunctionList$calcSubjectsFunctionCpp

    # --- run the simulation loop (C++) ---------------------------------------
    cppResult <- getSimulationMeansLoopCpp(
        alternative = alternative,
        kMax = design$kMax,
        maxNumberOfIterations = maxNumberOfIterations,
        designNumber = designNumber,
        informationRates = design$informationRates,
        futilityBounds = futilityBounds,
        alpha0Vec = alpha0Vec,
        criticalValues = design$criticalValues,
        meanRatio = meanRatio,
        thetaH0 = thetaH0,
        stDev = stDev,
        groups = groups,
        normalApproximation = normalApproximation,
        plannedSubjects = plannedSubjects,
        directionUpper = directionUpper,
        allocationRatioPlanned = allocationRatioPlanned,
        minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage,
        maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage,
        conditionalPower = conditionalPower,
        thetaH1 = thetaH1,
        stDevH1 = stDevH1,
        calcSubjectsFunctionType = calcSubjectsFunctionType,
        calcSubjectsFunctionR = calcSubjectsFunctionR,
        calcSubjectsFunctionCpp = calcSubjectsFunctionCpp
    )

    # --- collect results ------------------------------------------------------
    sampleSizes <- cppResult$sampleSizes
    sampleSizes[is.na(sampleSizes)] <- 0
    simulationResults$iterations <- cppResult$iterations
    simulationResults$sampleSizes <- sampleSizes
    simulationResults$rejectPerStage <- cppResult$rejectPerStage
    simulationResults$overallReject <- cppResult$overallReject
    simulationResults$futilityPerStage <- cppResult$futilityPerStage
    simulationResults$futilityStop <- cppResult$futilityStop
    if (design$kMax > 1) {
        # Early stop = futility stops plus rejections before the final stage,
        # accumulated per 'alternative' value.
        if (length(alternative) == 1) {
            simulationResults$earlyStop <- sum(cppResult$futilityPerStage) +
                sum(cppResult$rejectPerStage[1:(design$kMax - 1)])
        } else {
            if (design$kMax > 2) {
                rejectPerStageColSum <- colSums(cppResult$rejectPerStage[1:(design$kMax - 1), ])
            } else {
                rejectPerStageColSum <- cppResult$rejectPerStage[1, ]
            }
            simulationResults$earlyStop <- colSums(cppResult$futilityPerStage) + rejectPerStageColSum
        }
    } else {
        simulationResults$earlyStop <- rep(0, length(alternative))
    }
    simulationResults$expectedNumberOfSubjects <- cppResult$expectedNumberOfSubjects
    simulationResults$conditionalPowerAchieved <- cppResult$conditionalPowerAchieved
    if (!all(is.na(simulationResults$conditionalPowerAchieved))) {
        simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED)
    }

    # Raw per-iteration data: drop rows without an alternative and, for
    # non-Fisher designs, the 'pValue' column.
    data <- cppResult$data
    data <- data[!is.na(data$alternative), ]
    if (designNumber != 3L) {
        data <- data[, colnames(data) != "pValue"]
    }
    data$trialStop <- as.logical(data$trialStop)
    simulationResults$.data <- data

    return(simulationResults)
}
rpact/R/class_design_set.R0000644000176200001440000011545314445307575015247 0ustar liggesusers## |
## | *Trial design set classes*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_core_plot.R
#' @include f_core_utilities.R
NULL

#' @title
#' Get Design Set
#'
#' @description
#' Creates a trial design set object and returns it.
#'
#' @param ... \code{designs} or \code{design} and one or more design parameters, e.g., \code{deltaWT = c(0.1, 0.3, 0.4)}.
#' \itemize{
#'   \item \code{design} The master design (optional, you need to specify an
#'         additional parameter that shall be varied).
#'   \item \code{designs} The designs to compare (optional, you need to specify the variable \code{variedParameters}).
#' }
#'
#' @details
#' Specify a master design and one or more design parameters or a list of designs.
#'
#' @return Returns a \code{\link{TrialDesignSet}} object.
#' The following generics (R generic functions) are available for this result object:
#' \itemize{
#'   \item \code{\link[=names.TrialDesignSet]{names}} to obtain the field names,
#'   \item \code{\link[=length.TrialDesignSet]{length}} to obtain the number of designs,
#'   \item \code{\link[=print.FieldSet]{print()}} to print the object,
#'   \item \code{\link[=summary.TrialDesignSet]{summary()}} to display a summary of the object,
#'   \item \code{\link[=plot.TrialDesignSet]{plot()}} to plot the object,
#'   \item \code{\link[=as.data.frame.TrialDesignSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}},
#'   \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}.
#' }
#' @template how_to_get_help_for_generics
#'
#' @examples
#' # Example 1
#' design <- getDesignGroupSequential(
#'     alpha = 0.05, kMax = 6,
#'     sided = 2, typeOfDesign = "WT", deltaWT = 0.1
#' )
#' designSet <- getDesignSet()
#' designSet$add(design = design, deltaWT = c(0.3, 0.4))
#' \dontrun{
#' if (require(ggplot2)) plot(designSet, type = 1)
#' }
#'
#' # Example 2 (shorter script)
#' design <- getDesignGroupSequential(
#'     alpha = 0.05, kMax = 6,
#'     sided = 2, typeOfDesign = "WT", deltaWT = 0.1
#' )
#' designSet <- getDesignSet(design = design, deltaWT = c(0.3, 0.4))
#' \dontrun{
#' if (require(ggplot2)) plot(designSet, type = 1)
#' }
#'
#' # Example 3 (use of designs instead of design)
#' d1 <- getDesignGroupSequential(
#'     alpha = 0.05, kMax = 2,
#'     sided = 1, beta = 0.2, typeOfDesign = "asHSD",
#'     gammaA = 0.5, typeBetaSpending = "bsHSD", gammaB = 0.5
#' )
#' d2 <- getDesignGroupSequential(
#'     alpha = 0.05, kMax = 4,
#'     sided = 1, beta = 0.2, typeOfDesign = "asP",
#'     typeBetaSpending = "bsP"
#' )
#' designSet <- getDesignSet(
#'     designs = c(d1, d2),
#'     variedParameters = c("typeOfDesign", "kMax")
#' )
#' \dontrun{
#' if (require(ggplot2)) plot(designSet, type = 8, nMax = 20)
#' }
#'
#' @export
#'
getDesignSet <- function(...)
{
    # All arguments are forwarded to the TrialDesignSet initializer,
    # which dispatches them via its add() method.
    return(TrialDesignSet(...))
}

#'
#' @title
#' Trial Design Set Summary
#'
#' @description
#' Displays a summary of a \code{\link{ParameterSet}} object.
#'
#' @param object A \code{\link{ParameterSet}} object.
#' @inheritParams param_digits
#' @inheritParams param_three_dots
#'
#' @details
#' Summarizes the trial designs.
#'
#' @template details_summary
#'
#' @template return_object_summary
#' @template how_to_get_help_for_generics
#'
#' @export
#'
#' @keywords internal
#'
summary.TrialDesignSet <- function(object, ..., type = 1, digits = NA_integer_) {
    .warnInCaseOfUnknownArguments(functionName = "summary.TrialDesignSet", ...)
    .assertIsTrialDesignSet(object)
    if (object$isEmpty()) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "cannot create summary because the design set is empty")
    }
    # Concatenate one summary object per contained design.
    summaries <- list()
    for (design in object$designs) {
        s <- .createSummary(design, digits = digits)
        summaries <- c(summaries, s)
    }
    return(summaries)
}

#'
#' @name TrialDesignSet
#'
#' @title
#' Class for trial design sets.
#'
#' @description
#' \code{TrialDesignSet} is a class for creating a collection of different trial designs.
#'
#' @template field_designs
#' @template field_design
#' @template field_variedParameters
#'
#' @details
#' This object cannot be created directly; better use \code{\link[=getDesignSet]{getDesignSet()}}
#' with suitable arguments to create a set of designs.
#'
#' @seealso \code{\link[=getDesignSet]{getDesignSet()}}
#'
#' @include class_core_parameter_set.R
#' @include class_core_plot_settings.R
#' @include f_core_plot.R
#' @include f_logger.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
TrialDesignSet <- setRefClass("TrialDesignSet",
    contains = "FieldSet",
    fields = list(
        .plotSettings = "PlotSettings",
        designs = "list",
        variedParameters = "character"
    ),
    methods = list(
        #
        # @param ... 'designs' OR 'design' and one or more design parameters, e.g., deltaWT = c(0.1, 0.3, 0.4)
        #
        initialize = function(...)
        {
            .plotSettings <<- PlotSettings()
            designs <<- list()
            variedParameters <<- character(0)
            if (length(list(...)) > 0) {
                add(...)
            }
            # Inherit the plot settings from the design master (first design), if any.
            if (length(designs) > 0) {
                masterDesign <- designs[[1]]
                if (inherits(masterDesign, "ParameterSet")) {
                    .self$.plotSettings <<- masterDesign$.plotSettings
                }
            }
        },
        getPlotSettings = function() {
            return(.plotSettings)
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing trial design sets"
            .resetCat()
            .cat("Trial design set with ", length(designs), " designs\n\n",
                heading = 1,
                consoleOutputEnabled = consoleOutputEnabled
            )
            for (design in designs) {
                design$.show(showType = showType, consoleOutputEnabled = consoleOutputEnabled)
            }
        },
        isEmpty = function() {
            return(length(designs) == 0)
        },
        getSize = function() {
            return(length(designs))
        },
        # The design master is by convention the first design in the set.
        getDesignMaster = function() {
            if (length(designs) == 0) {
                stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "no design master defined")
            }
            return(designs[[1]])
        },
        # Validates and appends the designs given via the 'designs' argument;
        # non-design objects are replaced by their parent design (field '.design') if available.
        .validateDesignsArgument = function(designsToAdd, args) {
            if (!is.list(designsToAdd)) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designsToAdd' must be a list")
            }
            if (length(designsToAdd) == 0) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'designsToAdd' must be not empty")
            }
            designsToAddValidated <- list()
            for (d in designsToAdd) {
                if (.isTrialDesign(d)) {
                    designsToAddValidated <- c(designsToAddValidated, d)
                } else {
                    parentDesign <- d[[".design"]]
                    if (is.null(parentDesign)) {
                        stop(
                            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                            "'designsToAdd' must be a list of trial designs (found '", .getClassName(d), "')"
                        )
                    }
                    warning("Only the parent design of ", .getClassName(d),
                        " was added to trial design set",
                        call. = FALSE
                    )
                    designsToAddValidated <- c(designsToAddValidated, parentDesign)
                }
            }
            varPar <- args[["variedParameters"]]
            if (!is.null(varPar) && length(varPar) > 0) {
                variedParameters <<- c(variedParameters, varPar)
            }
            # All remaining named arguments are ignored when 'designs' is used.
            args <- args[!(names(args) %in% c("designs", "variedParameters"))]
            if (length(args) > 0) {
                warning("Argument", ifelse(length(args) > 1, "s", ""), " ",
                    .arrayToString(args, encapsulate = TRUE), " will be ignored ",
                    "because for 'designs' only argument 'variedParameters' will be respected",
                    call. = FALSE
                )
            }
            designs <<- c(designs, designsToAddValidated)
        },
        addVariedParameters = function(varPar) {
            if (is.null(varPar) || !is.character(varPar)) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'varPar' must be a valid character vector")
            }
            variedParameters <<- c(variedParameters, varPar)
        },
        # Dispatches the '...' arguments of add(): either 'designs', or an optional
        # 'design' plus varied design parameters. Returns the design to vary (invisibly)
        # or NULL if only 'designs' was given.
        .validateOptionalArguments = function(...) {
            args <- list(...)
            designsToAdd <- .getOptionalArgument(optionalArgumentName = "designs", ...)
            if (!is.null(designsToAdd)) {
                .validateDesignsArgument(designsToAdd = designsToAdd, args = args)
                return(NULL)
            }
            design <- .getOptionalArgument(optionalArgumentName = "design", ...)
            optionalArgumentsDefined <- (length(args) > 0)
            if (is.null(design) && !optionalArgumentsDefined) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "please specify a 'design' to add and/or a design parameter, ",
                    "e.g., deltaWT = c(0.1, 0.3, 0.4)"
                )
            }
            if (is.null(design) && optionalArgumentsDefined && length(designs) == 0) {
                stop(
                    C_EXCEPTION_TYPE_INCOMPLETE_ARGUMENTS,
                    "at least one design (master) must be defined in this ",
                    "design set to respect any design parameters"
                )
            }
            if (!is.null(design)) {
                designs <<- c(designs, design)
            } else if (length(designs) > 0) {
                design <- designs[[1]] # use design master
            }
            if (!.isTrialDesign(design)) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'design' (", .getClassName(design), ") must be an instance of class 'TrialDesign'"
                )
            }
            .getArgumentNames(validatedDesign = design, ...)
            invisible(design)
        },
        # Extracts the names of the varied design parameters from '...' and checks
        # that each one is a visible field of the design.
        .getArgumentNames = function(validatedDesign, ...) {
            args <- list(...)
            if (length(args) == 0) {
                return(character(0))
            }
            argumentNames <- names(args)
            if (length(argumentNames) == 0) {
                warning("No argument names available for ", paste(args, collapse = ", "), call. = FALSE)
                return(character(0))
            }
            argumentNames <- argumentNames[nchar(argumentNames) != 0]
            argumentNames <- argumentNames[!(argumentNames %in% c("design", "designs", "singleDesign"))]
            visibleFieldNames <- validatedDesign$.getVisibleFieldNames()
            for (arg in argumentNames) {
                if (!(arg %in% visibleFieldNames)) {
                    stop(sprintf(paste0(
                        C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                        "'%s' does not contain a field with name '%s'"
                    ), .getClassName(validatedDesign), arg))
                }
            }
            invisible(argumentNames)
        },
        add = function(...) {
            "Adds 'designs' OR a 'design' and/or a design parameter, e.g., deltaWT = c(0.1, 0.3, 0.4)"
            design <- .validateOptionalArguments(...)
            args <- list(...)
            singleDesign <- args[["singleDesign"]]
            # With singleDesign = TRUE only the design itself is added, no variants.
            if (!is.null(singleDesign) && is.logical(singleDesign) && singleDesign) {
                return(invisible())
            }
            if (!is.null(design)) {
                d <- .createDesignVariants(validatedDesign = design, ...)
                designs <<- c(designs, d)
            }
        },
        assertHaveEqualSidedValues = function() {
            if (length(designs) == 0) {
                return(invisible())
            }
            sided <- getDesignMaster()$sided
            for (design in designs) {
                if (sided != design$sided) {
                    stop(
                        C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                        "designs have different directions of alternative (design master is ",
                        ifelse(sided == 1, "one", "two"), " sided)"
                    )
                }
            }
        },
        # Creates all design variants for up to 2 varied parameters.
        .createDesignVariants = function(validatedDesign, ...) {
            .assertIsTrialDesign(validatedDesign)
            argumentNames <- .getArgumentNames(validatedDesign = validatedDesign, ...)
            if (length(argumentNames) == 0) {
                warning("Creation of design variants stopped: no valid design parameters found", call. = FALSE)
                return(list())
            }
            if (length(argumentNames) > 2) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "too many arguments (", .arrayToString(argumentNames, encapsulate = TRUE),
                    "): up to 2 design parameters are allowed"
                )
            }
            designVariants <- .createDesignVariantsRecursive(
                designMaster = validatedDesign,
                args = list(...), argumentIndex = 1, argumentNames = argumentNames
            )
            return(designVariants)
        },
        # Checks whether a design with the given parameter value (and, for two varied
        # parameters, the given preceding parameter value) already exists in the set.
        .designSettingExists = function(parameterName, parameterValue,
                numberOfArguments = 1,
                parameterNameBefore = NULL, parameterValueBefore = NULL) {
            if (length(designs) == 0) {
                return(FALSE)
            }
            for (design in designs) {
                if (!is.null(parameterNameBefore) && !is.null(parameterValueBefore)) {
                    if (design[[parameterNameBefore]] == parameterValueBefore &&
                            design[[parameterName]] == parameterValue) {
                        return(TRUE)
                    }
                } else if (numberOfArguments == 1) {
                    if (design[[parameterName]] == parameterValue) {
                        return(TRUE)
                    }
                }
            }
            return(FALSE)
        },
        # Recursively builds the cross product of the varied parameter values,
        # skipping combinations that already exist in the set.
        .createDesignVariantsRecursive = function(designMaster, args, argumentIndex,
                argumentNames, parameterNameBefore = NULL, parameterValueBefore = NULL) {
            if (argumentIndex > length(argumentNames)) {
                return(list())
            }
            designVariants <- list()
            argumentName <- argumentNames[argumentIndex]
            variedParameters <<- unique(c(variedParameters, argumentName))
            argumentValues <- args[[argumentName]]
            for (argumentValue in argumentValues) {
                if (.designSettingExists(argumentName, argumentValue,
                    numberOfArguments = length(argumentNames),
                    parameterNameBefore, parameterValueBefore
                )) {
                    if (!is.null(parameterNameBefore) && !is.null(parameterValueBefore)) {
                        warning(sprintf(
                            "Argument ignored: there exists already a design with %s = %s (%s = %s)",
                            argumentName, argumentValue, parameterNameBefore, parameterValueBefore
                        ), call. = FALSE)
                    } else {
                        warning(sprintf(
                            "Argument ignored: there exists already a design with %s = %s",
                            argumentName, argumentValue
                        ), call. = FALSE)
                    }
                } else {
                    designMaster2 <- .createDesignVariant(
                        designMaster = designMaster,
                        argumentName = argumentName, argumentValue = argumentValue
                    )
                    # Only variants created at the innermost recursion level are collected here.
                    if (argumentIndex == length(argumentNames)) {
                        if (is.null(parameterNameBefore) || is.null(parameterValueBefore)) {
                            .logDebug("Create design variant %s = %s", argumentName, argumentValue)
                        } else {
                            .logDebug(
                                "Create design variant %s = %s (%s = %s)",
                                argumentName, argumentValue, parameterNameBefore, parameterValueBefore
                            )
                        }
                        designVariants <- c(designVariants, designMaster2)
                    }
                    designCopies2 <- .createDesignVariantsRecursive(
                        designMaster = designMaster2, args = args,
                        argumentIndex = argumentIndex + 1, argumentNames = argumentNames,
                        parameterNameBefore = argumentName, parameterValueBefore = argumentValue
                    )
                    if (length(designCopies2) > 0) {
                        designVariants <- c(designVariants, designCopies2)
                    }
                }
            }
            return(designVariants)
        },
        # Creates a single design variant: takes the master's user-defined parameters,
        # overrides one of them, and constructs a fresh design of the same type.
        .createDesignVariant = function(designMaster, argumentName, argumentValue) {
            if (.isTrialDesignGroupSequential(designMaster)) {
                defaultValues <- .getDesignGroupSequentialDefaultValues()
            } else if (.isTrialDesignInverseNormal(designMaster)) {
                defaultValues <- .getDesignInverseNormalDefaultValues()
            } else if (.isTrialDesignFisher(designMaster)) {
                defaultValues <- .getDesignFisherDefaultValues()
            }
            for (userDefinedParamName in designMaster$.getUserDefinedParameters()) {
                defaultValues[[userDefinedParamName]] <- designMaster[[userDefinedParamName]]
            }
            defaultValues[[argumentName]] <- argumentValue
            if (.isTrialDesignGroupSequential(designMaster)) {
                result <- getDesignGroupSequential(
                    kMax = defaultValues$kMax,
                    alpha = defaultValues$alpha,
                    beta = defaultValues$beta,
                    sided = defaultValues$sided,
                    informationRates = defaultValues$informationRates,
                    futilityBounds = defaultValues$futilityBounds,
                    typeOfDesign = defaultValues$typeOfDesign,
                    deltaWT = defaultValues$deltaWT,
                    optimizationCriterion = defaultValues$optimizationCriterion,
                    gammaA = defaultValues$gammaA,
                    typeBetaSpending = defaultValues$typeBetaSpending,
                    userAlphaSpending = defaultValues$userAlphaSpending,
                    userBetaSpending = defaultValues$userBetaSpending,
                    gammaB = defaultValues$gammaB,
                    tolerance = defaultValues$tolerance
                )
            } else if (.isTrialDesignInverseNormal(designMaster)) {
                result <- getDesignInverseNormal(
                    kMax = defaultValues$kMax,
                    alpha = defaultValues$alpha,
                    beta = defaultValues$beta,
                    sided = defaultValues$sided,
                    informationRates = defaultValues$informationRates,
                    futilityBounds = defaultValues$futilityBounds,
                    typeOfDesign = defaultValues$typeOfDesign,
                    deltaWT = defaultValues$deltaWT,
                    optimizationCriterion = defaultValues$optimizationCriterion,
                    gammaA = defaultValues$gammaA,
                    typeBetaSpending = defaultValues$typeBetaSpending,
                    userAlphaSpending = defaultValues$userAlphaSpending,
                    userBetaSpending = defaultValues$userBetaSpending,
                    gammaB = defaultValues$gammaB,
                    tolerance = defaultValues$tolerance
                )
            } else if (.isTrialDesignFisher(designMaster)) {
                result <- getDesignFisher(
                    kMax = defaultValues$kMax,
                    alpha = defaultValues$alpha,
                    method = defaultValues$method,
                    userAlphaSpending = defaultValues$userAlphaSpending,
                    informationRates = defaultValues$informationRates,
                    alpha0Vec = defaultValues$alpha0Vec,
                    sided = defaultValues$sided,
                    tolerance = defaultValues$tolerance,
                    iterations = defaultValues$iterations,
                    seed = defaultValues$seed
                )
            }
            result$.plotSettings <- designMaster$.plotSettings
            return(result)
        }
    )
)

#'
#' @title
#' Access Trial Design by Index
#'
#' @description
#' Function to get the \code{TrialDesign} at position \code{i} in a \code{TrialDesignSet} object.
#'
#' @details
#' Can be used to iterate over all designs in a design set.
#'
#' @examples
#' designSet <- getDesignSet(design = getDesignFisher(), alpha = c(0.01, 0.05))
#' for (i in 1:length(designSet)) {
#'     print(designSet[i]$alpha)
#' }
#'
#' @export
#'
#' @keywords internal
#'
setMethod(
    "[", "TrialDesignSet",
    function(x, i, j = NA_character_, ...)
{ if (length(x$designs) == 0) { return(NULL) } design <- x$designs[[i]] if (!missing(j) && !is.na(j) && is.character(j)) { return(design[[j]]) } return(design) } ) #' #' @title #' Names of a Trial Design Set Object #' #' @description #' Function to get the names of a \code{\link{TrialDesignSet}} object. #' #' @param x A \code{\link{TrialDesignSet}} object. #' #' @details #' Returns the names of a design set that can be accessed by the user. #' #' @template return_names #' #' @examples #' designSet <- getDesignSet(design = getDesignGroupSequential(), alpha = c(0.01, 0.05)) #' names(designSet) #' #' @export #' #' @keywords internal #' names.TrialDesignSet <- function(x) { return(x$.getVisibleFieldNames()) } #' #' @title #' Length of Trial Design Set #' #' @description #' Returns the number of designs in a \code{TrialDesignSet}. #' #' @param x A \code{\link{TrialDesignSet}} object. #' #' @details #' Is helpful for iteration over all designs in a design set. #' #' @return Returns a non-negative \code{\link[base]{integer}} of length 1 #' representing the number of design in the \code{TrialDesignSet}. #' #' @examples #' designSet <- getDesignSet(design = getDesignGroupSequential(), alpha = c(0.01, 0.05)) #' length(designSet) #' #' @export #' #' @keywords internal #' length.TrialDesignSet <- function(x) { return(length(x$designs)) } #' #' @title #' Coerce Trial Design Set to a Data Frame #' #' @description #' Returns the \code{TrialDesignSet} as data frame. #' #' @param x A \code{\link{TrialDesignSet}} object. #' @inheritParams param_niceColumnNamesEnabled #' @inheritParams param_includeAllParameters #' @param addPowerAndAverageSampleNumber If \code{TRUE}, power and average sample size will #' be added to data frame, default is \code{FALSE}. #' @inheritParams param_theta #' @inheritParams param_nMax #' @inheritParams param_three_dots #' #' @details #' Coerces the design set to a data frame. 
#'
#' @template return_dataframe
#'
#' @examples
#' designSet <- getDesignSet(design = getDesignGroupSequential(), alpha = c(0.01, 0.05))
#' as.data.frame(designSet)
#'
#' @export
#'
#' @keywords internal
#'
as.data.frame.TrialDesignSet <- function(x, row.names = NULL,
        optional = FALSE, niceColumnNamesEnabled = FALSE,
        includeAllParameters = FALSE, addPowerAndAverageSampleNumber = FALSE,
        theta = seq(-1, 1, 0.02), nMax = NA_integer_, ...) {
    .assertIsTrialDesignSet(x)
    if (x$isEmpty()) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "cannot create data.frame because the design set is empty")
    }
    # Detect whether the user passed an explicit 'theta' (anything other than
    # the default seq(...) call) to enable automatic theta sequencing.
    fCall <- match.call(expand.dots = FALSE)
    theta <- .assertIsValidThetaRange(thetaRange = theta, thetaAutoSeqEnabled = (as.character(fCall$theta)[1] != "seq"))
    if (addPowerAndAverageSampleNumber) {
        .assertAssociatedArgumentsAreDefined(
            addPowerAndAverageSampleNumber = addPowerAndAverageSampleNumber,
            theta = theta, nMax = nMax
        )
    }
    # All designs in the set must share the master's type (Fisher vs. non-Fisher).
    fisherDesignEnabled <- .isTrialDesignFisher(x$getDesignMaster())
    dataFrame <- NULL
    for (design in x$designs) {
        if (fisherDesignEnabled != .isTrialDesignFisher(design)) {
            stop(
                C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                "all trial designs must be from the same type ",
                "('", .getClassName(x$designs[[1]]), "' != '", .getClassName(design), ")'"
            )
        }
        suppressWarnings(df <- as.data.frame(design,
            niceColumnNamesEnabled = niceColumnNamesEnabled,
            includeAllParameters = includeAllParameters
        ))
        # At the final stage the futility bound is set to the critical value.
        if (.isTrialDesignWithValidFutilityBounds(design)) {
            futilityBoundsName <- "futilityBounds"
            if (niceColumnNamesEnabled) {
                futilityBoundsName <- .getTableColumnNames(design = design)[["futilityBounds"]]
            }
            kMax <- design$kMax
            df[[futilityBoundsName]][kMax] <- design$criticalValues[kMax]
        }
        if (.isTrialDesignWithValidAlpha0Vec(design)) {
            alpha0VecName <- "alpha0Vec"
            if (niceColumnNamesEnabled) {
                alpha0VecName <- .getTableColumnNames(design = design)[["alpha0Vec"]]
            }
            kMax <- design$kMax
            df[[alpha0VecName]][kMax] <- design$criticalValues[kMax]
        }
        if (addPowerAndAverageSampleNumber) {
            results <- PowerAndAverageSampleNumberResult(design, theta = theta, nMax = nMax)
            suppressWarnings(df2 <- as.data.frame(results,
                niceColumnNamesEnabled = niceColumnNamesEnabled,
                includeAllParameters = includeAllParameters
            ))
            df <- merge(df, df2, all.y = TRUE)
        }
        # Prefix each design's rows with a running design number and stack them.
        if (is.null(dataFrame)) {
            if (niceColumnNamesEnabled) {
                dataFrame <- cbind("Design number" = rep(1, nrow(df)), df)
            } else {
                dataFrame <- cbind(designNumber = rep(1, nrow(df)), df)
            }
        } else {
            if (niceColumnNamesEnabled) {
                df <- cbind("Design number" = rep(max(dataFrame$"Design number") + 1, nrow(df)), df)
            } else {
                df <- cbind(designNumber = rep(max(dataFrame$designNumber) + 1, nrow(df)), df)
            }
            dataFrame <- rbind(dataFrame, df)
        }
    }
    return(dataFrame)
}

#'
#' @title
#' Trial Design Set Plotting
#'
#' @description
#' Plots a trial design set.
#'
#' @param x The trial design set, obtained from \code{\link[=getDesignSet]{getDesignSet()}}.
#' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function).
#' @param main The main title.
#' @param xlab The x-axis label.
#' @param ylab The y-axis label.
#' @inheritParams param_palette
#' @inheritParams param_theta
#' @inheritParams param_nMax
#' @inheritParams param_plotPointsEnabled
#' @inheritParams param_showSource
#' @inheritParams param_plotSettings
#' @inheritParams param_legendPosition
#' @inheritParams param_grid
#' @param type The plot type (default = \code{1}). The following plot types are available:
#' \itemize{
#'   \item \code{1}: creates a 'Boundaries' plot
#'   \item \code{3}: creates a 'Stage Levels' plot
#'   \item \code{4}: creates a 'Error Spending' plot
#'   \item \code{5}: creates a 'Power and Early Stopping' plot
#'   \item \code{6}: creates an 'Average Sample Size and Power / Early Stop' plot
#'   \item \code{7}: creates an 'Power' plot
#'   \item \code{8}: creates an 'Early Stopping' plot
#'   \item \code{9}: creates an 'Average Sample Size' plot
#'   \item \code{"all"}: creates all available plots and returns it as a grid plot or list
#' }
#' @inheritParams param_three_dots_plot
#'
#' @details
#' Generic function to plot a trial design set.
#' Is, e.g., useful to compare different designs or design parameters visual.
#'
#' @template return_object_ggplot
#'
#' @examples
#' \dontrun{
#' design <- getDesignInverseNormal(
#'     kMax = 3, alpha = 0.025,
#'     typeOfDesign = "asKD", gammaA = 2,
#'     informationRates = c(0.2, 0.7, 1), typeBetaSpending = "bsOF"
#' )
#'
#' # Create a set of designs based on the master design defined above
#' # and varied parameter 'gammaA'
#' designSet <- getDesignSet(design = design, gammaA = 4)
#'
#' if (require(ggplot2)) plot(designSet, type = 1, legendPosition = 6)
#' }
#'
#' @export
#'
plot.TrialDesignSet <- function(x, y, ..., type = 1L,
        main = NA_character_,
        xlab = NA_character_, ylab = NA_character_,
        palette = "Set1", theta = seq(-1, 1, 0.02), nMax = NA_integer_,
        plotPointsEnabled = NA, legendPosition = NA_integer_,
        showSource = FALSE, grid = 1, plotSettings = NULL) {
    # Capture the variable name of 'x' so it can be shown in generated plot source code.
    fCall <- match.call(expand.dots = FALSE)
    designSetName <- deparse(fCall$x)
    .assertGgplotIsInstalled()
    .assertIsSingleInteger(grid, "grid", validateType = FALSE)
    typeNumbers <- .getPlotTypeNumber(type, x)
    if (is.null(plotSettings)) {
        plotSettings <- .getGridPlotSettings(x, typeNumbers, grid)
    }
    # Create one plot per requested type; collect them for a grid plot if needed.
    p <- NULL
    plotList <- list()
    for (typeNumber in typeNumbers) {
        p <- .plotTrialDesignSet(
            x = x, y = y, type = typeNumber,
            main = main, xlab = xlab, ylab = ylab,
            palette = palette, theta = theta, nMax = nMax,
            plotPointsEnabled = plotPointsEnabled,
            legendPosition = .getGridLegendPosition(legendPosition, typeNumbers, grid),
            showSource = showSource, designSetName = designSetName,
            plotSettings = plotSettings, ...
        )
        .printPlotShowSourceSeparator(showSource, typeNumber, typeNumbers)
        if (length(typeNumbers) > 1) {
            caption <- .getPlotCaption(x, typeNumber, stopIfNotFound = TRUE)
            plotList[[caption]] <- p
        }
    }
    if (length(typeNumbers) == 1) {
        return(p)
    }
    return(.createPlotResultObject(plotList, grid))
}

# Internal worker for plot.TrialDesignSet: validates the plot arguments and maps
# the plot 'type' to the x/y parameter names to be plotted.
.plotTrialDesignSet <- function(..., x, y, type = 1L,
        main = NA_character_,
        xlab = NA_character_, ylab = NA_character_,
        palette = "Set1", theta = seq(-1, 1, 0.02), nMax = NA_integer_,
        plotPointsEnabled = NA, legendPosition = NA_integer_,
        showSource = FALSE, designSetName = NA_character_,
        plotSettings = NULL) {
    .assertGgplotIsInstalled()
    if (!is.call(main) && !isS4(main)) {
        .assertIsSingleCharacter(main, "main", naAllowed = TRUE)
    }
    .assertIsSingleCharacter(xlab, "xlab", naAllowed = TRUE)
    .assertIsSingleCharacter(ylab, "ylab", naAllowed = TRUE)
    .assertIsSingleCharacter(palette, "palette", naAllowed = TRUE)
    theta <- .assertIsValidThetaRange(thetaRange = theta)
    .assertIsSingleNumber(nMax, "nMax", naAllowed = TRUE)
    .assertIsInClosedInterval(nMax, "nMax", lower = 1L, upper = 1e10)
    .assertIsSingleLogical(plotPointsEnabled, "plotPointsEnabled", naAllowed = TRUE)
    .assertIsValidLegendPosition(legendPosition)
    .assertIsSingleInteger(type, "type", naAllowed = FALSE, validateType = FALSE)
    parameterSet <- x
    designMaster <- parameterSet$getDesignMaster()
    .assertIsTrialDesign(designMaster)
    if (type == 1) {
        main <- if (!is.call(main) && !isS4(main) && is.na(main)) "Boundaries" else main
        xParameterName <- "informationRates"
        yParameterNames <- "criticalValues"
        if (designMaster$sided == 1 || (.isTrialDesignInverseNormalOrGroupSequential(designMaster) &&
                (designMaster$typeOfDesign == C_TYPE_OF_DESIGN_PT ||
                    grepl("^bs", designMaster$typeBetaSpending)))) {
            if (.isTrialDesignWithValidFutilityBounds(designMaster)) {
                yParameterNames <- c("futilityBounds", yParameterNames)
            }
            if (.isTrialDesignWithValidAlpha0Vec(designMaster)) {
                yParameterNames <- c("alpha0Vec", yParameterNames)
            }
        }
    } else if (type == 2) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "designs with undefined endpoint do not support plot type 2")
    } else if (type == 3) {
        main <- if (!is.call(main) && !isS4(main) && is.na(main)) "Stage Levels" else main
        xParameterName <- "informationRates"
        yParameterNames <- "stageLevels"
    } else if (type == 4) {
        main <- if (!is.call(main) && !isS4(main) && is.na(main)) "Error Spending" else main
        xParameterName <- "informationRates"
        yParameterNames <- c("alphaSpent")
        if (!.isTrialDesignFisher(designMaster) &&
                designMaster$typeBetaSpending != C_TYPE_OF_DESIGN_BS_NONE) {
            yParameterNames <- c(yParameterNames, "betaSpent")
            palette <- "Paired"
        }
        plotPointsEnabled <- ifelse(is.na(plotPointsEnabled), FALSE, plotPointsEnabled)
    } else if (type == 5) {
        if (!is.call(main) && !isS4(main) && is.na(main)) {
            main <- PlotSubTitleItems(title = "Power and Early Stopping")
            main$add("N", nMax, "max")
        }
        xParameterName <- "theta"
        yParameterNames <- c("overallEarlyStop", "calculatedPower")
    } else if (type == 6) {
        if (!is.call(main) && !isS4(main) && is.na(main)) {
            main <- PlotSubTitleItems(title = "Average Sample Size and Power / Early Stop")
            main$add("N", nMax, "max")
        }
        xParameterName <- "theta"
        yParameterNames <- c("averageSampleNumber", "overallEarlyStop", "calculatedPower")
    } else if (type == 7) {
        if (!is.call(main) && !isS4(main) && is.na(main)) {
            main <- PlotSubTitleItems(title = "Power")
            main$add("N", nMax, "max")
        }
        xParameterName <- "theta"
        yParameterNames <- "calculatedPower"
    } else if (type == 8) {
        if (!is.call(main) && !isS4(main) && is.na(main)) {
            main <- PlotSubTitleItems(title = "Early Stopping")
            main$add("N", nMax, "max")
        }
        xParameterName <- "theta"
        yParameterNames <- "overallEarlyStop"
    } else if (type
== 9) { if (!is.call(main) && !isS4(main) && is.na(main)) { main <- PlotSubTitleItems(title = "Average Sample Size") main$add("N", nMax, "max") } xParameterName <- "theta" yParameterNames <- "averageSampleNumber" } else { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'type' (", type, ") is not allowed; must be 1, 2, ..., 9") } if (type >= 5 && type <= 9) { designSetName <- paste0( "getPowerAndAverageSampleNumber(", designSetName, ", theta = ", .reconstructSequenceCommand(theta), ", nMax = ", nMax, ")" ) } xValues <- NA_real_ if (xParameterName == "theta") { xValues <- theta } srcCmd <- .showPlotSourceInformation( objectName = designSetName, xParameterName = xParameterName, yParameterNames = yParameterNames, nMax = nMax, type = type, showSource = showSource, xValues = xValues ) if (!is.null(srcCmd)) { if (.isSpecialPlotShowSourceArgument(showSource)) { return(invisible(srcCmd)) } return(srcCmd) } p <- .plotParameterSet( parameterSet = parameterSet, designMaster = designMaster, xParameterName = xParameterName, yParameterNames = yParameterNames, mainTitle = main, xlab = xlab, ylab = ylab, palette = palette, theta = theta, nMax = nMax, plotPointsEnabled = plotPointsEnabled, legendPosition = legendPosition, plotSettings = plotSettings # , ... 
) p <- .addDecistionCriticalValuesToPlot(p = p, designMaster = designMaster, type = type, nMax = nMax) return(p) } .addDecistionCriticalValuesToPlot <- function(p, designMaster, type, nMax = NA_integer_) { if (type != 1 || !.isTrialDesignInverseNormalOrGroupSequential(designMaster)) { return(p) } data <- as.data.frame(designMaster) xyNames <- c("delayedInformationRates", "decisionCriticalValues") if (!all(xyNames %in% colnames(data))) { return(p) } data <- unique(na.omit(data[, xyNames])) data$legend <- rep("Decision critical value", nrow(data)) if (!is.na(nMax) && nMax > 1) { data$delayedInformationRates <- data$delayedInformationRates * nMax tryCatch( { data$delayedInformationRates <- as.numeric(.formatSampleSizes(data$delayedInformationRates)) }, error = function(e) { warning("Failed to format delayed information rates on x-axis: ", e$message) } ) } plotSettings <- designMaster$.plotSettings p <- p + ggplot2::geom_point( data = data, mapping = ggplot2::aes( x = .data[["delayedInformationRates"]], y = .data[["decisionCriticalValues"]], colour = .data[["legend"]] ), size = plotSettings$scaleSize(plotSettings$pointSize, TRUE), shape = 4, stroke = 1.25, show.legend = FALSE ) for (i in 1:nrow(data)) { label <- paste0("(", round(data[i, 1], 3), ", ", round(data[i, 2], 3), ")") p <- p + ggplot2::annotate("text", x = data[i, 1], y = data[i, 2], label = label, vjust = plotSettings$scaleSize(3.0), size = plotSettings$scaleSize(2.5) ) } try(suppressWarnings(suppressMessages(p <- p + ggplot2::scale_color_manual(values = c("#4daf4a", "#377eb8", "#e41a1c"))))) return(p) } rpact/R/f_analysis_multiarm.R0000644000176200001440000015537014445307575016002 0ustar liggesusers## | ## | *Analysis of multi-arm designs with adaptive test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | 
License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_utilities.R NULL #' #' @title #' Get Multi-Armed Analysis Results #' #' @description #' Calculates and returns the analysis results for the specified design and data. #' #' @noRd #' .getAnalysisResultsMultiArm <- function(design, dataInput, ..., intersectionTest = C_INTERSECTION_TEST_MULTIARMED_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaH0 = NA_real_, nPlanned = NA_real_) { .assertIsTrialDesignInverseNormalOrFisherOrConditionalDunnett(design) .assertIsValidIntersectionTestMultiArm(design, intersectionTest) .assertIsOneSidedDesign(design, designType = "multi-arm", engineType = "analysis") stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design, showWarnings = TRUE) .assertIsSingleLogical(directionUpper, "directionUpper") .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage) on.exit(dataInput$.trim()) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .assertIsValidNPlanned(nPlanned, design$kMax, stage, required = FALSE) intersectionTest <- .getCorrectedIntersectionTestMultiArmIfNecessary(design, intersectionTest) if (dataInput$isDatasetMeans()) { if (is.na(thetaH0)) { thetaH0 <- C_THETA_H0_MEANS_DEFAULT } return(.getAnalysisResultsMeansMultiArm( design = design, dataInput = dataInput, intersectionTest = intersectionTest, directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned, stage = stage, ... 
)) } if (dataInput$isDatasetRates()) { if (is.na(thetaH0)) { thetaH0 <- C_THETA_H0_RATES_DEFAULT } return(.getAnalysisResultsRatesMultiArm( design = design, dataInput = dataInput, intersectionTest = intersectionTest, directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned, stage = stage, ... )) } if (dataInput$isDatasetSurvival()) { if (is.na(thetaH0)) { thetaH0 <- C_THETA_H0_SURVIVAL_DEFAULT } return(.getAnalysisResultsSurvivalMultiArm( design = design, dataInput = dataInput, intersectionTest = intersectionTest, directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned, stage = stage, ... )) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not implemented yet") } #' #' Get Stage Results #' Returns summary statistics and p-values for a given data set and a given multi-arm design. #' #' @noRd #' .getStageResultsMultiArm <- function(design, dataInput, ...) { .assertIsTrialDesignInverseNormalOrFisherOrConditionalDunnett(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage) on.exit(dataInput$.trim()) if (dataInput$isDatasetMeans()) { return(.getStageResultsMeansMultiArm(design = design, dataInput = dataInput, userFunctionCallEnabled = TRUE, ...)) } if (dataInput$isDatasetRates()) { return(.getStageResultsRatesMultiArm(design = design, dataInput = dataInput, userFunctionCallEnabled = TRUE, ...)) } if (dataInput$isDatasetSurvival()) { return(.getStageResultsSurvivalMultiArm(design = design, dataInput = dataInput, userFunctionCallEnabled = TRUE, ...)) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not supported") } #' #' Get Repeated Confidence Intervals for multi-arm case #' Calculates and returns the lower and upper limit of the repeated confidence intervals of the trial for multi-arm designs. 
#' #' @noRd #' .getRepeatedConfidenceIntervalsMultiArm <- function(design, dataInput, ...) { .assertIsTrialDesignInverseNormalOrFisherOrConditionalDunnett(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage) on.exit(dataInput$.trim()) if (dataInput$isDatasetMeans()) { return(.getRepeatedConfidenceIntervalsMeansMultiArm( design = design, dataInput = dataInput, ... )) } if (dataInput$isDatasetRates()) { return(.getRepeatedConfidenceIntervalsRatesMultiArm( design = design, dataInput = dataInput, ... )) } if (dataInput$isDatasetSurvival()) { return(.getRepeatedConfidenceIntervalsSurvivalMultiArm( design = design, dataInput = dataInput, ... )) } stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not implemented yet") } #' #' Get Conditional Power for multi-arm case #' Calculates and returns the conditional power for multi-arm case. #' #' @keywords internal #' #' @noRd #' .getConditionalPowerMultiArm <- function(..., stageResults, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT) { .assertIsStageResults(stageResults) if (stageResults$isDatasetMeans()) { if ("assumedStDev" %in% names(list(...))) { warning("For multi-arm analysis the argument for assumed standard deviation ", "is named 'assumedStDevs' and not 'assumedStDev'", call. = FALSE ) } return(.getConditionalPowerMeansMultiArm( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, ... )) } if (stageResults$isDatasetRates()) { return(.getConditionalPowerRatesMultiArm( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, ... )) } if (stageResults$isDatasetSurvival()) { return(.getConditionalPowerSurvivalMultiArm( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, ... 
)) } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(stageResults$.dataInput), "' is not implemented yet" ) } .getIndicesOfClosedHypothesesSystem <- function(gMax) { indices <- as.data.frame(expand.grid(rep(list(1:0), gMax)))[1:(2^gMax - 1), ] if (gMax == 1) { return(as.matrix(indices)) } y <- 10^(ncol(indices):1) indices$pos <- (as.matrix(indices) %*% y / 10) indices$sum <- as.numeric(rowSums(indices[, 1:gMax])) indices <- indices[order(indices$sum, indices$pos, decreasing = c(TRUE, TRUE)), ] indices <- indices[, 1:gMax] rownames(indices) <- as.character(1:nrow(indices)) return(as.matrix(indices)) } .getMultivariateDistribution <- function(..., type = c("normal", "t", "quantile"), upper, sigma, df = NA_real_, alpha = NA_real_) { .assertMnormtIsInstalled() type <- match.arg(type) dimensionSigma <- length(base::diag(sigma)) if (type == "normal") { if (dimensionSigma == 1) { return(stats::pnorm(upper)) } return(mnormt::sadmvn(lower = -Inf, upper = upper, mean = 0, varcov = sigma)) } if (type == "t") { if (dimensionSigma == 1) { return(stats::pt(upper, df)) } if (df > 500) { return(mnormt::sadmvn(lower = -Inf, upper = upper, mean = 0, varcov = sigma)) } return(mnormt::sadmvt(lower = -Inf, upper = upper, mean = 0, S = sigma, df = df)) } if (type == "quantile") { if (dimensionSigma == 1) { return(.getOneMinusQNorm(alpha)) } return(.getOneDimensionalRoot( function(x) { return(mnormt::sadmvn(lower = -Inf, upper = x, mean = 0, varcov = sigma) - (1 - alpha)) }, lower = -8, upper = 8, tolerance = 1e-08, callingFunctionInformation = ".getMultivariateDistribution" )) } } .performClosedCombinationTest <- function(..., stageResults, design = stageResults$.design, intersectionTest = stageResults$intersectionTest) { dataInput <- stageResults$.dataInput stage <- stageResults$stage gMax <- stageResults$getGMax() kMax <- design$kMax indices <- .getIndicesOfClosedHypothesesSystem(gMax = gMax) adjustedStageWisePValues <- matrix(NA_real_, nrow = 2^gMax - 1, 
ncol = kMax) adjustedOverallPValues <- matrix(NA_real_, nrow = 2^gMax - 1, ncol = kMax) overallAdjustedTestStatistics <- matrix(NA_real_, nrow = 2^gMax - 1, ncol = kMax) rejected <- matrix(NA, nrow = gMax, ncol = kMax) colnames(adjustedStageWisePValues) <- paste("stage ", (1:kMax), sep = "") colnames(overallAdjustedTestStatistics) <- paste("stage ", (1:kMax), sep = "") dimnames(rejected) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = "")) rejectedIntersections <- matrix(rep(FALSE, stage * nrow(indices)), nrow(indices), stage) rejectedIntersectionsBefore <- matrix(rep(FALSE, nrow(indices)), nrow(indices), 1) if (.isTrialDesignFisher(design)) { weightsFisher <- .getWeightsFisher(design) } else { weightsInverseNormal <- .getWeightsInverseNormal(design) } for (k in 1:stage) { for (i in 1:(2^gMax - 1)) { if (!all(is.na(stageResults$separatePValues[indices[i, ] == 1, k]))) { if ((intersectionTest == "Dunnett") || (intersectionTest == "SpiessensDebois")) { sigma <- 1 if (grepl("MultiArm", .getClassName(stageResults))) { if (.isStageResultsMultiArmSurvival(stageResults)) { allocationRatiosSelected <- as.numeric(na.omit( dataInput$getAllocationRatios(stage = k, group = 1:gMax)[indices[i, ] == 1] )) sigma <- sqrt(allocationRatiosSelected / (1 + allocationRatiosSelected)) %*% sqrt(t(allocationRatiosSelected / (1 + allocationRatiosSelected))) } else { sampleSizesSelected <- as.numeric(na.omit( dataInput$getSampleSizes(stage = k, group = 1:gMax)[indices[i, ] == 1] )) sigma <- sqrt(sampleSizesSelected / (sampleSizesSelected + dataInput$getSampleSizes(stage = k, group = gMax + 1))) %*% sqrt(t(sampleSizesSelected / (sampleSizesSelected + dataInput$getSampleSizes(stage = k, group = gMax + 1)))) } } else { if (.isStageResultsEnrichmentSurvival(stageResults)) { eventsSelected <- as.numeric(na.omit( dataInput$getEvents(stage = k, group = 1)[indices[i, ] == 1] )) if (length(eventsSelected) == 2) { if (dataInput$isStratified()) { sigma <- 
matrix(rep(sqrt(dataInput$getEvents(stage = k, subset = "S1") / sum(dataInput$getEvents(stage = k))), 4), nrow = 2) } else { sigma <- matrix(rep(sqrt(dataInput$getEvents(stage = k, subset = "S1") / dataInput$getEvents(stage = k, subset = "F")), 4), nrow = 2) } } } else { sampleSizesSelected <- as.numeric(na.omit( dataInput$getSampleSizes(stage = k, group = 1)[indices[i, ] == 1] )) if (length(sampleSizesSelected) == 2) { if (dataInput$isStratified()) { sigma <- matrix(rep(sqrt(sum(dataInput$getSampleSizes(stage = k, subset = "S1")) / sum(dataInput$getSampleSizes(stage = k))), 4), nrow = 2) } else { sigma <- matrix(rep(sqrt(sum(dataInput$getSampleSizes(stage = k, subset = "S1")) / sum(dataInput$getSampleSizes(stage = k, subset = "F"))), 4), nrow = 2) } } } } if (is.matrix(sigma)) { diag(sigma) <- 1 } if (stageResults$directionUpper) { maxTestStatistic <- max(stageResults$testStatistics[indices[i, ] == 1, k], na.rm = TRUE) } else { maxTestStatistic <- max(-stageResults$testStatistics[indices[i, ] == 1, k], na.rm = TRUE) } df <- NA_real_ if (.isStageResultsMultiArmMeans(stageResults)) { if (!stageResults$normalApproximation) { df <- sum(dataInput$getSampleSizes(stage = k) - 1, na.rm = TRUE) } adjustedStageWisePValues[i, k] <- 1 - .getMultivariateDistribution( type = ifelse(stageResults$normalApproximation, "normal", "t"), upper = maxTestStatistic, sigma = sigma, df = df ) } else if (.isStageResultsEnrichmentMeans(stageResults)) { if (length(sampleSizesSelected) == 1) { adjustedStageWisePValues[i, k] <- stageResults$separatePValues[min(which(indices[i, ] == 1)), k] } else { if (!stageResults$normalApproximation) { if (dataInput$isStratified()) { df <- sum(dataInput$getSampleSizes(stage = k) - 1, na.rm = TRUE) } else { df <- sum(dataInput$getSampleSizes(stage = k, subset = "F") - 2, na.rm = TRUE) } } adjustedStageWisePValues[i, k] <- 1 - .getMultivariateDistribution( type = ifelse(stageResults$normalApproximation, "normal", "t"), upper = maxTestStatistic, sigma = sigma, 
df = df ) } } else { adjustedStageWisePValues[i, k] <- 1 - .getMultivariateDistribution( type = "normal", upper = maxTestStatistic, sigma = sigma, df = df ) } } # Bonferroni adjusted p-values else if (intersectionTest == "Bonferroni") { adjustedStageWisePValues[i, k] <- min(c(sum(indices[ i, !is.na(stageResults$separatePValues[, k]) ]) * min(stageResults$separatePValues[indices[i, ] == 1, k], na.rm = TRUE), 1)) } # Simes adjusted p-values else if (intersectionTest == "Simes") { adjustedStageWisePValues[i, k] <- min(sum(indices[ i, !is.na(stageResults$separatePValues[, k]) ]) / (1:sum(indices[i, !is.na(stageResults$separatePValues[, k])])) * sort(stageResults$separatePValues[indices[i, ] == 1, k])) } # Sidak adjusted p-values else if (intersectionTest == "Sidak") { adjustedStageWisePValues[i, k] <- 1 - (1 - min(stageResults$separatePValues[indices[i, ] == 1, k], na.rm = TRUE))^ sum(indices[i, !is.na(stageResults$separatePValues[, k])]) } # Hierarchically ordered hypotheses else if (intersectionTest == "Hierarchical") { separatePValues <- stageResults$separatePValues separatePValues[is.na(separatePValues[, 1:stage])] <- 1 adjustedStageWisePValues[i, k] <- separatePValues[min(which(indices[i, ] == 1)), k] } if (.isTrialDesignFisher(design)) { overallAdjustedTestStatistics[i, k] <- prod(adjustedStageWisePValues[i, 1:k]^weightsFisher[1:k]) } else { overallAdjustedTestStatistics[i, k] <- (weightsInverseNormal[1:k] %*% .getOneMinusQNorm(adjustedStageWisePValues[i, 1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) } } } if (.isTrialDesignFisher(design)) { rejectedIntersections[, k] <- (overallAdjustedTestStatistics[, k] <= design$criticalValues[k]) } else { rejectedIntersections[, k] <- (overallAdjustedTestStatistics[, k] >= design$criticalValues[k]) } rejectedIntersections[is.na(rejectedIntersections[, k]), k] <- FALSE rejectedIntersections[, k] <- rejectedIntersections[, k] | rejectedIntersectionsBefore rejectedIntersectionsBefore <- matrix(rejectedIntersections[, k], ncol 
= 1) for (j in 1:gMax) { rejected[j, k] <- all(rejectedIntersections[indices[, j] == 1, k], na.rm = TRUE) } } return(list( .design = design, intersectionTest = intersectionTest, separatePValues = stageResults$separatePValues, indices = indices, adjustedStageWisePValues = adjustedStageWisePValues, overallAdjustedTestStatistics = overallAdjustedTestStatistics, rejected = rejected, rejectedIntersections = rejectedIntersections )) } #' #' @title #' Get Closed Combination Test Results #' #' @description #' Calculates and returns the results from the closed combination test in multi-arm #' and population enrichment designs. #' #' @inheritParams param_stageResults #' #' @family analysis functions #' #' @template return_object_closed_combination_test_results #' @template how_to_get_help_for_generics #' #' @template examples_get_closed_combination_test_results #' #' @export #' getClosedCombinationTestResults <- function(stageResults) { .assertIsTrialDesignInverseNormalOrFisher(stageResults$.design) result <- .performClosedCombinationTest(stageResults = stageResults) return(ClosedCombinationTestResults( .design = result$.design, .enrichment = grepl("Enrichment", .getClassName(stageResults)), intersectionTest = result$intersectionTest, separatePValues = result$separatePValues, indices = result$indices, adjustedStageWisePValues = result$adjustedStageWisePValues, overallAdjustedTestStatistics = result$overallAdjustedTestStatistics, rejected = result$rejected, rejectedIntersections = result$rejectedIntersections )) } #' #' Repeated p-values for multi-arm designs #' #' @noRd #' .getRepeatedPValuesMultiArm <- function(stageResults, ..., tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments(functionName = "getRepeatedPValuesMultiArm", ...) 
design <- stageResults$.design gMax <- stageResults$getGMax() kMax <- design$kMax repeatedPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax) if (.isTrialDesignInverseNormal(design)) { if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_USER) { warning("Repeated p-values not available for 'typeOfDesign' = '", C_TYPE_OF_DESIGN_AS_USER, "'", call. = FALSE ) return(repeatedPValues) } if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT_OPTIMUM) { warning("Repeated p-values not available for 'typeOfDesign' = '", C_TYPE_OF_DESIGN_WT_OPTIMUM, "'", call. = FALSE ) return(repeatedPValues) } } if (.isTrialDesignFisher(design) && design$method == C_FISHER_METHOD_USER_DEFINED_ALPHA) { warning("Repeated p-values not available for 'method' = '", C_FISHER_METHOD_USER_DEFINED_ALPHA, "'", call. = FALSE ) return(repeatedPValues) } startTime <- Sys.time() stage <- stageResults$stage if (.isTrialDesignConditionalDunnett(design)) { if (stage == 1 || stage > 2) { message("Repeated p-values can only be calculated for the second stage") return(repeatedPValues) } for (g in 1:gMax) { if (!is.na(stageResults$testStatistics[g, 2])) { prec <- 1 lower <- tolerance upper <- 0.5 maxSearchIterations <- 30 while (prec > tolerance && maxSearchIterations >= 0) { alpha <- (lower + upper) / 2 ctr <- .getClosedConditionalDunnettTestResults( design = getDesignConditionalDunnett( alpha = alpha, informationAtInterim = design$informationAtInterim, secondStageConditioning = design$secondStageConditioning ), stageResults = stageResults, stage = stage ) ifelse(ctr$rejected[g, 2], upper <- alpha, lower <- alpha) prec <- upper - lower maxSearchIterations <- maxSearchIterations - 1 } repeatedPValues[g, 2] <- upper } } .logProgress("Repeated p-values for final stage calculated", startTime = startTime) return(repeatedPValues) } if (.isTrialDesignInverseNormal(design)) { typeOfDesign <- design$typeOfDesign deltaWT <- design$deltaWT typeBetaSpending <- design$typeBetaSpending if (!design$bindingFutility) { if 
(design$typeOfDesign == C_TYPE_OF_DESIGN_PT) { typeOfDesign <- C_TYPE_OF_DESIGN_WT deltaWT <- design$deltaPT1 } if (design$typeBetaSpending != "none") { typeBetaSpending <- "none" } } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_PT || design$typeBetaSpending != "none") { message("Calculation of repeated p-values might take a while for binding case, please wait...") } } intersectionTest <- stageResults$intersectionTest if (!.isTrialDesignFisher(design) && (design$typeOfDesign == C_TYPE_OF_DESIGN_HP)) { if (stage == kMax) { startTime <- Sys.time() for (g in 1:gMax) { if (!is.na(stageResults$testStatistics[g, kMax])) { prec <- 1 lower <- .getDesignGroupSequential( kMax = kMax, sided = design$sided, informationRates = design$informationRates, typeOfDesign = C_TYPE_OF_DESIGN_HP, futilityBounds = design$futilityBounds, bindingFutility = design$bindingFutility )$alphaSpent[kMax - 1] + tolerance upper <- 0.5 maxSearchIterations <- 30 while (prec > tolerance && maxSearchIterations >= 0) { alpha <- (lower + upper) / 2 designAlpha <- .getDesignInverseNormal( kMax = kMax, alpha = alpha, typeOfDesign = C_TYPE_OF_DESIGN_HP, futilityBounds = design$futilityBounds, sided = design$sided, bindingFutility = design$bindingFutility, informationRates = design$informationRates ) ctr <- .performClosedCombinationTest( stageResults = stageResults, design = designAlpha, intersectionTest = intersectionTest ) ifelse(ctr$rejected[g, kMax], upper <- alpha, lower <- alpha) prec <- upper - lower maxSearchIterations <- maxSearchIterations - 1 } repeatedPValues[g, kMax] <- upper } } .logProgress("Repeated p-values for final stage calculated", startTime = startTime) } } else if (kMax == 1) { startTime <- Sys.time() for (g in 1:gMax) { if (!is.na(stageResults$testStatistics[g, 1])) { prec <- 1 lower <- tolerance upper <- 1 - tolerance maxSearchIterations <- 30 while (prec > tolerance && maxSearchIterations >= 0) { alpha <- (lower + upper) / 2 if (.isTrialDesignFisher(design)) { designAlpha <- 
.getDesignFisher(kMax = 1, alpha = alpha) } else { designAlpha <- .getDesignInverseNormal(kMax = 1, alpha = alpha) } ctr <- .performClosedCombinationTest( stageResults = stageResults, design = designAlpha, intersectionTest = intersectionTest ) ifelse(ctr$rejected[g, 1], upper <- alpha, lower <- alpha) prec <- upper - lower maxSearchIterations <- maxSearchIterations - 1 } repeatedPValues[g, 1] <- upper } } .logProgress("Overall p-values calculated", startTime = startTime) } else { for (k in 1:stage) { startTime <- Sys.time() for (g in 1:gMax) { if (!is.na(stageResults$testStatistics[g, k])) { prec <- 1 lower <- tolerance upper <- 0.5 maxSearchIterations <- 30 while (prec > tolerance && maxSearchIterations >= 0) { alpha <- (lower + upper) / 2 if (.isTrialDesignFisher(design)) { designAlpha <- .getDesignFisher( kMax = kMax, alpha = alpha, method = design$method, alpha0Vec = design$alpha0Vec, sided = design$sided, bindingFutility = design$bindingFutility, informationRates = design$informationRates ) } else { designAlpha <- .getDesignInverseNormal( kMax = kMax, alpha = alpha, typeOfDesign = typeOfDesign, deltaWT = deltaWT, typeBetaSpending = typeBetaSpending, gammaB = design$gammaB, deltaPT0 = design$deltaPT0, deltaPT1 = design$deltaPT1, beta = design$beta, gammaA = design$gammaA, futilityBounds = design$futilityBounds, sided = design$sided, bindingFutility = design$bindingFutility, informationRates = design$informationRates ) } ctr <- .performClosedCombinationTest( stageResults = stageResults, design = designAlpha, intersectionTest = intersectionTest ) ifelse(ctr$rejected[g, k], upper <- alpha, lower <- alpha) prec <- upper - lower maxSearchIterations <- maxSearchIterations - 1 } repeatedPValues[g, k] <- upper } } .logProgress("Repeated p-values for stage %s calculated", startTime = startTime, k) } } return(repeatedPValues) } #' #' @title #' Get Closed Conditional Dunnett Test Results #' #' @description #' Calculates and returns the results from the closed conditional 
Dunnett test. #' #' @inheritParams param_stageResults #' @inheritParams param_stage #' @inheritParams param_three_dots #' #' @family analysis functions #' @details #' For performing the conditional Dunnett test the design must be defined through the function #' \code{\link[=getDesignConditionalDunnett]{getDesignConditionalDunnett()}}.\cr #' See Koenig et al. (2008) and Wassmer & Brannath (2016), chapter 11 for details of the test procedure. #' #' @template return_object_closed_combination_test_results #' @template how_to_get_help_for_generics #' #' @template examples_get_closed_conditional_dunnett_test_results #' #' @export #' getClosedConditionalDunnettTestResults <- function(stageResults, ..., stage = stageResults$stage) { .assertIsStageResultsMultiArm(stageResults) .assertIsValidStage(stage, kMax = 2) .warnInCaseOfUnknownArguments(functionName = "getClosedConditionalDunnettTestResults", ignore = c("design"), ...) design <- stageResults$.design if (!is.null(list(...)[["design"]])) { design <- list(...)[["design"]] } .assertIsTrialDesignConditionalDunnett(design) result <- .getClosedConditionalDunnettTestResults(stageResults = stageResults, design = design, stage = stage) return(ClosedCombinationTestResults( .design = result$.design, .enrichment = grepl("Enrichment", .getClassName(stageResults)), intersectionTest = result$intersectionTest, indices = result$indices, separatePValues = result$separatePValues, conditionalErrorRate = result$conditionalErrorRate, secondStagePValues = result$secondStagePValues, rejected = result$rejected, rejectedIntersections = result$rejectedIntersections )) } .getClosedConditionalDunnettTestResults <- function(..., stageResults, design = stageResults$.design, stage = stageResults$stage) { gMax <- stageResults$getGMax() informationAtInterim <- design$informationAtInterim secondStageConditioning <- design$secondStageConditioning alpha <- design$alpha if (.isStageResultsMultiArmSurvival(stageResults)) { frac1 <- 
stageResults$.dataInput$allocationRatios[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups <= gMax] / (stageResults$.dataInput$allocationRatios[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups <= gMax] + 1) if (stage == 2) { frac2 <- stageResults$.dataInput$overallAllocationRatios[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups <= gMax] / (stageResults$.dataInput$overallAllocationRatios[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups <= gMax] + 1) } } else { frac1 <- stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups <= gMax] / (stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups <= gMax] + stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups == (gMax + 1)]) if (stage == 2) { frac2 <- stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups <= gMax] / (stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups <= gMax] + stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups == (gMax + 1)]) } } indices <- .getIndicesOfClosedHypothesesSystem(gMax = gMax) conditionalErrorRate <- matrix(rep(NA_real_, 2 * (2^gMax - 1)), 2^gMax - 1, 2) secondStagePValues <- matrix(rep(NA_real_, 2 * (2^gMax - 1)), 2^gMax - 1, 2) rejected <- matrix(rep(FALSE, gMax * 2), gMax, 2) colnames(conditionalErrorRate) <- paste("stage ", (1:2), sep = "") colnames(secondStagePValues) <- paste("stage ", (1:2), sep = "") dimnames(rejected) <- list(paste("arm ", 1:gMax, sep = ""), paste("stage ", (1:2), sep = "")) rejectedIntersections <- matrix(rep(FALSE, stage * nrow(indices)), nrow(indices), stage) if (stageResults$directionUpper) { signedTestStatistics <- stageResults$testStatistics signedOverallTestStatistics <- 
stageResults$overallTestStatistics signedOverallTestStatistics[, 2] <- sqrt(informationAtInterim) * stageResults$testStatistics[, 1] + sqrt(1 - informationAtInterim) * stageResults$testStatistics[, 2] } else { signedTestStatistics <- -stageResults$testStatistics signedOverallTestStatistics <- -stageResults$overallTestStatistics signedOverallTestStatistics[, 2] <- -(sqrt(informationAtInterim) * stageResults$testStatistics[, 1] + sqrt(1 - informationAtInterim) * stageResults$testStatistics[, 2]) } for (i in 1:(2^gMax - 1)) { zeta <- sqrt(frac1[indices[i, ] == 1]) sigma <- zeta %*% t(zeta) diag(sigma) <- 1 crit <- .getMultivariateDistribution( type = "quantile", upper = NA_real_, sigma = sigma, alpha = alpha ) integrand <- function(x) { innerProduct <- 1 for (g in (1:gMax)) { if (indices[i, g] == 1) { innerProduct <- innerProduct * stats::pnorm(((crit - sqrt(informationAtInterim) * signedTestStatistics[g, 1] + sqrt(1 - informationAtInterim) * sqrt(frac1[g]) * x)) / sqrt((1 - informationAtInterim) * (1 - frac1[g]))) } } return(innerProduct * dnorm(x)) } conditionalErrorRate[i, 1] <- 1 - integrate(integrand, lower = -Inf, upper = Inf)$value if (stage == 2) { if (!all(is.na(stageResults$separatePValues[indices[i, ] == 1, 2]))) { if (secondStageConditioning) { maxOverallTestStatistic <- max( signedOverallTestStatistics[indices[i, ] == 1, 2], na.rm = TRUE ) integrand <- function(x) { innerProduct <- 1 for (g in (1:gMax)) { if ((indices[i, g] == 1) && !is.na(stageResults$overallTestStatistics[g, 2])) { innerProduct <- innerProduct * stats::pnorm(((maxOverallTestStatistic - sqrt(informationAtInterim) * signedTestStatistics[g, 1] + sqrt(1 - informationAtInterim) * sqrt(frac2[g]) * x)) / sqrt((1 - informationAtInterim) * (1 - frac2[g]))) } } return(innerProduct * dnorm(x)) } secondStagePValues[i, 2] <- 1 - integrate(integrand, lower = -Inf, upper = Inf)$value } else { maxTestStatistic <- max(signedTestStatistics[indices[i, ] == 1, 2], na.rm = TRUE) integrand <- function(x) { 
innerProduct <- 1 for (g in (1:gMax)) { if ((indices[i, g] == 1) && !is.na(stageResults$separatePValues[g, 2])) { innerProduct <- innerProduct * stats::pnorm(((maxTestStatistic + sqrt(frac2[g]) * x)) / sqrt(1 - frac2[g])) } } return(innerProduct * dnorm(x)) } secondStagePValues[i, 2] <- 1 - integrate(integrand, lower = -Inf, upper = Inf)$value } } } } if (stage == 2) { rejectedIntersections[, 2] <- (secondStagePValues[, 2] <= conditionalErrorRate[, 1]) rejectedIntersections[is.na(rejectedIntersections[, 2]), 2] <- FALSE for (j in 1:gMax) { rejected[j, 2] <- all(rejectedIntersections[indices[, j] == 1, 2], na.rm = TRUE) } } return(list( .design = design, intersectionTest = "Dunnett", indices = indices, separatePValues = stageResults$separatePValues, conditionalErrorRate = conditionalErrorRate, secondStagePValues = secondStagePValues, rejected = rejected, rejectedIntersections = rejectedIntersections )) } .getConditionalDunnettTestForCI <- function(..., design, stageResults, treatmentArm) { gMax <- stageResults$getGMax() informationAtInterim <- design$informationAtInterim secondStageConditioning <- design$secondStageConditioning alpha <- design$alpha if (.isStageResultsMultiArmSurvival(stageResults)) { frac1 <- stageResults$.dataInput$allocationRatios[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups <= gMax] / (stageResults$.dataInput$allocationRatios[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups <= gMax] + 1) frac2 <- stageResults$.dataInput$overallAllocationRatios[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups <= gMax] / (stageResults$.dataInput$overallAllocationRatios[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups <= gMax] + 1) } else { frac1 <- stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups <= gMax] / (stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups <= gMax] + 
stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 1 & stageResults$.dataInput$groups == (gMax + 1)]) frac2 <- stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups <= gMax] / (stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups <= gMax] + stageResults$.dataInput$sampleSizes[stageResults$.dataInput$stages == 2 & stageResults$.dataInput$groups == (gMax + 1)]) } if (stageResults$directionUpper) { signedTestStatistics <- stageResults$testStatistics signedOverallTestStatistics <- stageResults$overallTestStatistics signedOverallTestStatistics[, 2] <- sqrt(informationAtInterim) * stageResults$testStatistics[, 1] + sqrt(1 - informationAtInterim) * stageResults$testStatistics[, 2] } else { signedTestStatistics <- -stageResults$testStatistics signedOverallTestStatistics <- -stageResults$overallTestStatistics signedOverallTestStatistics[, 2] <- -(sqrt(informationAtInterim) * stageResults$testStatistics[, 1] + sqrt(1 - informationAtInterim) * stageResults$testStatistics[, 2]) } zeta <- sqrt(frac1) sigma <- zeta %*% t(zeta) diag(sigma) <- 1 crit <- .getMultivariateDistribution(type = "quantile", upper = NA_real_, sigma = sigma, alpha = alpha) integrand <- function(x) { innerProduct <- 1 for (g in (1:gMax)) { innerProduct <- innerProduct * stats::pnorm(((crit - sqrt(informationAtInterim) * signedTestStatistics[g, 1] + sqrt(1 - informationAtInterim) * sqrt(frac1[g]) * x)) / sqrt((1 - informationAtInterim) * (1 - frac1[g]))) } return(innerProduct * dnorm(x)) } conditionalErrorRate <- 1 - integrate(integrand, lower = -Inf, upper = Inf)$value if (!is.na(stageResults$separatePValues[treatmentArm, 2])) { if (secondStageConditioning) { maxOverallTestStatistic <- signedOverallTestStatistics[treatmentArm, 2] integrand <- function(x) { innerProduct <- 1 for (g in (1:gMax)) { if (!is.na(stageResults$overallTestStatistics[g, 2])) { innerProduct <- innerProduct * 
stats::pnorm(((maxOverallTestStatistic - sqrt(informationAtInterim) * signedTestStatistics[g, 1] + sqrt(1 - informationAtInterim) * sqrt(frac2[g]) * x)) / sqrt((1 - informationAtInterim) * (1 - frac2[g]))) } } return(innerProduct * dnorm(x)) } secondStagePValues <- 1 - integrate(integrand, lower = -Inf, upper = Inf)$value } else { maxTestStatistic <- signedTestStatistics[treatmentArm, 2] integrand <- function(x) { innerProduct <- 1 for (g in (1:gMax)) { if (!is.na(stageResults$separatePValues[g, 2])) { innerProduct <- innerProduct * stats::pnorm(((maxTestStatistic + sqrt(frac2[g]) * x)) / sqrt(1 - frac2[g])) } } return(innerProduct * dnorm(x)) } secondStagePValues <- 1 - integrate(integrand, lower = -Inf, upper = Inf)$value } } return(secondStagePValues <= conditionalErrorRate) } #' #' Calculation of conditional rejection probability (CRP) #' #' @noRd #' .getConditionalRejectionProbabilitiesMultiArm <- function(stageResults, ..., stage = stageResults$stage, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsValidStage(stage, stageResults$.design$kMax) gMax <- stageResults$getGMax() if (.isTrialDesignInverseNormal(stageResults$.design)) { return(.getConditionalRejectionProbabilitiesMultiArmInverseNormal( stageResults = stageResults, stage = stage, ... )) } else if (.isTrialDesignFisher(stageResults$.design)) { return(.getConditionalRejectionProbabilitiesMultiArmFisher( stageResults = stageResults, stage = stage, ... )) } else if (.isTrialDesignConditionalDunnett(stageResults$.design)) { return(.getConditionalRejectionProbabilitiesMultiArmConditionalDunnett( stageResults = stageResults, ... 
)) } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of TrialDesignInverseNormal, TrialDesignFisher, or TrialDesignDunnett" ) } #' #' Calculation of CRP based on inverse normal method #' #' @noRd #' .getConditionalRejectionProbabilitiesMultiArmInverseNormal <- function(..., stageResults, stage) { design <- stageResults$.design .assertIsTrialDesignInverseNormal(design) .warnInCaseOfUnknownArguments( functionName = ".getConditionalRejectionProbabilitiesMultiArmInverseNormal", ignore = c("stage", "design"), ... ) kMax <- design$kMax if (kMax == 1) { return(as.matrix(NA_real_)) } gMax <- stageResults$getGMax() conditionalRejectionProbabilities <- matrix(NA_real_, nrow = gMax, ncol = kMax) weights <- .getWeightsInverseNormal(design) informationRates <- design$informationRates ctr <- .performClosedCombinationTest(stageResults = stageResults) criticalValues <- design$criticalValues for (stageIndex in (1:min(stage, kMax - 1))) { for (g in 1:gMax) { if (!is.na(ctr$separatePValues[g, stageIndex])) { # shifted decision region for use in getGroupSeqProbs # Inverse Normal Method shiftedDecisionRegionUpper <- criticalValues[(stageIndex + 1):kMax] * sqrt(sum(weights[1:stageIndex]^2) + cumsum(weights[(stageIndex + 1):kMax]^2)) / sqrt(cumsum(weights[(stageIndex + 1):kMax]^2)) - min(ctr$overallAdjustedTestStatistics[ctr$indices[, g] == 1, stageIndex], na.rm = TRUE) * sqrt(sum(weights[1:stageIndex]^2)) / sqrt(cumsum(weights[(stageIndex + 1):kMax]^2)) if (stageIndex == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stageIndex + 1):(kMax - 1)] * sqrt(sum(weights[1:stageIndex]^2) + cumsum(weights[(stageIndex + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stageIndex + 1):(kMax - 1)]^2)) - min(ctr$overallAdjustedTestStatistics[ctr$indices[, g] == 1, stageIndex], na.rm = TRUE) * sqrt(sum(weights[1:stageIndex]^2)) / sqrt(cumsum(weights[(stageIndex + 1):(kMax - 1)]^2)) } # scaled information for use in getGroupSeqProbs 
scaledInformation <- (informationRates[(stageIndex + 1):kMax] - informationRates[stageIndex]) / (1 - informationRates[stageIndex]) decisionMatrix <- matrix(c( shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper ), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities( decisionMatrix = decisionMatrix, informationRates = scaledInformation ) conditionalRejectionProbabilities[g, stageIndex] <- sum(probs[3, ] - probs[2, ]) } } } return(conditionalRejectionProbabilities) } #' #' Calculation of conditional rejection probability based on Fisher's combination test #' #' @noRd #' .getConditionalRejectionProbabilitiesMultiArmFisher <- function(..., stageResults, stage) { design <- stageResults$.design .assertIsTrialDesignFisher(design) .warnInCaseOfUnknownArguments( functionName = ".getConditionalRejectionProbabilitiesMultiArmFisher", ignore = c("stage", "design"), ... ) kMax <- design$kMax if (kMax == 1) { return(as.matrix(NA_real_)) } gMax <- stageResults$getGMax() criticalValues <- design$criticalValues weights <- .getWeightsFisher(design) intersectionTest <- stageResults$intersectionTest conditionalRejectionProbabilities <- matrix(NA_real_, nrow = gMax, ncol = kMax) if (design$bindingFutility) { alpha0Vec <- design$alpha0Vec } else { alpha0Vec <- rep(1, kMax - 1) } for (g in 1:gMax) { for (stageIndex in (1:min(stage, kMax - 1))) { if (!is.na(stageResults$separatePValues[g, stageIndex])) { if (gMax == 1) { pValues <- stageResults$separatePValues[1, 1:stageIndex] } else { ctr <- .performClosedCombinationTest( stageResults = stageResults, design = design, intersectionTest = intersectionTest ) pValues <- ctr$adjustedStageWisePValues[ctr$indices[, g] == 1, ][which.max( ctr$overallAdjustedTestStatistics[ctr$indices[, g] == 1, stageIndex] ), 1:stageIndex] } if (prod(pValues^weights[1:stageIndex]) <= criticalValues[stageIndex]) { conditionalRejectionProbabilities[g, stageIndex] <- 1 } else { if (stageIndex < kMax - 1) { 
conditionalRejectionProbabilities[g, stageIndex] <- .getFisherCombinationSize( kMax - stageIndex, alpha0Vec[(stageIndex + 1):(kMax - 1)], (criticalValues[(stageIndex + 1):kMax] / prod(pValues^weights[1:stageIndex]))^(1 / weights[stageIndex + 1]), weights[(stageIndex + 2):kMax] / weights[stageIndex + 1] ) } else { conditionalRejectionProbabilities[g, stageIndex] <- (criticalValues[kMax] / prod(pValues^weights[1:stageIndex]))^(1 / weights[kMax]) } } if (design$bindingFutility) { if (pValues[stageIndex] > alpha0Vec[stageIndex]) { conditionalRejectionProbabilities[g, stageIndex:stage] <- 0 break } } } } } conditionalRejectionProbabilities[conditionalRejectionProbabilities >= 1] <- 1 conditionalRejectionProbabilities[conditionalRejectionProbabilities < 0] <- NA_real_ return(conditionalRejectionProbabilities) } #' #' Calculation of CRP based on conditional Dunnett #' #' @noRd #' .getConditionalRejectionProbabilitiesMultiArmConditionalDunnett <- function(..., stageResults) { design <- stageResults$.design .assertIsTrialDesignConditionalDunnett(design) .warnInCaseOfUnknownArguments( functionName = ".getConditionalRejectionProbabilitiesMultiArmConditionalDunnett", ignore = c("stage", "intersectionTest", "design"), ... 
) kMax <- 2 gMax <- stageResults$getGMax() conditionalRejectionProbabilities <- matrix(NA_real_, nrow = gMax, ncol = kMax) ctr <- getClosedConditionalDunnettTestResults(stageResults = stageResults, design = design) stage <- 1 for (g in 1:gMax) { if (!is.na(ctr$separatePValues[g, stage])) { conditionalRejectionProbabilities[g, 2] <- 1 - stats::pnorm(.getOneMinusQNorm(min(ctr$conditionalErrorRate[ ctr$indices[, g] == 1, stage ], na.rm = TRUE))) } } return(conditionalRejectionProbabilities) } #' #' Plotting conditional power and likelihood #' #' @noRd #' .getConditionalPowerPlotMultiArm <- function(stageResults, ..., nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaRange = NA_real_, assumedStDevs = NA_real_, piTreatmentRange = NA_real_, piControl = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_, showArms = NA_real_) { .stopInCaseOfIllegalStageDefinition2(...) kMax <- stageResults$.design$kMax stage <- stageResults$stage if (stage == kMax && length(nPlanned) > 0) { stage <- kMax - 1 } if (stage < 1 || kMax == 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "cannot plot conditional power of a fixed design") } if (stage >= kMax) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the conditional power plot is only available for subsequent stages. 
", "Please specify a 'stage' (", stage, ") < 'kMax' (", kMax, ")" ) } .assertIsValidNPlanned(nPlanned = nPlanned, kMax = kMax, stage = stage) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) if (stageResults$isDatasetMeans()) { .warnInCaseOfUnusedArgument(piTreatmentRange, "piTreatmentRange", NA_real_, "plot") .warnInCaseOfUnusedArgument(piControl, "piControl", NA_real_, "plot") return(.getConditionalPowerLikelihoodMeansMultiArm( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaRange = thetaRange, assumedStDevs = assumedStDevs, iterations = iterations, seed = seed )) } else if (stageResults$isDatasetRates()) { .warnInCaseOfUnusedArgument(thetaRange, "thetaRange", NA_real_, "plot") .warnInCaseOfUnusedArgument(assumedStDevs, "assumedStDevs", NA_real_, "plot") return(.getConditionalPowerLikelihoodRatesMultiArm( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, piTreatmentRange = piTreatmentRange, piControl = piControl, iterations = iterations, seed = seed )) } else if (stageResults$isDatasetSurvival()) { .warnInCaseOfUnusedArgument(piTreatmentRange, "piTreatmentRange", NA_real_, "plot") .warnInCaseOfUnusedArgument(piControl, "piControl", NA_real_, "plot") .warnInCaseOfUnusedArgument(assumedStDevs, "assumedStDevs", NA_real_, "plot") return(.getConditionalPowerLikelihoodSurvivalMultiArm( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaRange = thetaRange, iterations = iterations, seed = seed )) } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(stageResults$.dataInput), "' is not implemented yet" ) } rpact/R/f_analysis_enrichment_rates.R0000644000176200001440000015510614445307575017477 0ustar liggesusers## | ## | 
*Analysis of rates in enrichment designs with adaptive test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_logger.R NULL .calcRatesTestStatistics <- function(dataInput, subset, stage, thetaH0, stratifiedAnalysis, normalApproximation, directionUpper) { n <- rep(NA_real_, 2) on <- rep(NA_real_, 2) e <- rep(NA_real_, 2) oe <- rep(NA_real_, 2) testStatistics <- NA_real_ separatePValues <- NA_real_ if (!all(is.na(dataInput$getSampleSizes(stage = stage, subset = subset)))) { for (i in 1:2) { # calculation of sample size and events for overall data on[i] <- sum(dataInput$getOverallSampleSizes(stage = stage, subset = subset, group = i), na.rm = TRUE) oe[i] <- sum(dataInput$getOverallEvents(stage = stage, subset = subset, group = i), na.rm = TRUE) } if (stratifiedAnalysis) { actEv <- dataInput$getEvents(stage = stage, subset = subset, group = 1) ctrEv <- dataInput$getEvents(stage = stage, subset = subset, group = 2) actN <- dataInput$getSampleSize(stage = stage, subset = subset, group = 1) ctrN <- dataInput$getSampleSize(stage = stage, subset = subset, group = 2) weights <- actN * ctrN / (actN + ctrN) if (thetaH0 == 0) { if (sum(actEv + ctrEv, na.rm = TRUE) == 0 || sum(actEv + ctrEv, na.rm = TRUE) == sum(actN + ctrN, na.rm = TRUE)) { testStatistics <- 0 } else { rateH0 <- (actEv + ctrEv) / (actN + ctrN) testStatistics <- sum((actEv / actN - ctrEv / ctrN - 
thetaH0) * weights, na.rm = TRUE) / sqrt(sum(rateH0 * (1 - rateH0) * weights, na.rm = TRUE)) } } else { actMl <- rep(NA_real_, length(subset)) ctrMl <- rep(NA_real_, length(subset)) for (population in (1:length(subset))) { y <- .getFarringtonManningValues( rate1 = actEv[population] / actN[population], rate2 = ctrEv[population] / ctrN[population], theta = thetaH0, allocation = actN[population] / ctrN[population], method = "diff" ) actMl[population] <- y$ml1 ctrMl[population] <- y$ml2 } testStatistics <- sum((actEv / actN - ctrEv / ctrN - thetaH0) * weights, na.rm = TRUE) / sqrt(sum((actMl * (1 - actMl) / actN + ctrMl * (1 - ctrMl) / ctrN) * weights^2, na.rm = TRUE)) } if (directionUpper) { separatePValues <- 1 - stats::pnorm(testStatistics) } else { separatePValues <- stats::pnorm(testStatistics) } } # non-stratified analysis else { for (i in 1:2) { n[i] <- sum(dataInput$getSampleSizes(stage = stage, subset = subset, group = i), na.rm = TRUE) e[i] <- sum(dataInput$getEvents(stage = stage, subset = subset, group = i), na.rm = TRUE) } if (normalApproximation) { if (thetaH0 == 0) { if (!is.na(e[1])) { if ((e[1] + e[2] == 0) || (e[1] + e[2] == n[1] + n[2])) { testStatistics <- 0 } else { rateH0 <- (e[1] + e[2]) / (n[1] + n[2]) testStatistics <- (e[1] / n[1] - e[2] / n[2] - thetaH0) / sqrt(rateH0 * (1 - rateH0) * (1 / n[1] + 1 / n[2])) } } else { testStatistics <- NA_real_ } } else { y <- .getFarringtonManningValues( rate1 = e[1] / n[1], rate2 = e[2] / n[2], theta = thetaH0, allocation = n[1] / n[2], method = "diff" ) testStatistics <- (e[1] / n[1] - e[2] / n[2] - thetaH0) / sqrt(y$ml1 * (1 - y$ml1) / n[1] + y$ml2 * (1 - y$ml2) / n[2]) } if (directionUpper) { separatePValues <- 1 - stats::pnorm(testStatistics) } else { separatePValues <- stats::pnorm(testStatistics) } } else { if (thetaH0 != 0) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "'thetaH0' (", thetaH0, ") must be 0 to perform Fisher's exact test" ) } if (directionUpper) { separatePValues <- 
stats::phyper(e[1] - 1, e[1] + e[2], n[1] + n[2] - e[1] - e[2], n[1], lower.tail = FALSE ) } else { separatePValues <- stats::phyper(e[1], e[1] + e[2], n[1] + n[2] - e[1] - e[2], n[1], lower.tail = TRUE ) } if (directionUpper) { testStatistics <- .getOneMinusQNorm(separatePValues) } else { testStatistics <- -.getOneMinusQNorm(separatePValues) } } } } if ("R" %in% subset && is.na(dataInput$getSampleSizes(stage = stage, subset = "R", group = 1)) || ("S1" %in% subset) && is.na(dataInput$getSampleSizes(stage = stage, subset = "S1", group = 1)) || ("S2" %in% subset) && is.na(dataInput$getSampleSizes(stage = stage, subset = "S2", group = 1)) || ("S3" %in% subset) && is.na(dataInput$getSampleSizes(stage = stage, subset = "S3", group = 1)) || ("S4" %in% subset) && is.na(dataInput$getSampleSizes(stage = stage, subset = "S4", group = 1)) ) { n <- rep(NA_real_, 2) e <- rep(NA_real_, 2) on <- rep(NA_real_, 2) oe <- rep(NA_real_, 2) separatePValues <- NA_real_ testStatistics <- NA_real_ } return(list( populationNs = n, populationEvents = e, overallRates1 = oe[1] / on[1], overallSampleSizes1 = on[1], overallRates2 = oe[2] / on[2], overallSampleSizes2 = on[2], separatePValues = separatePValues, testStatistics = testStatistics )) } .getStageResultsRatesEnrichment <- function(..., design, dataInput, thetaH0 = C_THETA_H0_RATES_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, calculateSingleStepAdjusted = FALSE, userFunctionCallEnabled = FALSE) { .assertIsTrialDesign(design) .assertIsDatasetRates(dataInput) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .assertIsValidDirectionUpper(directionUpper, design$sided) .assertIsSingleLogical(normalApproximation, "normalApproximation") .assertIsValidIntersectionTestEnrichment(design, intersectionTest) .warnInCaseOfUnknownArguments( functionName = 
".getStageResultsRatesEnrichment", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) kMax <- design$kMax if (dataInput$isStratified()) { gMax <- log(length(levels(factor(dataInput$subsets))), 2) + 1 } else { gMax <- length(levels(factor(dataInput$subsets))) } .assertIsValidIntersectionTestEnrichment(design, intersectionTest) if ((gMax > 2) && intersectionTest == "SpiessensDebois") { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "gMax (", gMax, ") > 2: Spiessens & Debois intersection test test can only be used for one subset" ) } if (intersectionTest == "SpiessensDebois" && !normalApproximation) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Spiessens & Debois test cannot be used with Fisher's ", "exact test (normalApproximation = FALSE)", call. = FALSE ) } if (stratifiedAnalysis && !normalApproximation) { stop( C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS, "stratified version is not available for Fisher's exact test" ) } if (stratifiedAnalysis && !dataInput$isStratified()) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "stratified analysis is only possible for stratified data input" ) } if (dataInput$isStratified() && (gMax > 4)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "gMax (", gMax, ") > 4: stratified analysis not implemented" ) } stageResults <- StageResultsEnrichmentRates( design = design, dataInput = dataInput, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER), normalApproximation = normalApproximation, directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis, stage = stage ) .setValueAndParameterType(stageResults, "stratifiedAnalysis", stratifiedAnalysis, C_STRATIFIED_ANALYSIS_DEFAULT) .setValueAndParameterType(stageResults, "intersectionTest", intersectionTest, C_INTERSECTION_TEST_ENRICHMENT_DEFAULT) overallSampleSizes1 <- matrix(NA_real_, nrow = gMax, ncol = kMax) 
overallSampleSizes2 <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallRates1 <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallRates2 <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallEvents <- rep(NA_real_, kMax) testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax) overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax) separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax) dimnames(testStatistics) <- list(paste("population ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = "")) dimnames(separatePValues) <- list(paste("population ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = "")) subsets <- .createSubsetsByGMax(gMax = gMax, stratifiedInput = dataInput$isStratified(), subsetIdPrefix = "S") for (k in 1:stage) { for (population in (1:gMax)) { subset <- subsets[[population]] results <- .calcRatesTestStatistics( dataInput, subset, k, thetaH0, stratifiedAnalysis, normalApproximation, directionUpper ) testStatistics[population, k] <- results$testStatistics separatePValues[population, k] <- results$separatePValues overallSampleSizes1[population, k] <- results$overallSampleSizes1 overallSampleSizes2[population, k] <- results$overallSampleSizes2 overallRates1[population, k] <- results$overallRates1 overallRates2[population, k] <- results$overallRates2 } } stageResults$overallTestStatistics <- overallTestStatistics stageResults$overallPisTreatment <- overallRates1 stageResults$overallPisControl <- overallRates2 stageResults$.overallSampleSizes1 <- overallSampleSizes1 stageResults$.overallSampleSizes2 <- overallSampleSizes2 stageResults$testStatistics <- testStatistics stageResults$separatePValues <- separatePValues stageResults$effectSizes <- overallRates1 - overallRates2 stageResults$.setParameterType("effectSizes", C_PARAM_GENERATED) .setWeightsToStageResults(design, stageResults) if (!calculateSingleStepAdjusted) { return(stageResults) } # Calculation of single stage adjusted p-Values and overall test statistics # for 
determination of RCIs singleStepAdjustedPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax) combInverseNormal <- matrix(NA_real_, nrow = gMax, ncol = kMax) combFisher <- matrix(NA_real_, nrow = gMax, ncol = kMax) if (.isTrialDesignInverseNormal(design)) { weightsInverseNormal <- stageResults$weightsInverseNormal } else if (.isTrialDesignFisher(design)) { weightsFisher <- stageResults$weightsFisher } for (k in 1:stage) { selected <- sum(!is.na(separatePValues[, k])) for (population in 1:gMax) { if ((intersectionTest == "Bonferroni") || (intersectionTest == "Simes")) { singleStepAdjustedPValues[population, k] <- min(1, separatePValues[population, k] * selected) } else if (intersectionTest == "Sidak") { singleStepAdjustedPValues[population, k] <- 1 - (1 - separatePValues[population, k])^selected } else if (intersectionTest == "SpiessensDebois") { if (!is.na(testStatistics[population, k])) { df <- NA_real_ sigma <- 1 if (selected == 2) { if (dataInput$isStratified()) { sigma <- matrix(rep(sqrt(sum(dataInput$getSampleSizes(stage = k, subset = "S1")) / sum(dataInput$getSampleSizes(stage = k))), 4), nrow = 2) } else { sigma <- matrix(rep(sqrt(sum(dataInput$getSampleSizes(stage = k, subset = "S1")) / sum(dataInput$getSampleSizes(stage = k, subset = "F"))), 4), nrow = 2) } diag(sigma) <- 1 } singleStepAdjustedPValues[population, k] <- 1 - .getMultivariateDistribution( type = "normal", upper = ifelse(directionUpper, testStatistics[population, k], -testStatistics[population, k]), sigma = sigma, df = NA ) } } if (.isTrialDesignInverseNormal(design)) { combInverseNormal[population, k] <- (weightsInverseNormal[1:k] %*% .getOneMinusQNorm(singleStepAdjustedPValues[population, 1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) } else if (.isTrialDesignFisher(design)) { combFisher[population, k] <- prod(singleStepAdjustedPValues[population, 1:k]^weightsFisher[1:k]) } } } stageResults$singleStepAdjustedPValues <- singleStepAdjustedPValues 
stageResults$.setParameterType("singleStepAdjustedPValues", C_PARAM_GENERATED) if (.isTrialDesignFisher(design)) { stageResults$combFisher <- combFisher stageResults$.setParameterType("combFisher", C_PARAM_GENERATED) } else if (.isTrialDesignInverseNormal(design)) { stageResults$combInverseNormal <- combInverseNormal stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED) } return(stageResults) } .getAnalysisResultsRatesEnrichment <- function(..., design, dataInput) { if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsRatesInverseNormalEnrichment(design = design, dataInput = dataInput, ...)) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsRatesFisherEnrichment(design = design, dataInput = dataInput, ...)) } .stopWithWrongDesignMessageEnrichment(design) } .getAnalysisResultsRatesInverseNormalEnrichment <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, thetaH0 = C_THETA_H0_RATES_DEFAULT, piTreatments = NA_real_, piControls = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsRatesInverseNormalEnrichment", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) results <- AnalysisResultsEnrichmentInverseNormal(design = design, dataInput = dataInput) results <- .getAnalysisResultsRatesEnrichmentAll( results = results, design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, thetaH0 = thetaH0, piTreatments = piTreatments, piControls = piControls, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance ) return(results) } .getAnalysisResultsRatesFisherEnrichment <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, thetaH0 = C_THETA_H0_RATES_DEFAULT, piTreatments = NA_real_, piControls = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsRatesFisherEnrichment", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) results <- AnalysisResultsEnrichmentFisher(design = design, dataInput = dataInput) .setValueAndParameterType(results, "iterations", as.integer(iterations), C_ITERATIONS_DEFAULT) .setValueAndParameterType(results, "seed", seed, NA_real_) results <- .getAnalysisResultsRatesEnrichmentAll( results = results, design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, thetaH0 = thetaH0, piTreatments = piTreatments, piControls = piControls, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance, iterations = iterations, seed = seed ) return(results) } .getAnalysisResultsRatesEnrichmentAll <- function(..., results, design, dataInput, intersectionTest, stage, directionUpper, normalApproximation, stratifiedAnalysis, thetaH0, piTreatments, piControls, nPlanned, allocationRatioPlanned, tolerance, iterations, seed) { startTime <- Sys.time() stageResults <- .getStageResultsRatesEnrichment( design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis ) results$.setStageResults(stageResults) .logProgress("Stage results calculated", startTime = startTime) gMax <- stageResults$getGMax() piControls <- .assertIsValidPiControlForEnrichment(piControls, stageResults, stage, results = results) piTreatments <- .assertIsValidPiTreatmentsForEnrichment(piTreatments, stageResults, stage, results = results) .setValueAndParameterType(results, "intersectionTest", intersectionTest, C_INTERSECTION_TEST_ENRICHMENT_DEFAULT) .setValueAndParameterType(results, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT) .setValueAndParameterType(results, "normalApproximation", normalApproximation, C_NORMAL_APPROXIMATION_RATES_DEFAULT) 
.setValueAndParameterType(results, "stratifiedAnalysis", stratifiedAnalysis, C_STRATIFIED_ANALYSIS_DEFAULT) .setValueAndParameterType(results, "thetaH0", thetaH0, C_THETA_H0_RATES_DEFAULT) .setConditionalPowerArguments(results, dataInput, nPlanned, allocationRatioPlanned) .setNPlannedAndPi(results, nPlanned, "piControls", piControls, piTreatments) if (results$.getParameterType("piControls") %in% c(C_PARAM_TYPE_UNKNOWN, C_PARAM_NOT_APPLICABLE)) { .setValueAndParameterType( results, "piControls", matrix(piControls, ncol = 1), matrix(rep(NA_real_, gMax), ncol = 1) ) } else { results$piControls <- matrix(piControls, ncol = 1) } if (results$.getParameterType("piTreatments") %in% c(C_PARAM_TYPE_UNKNOWN, C_PARAM_NOT_APPLICABLE)) { .setValueAndParameterType( results, "piTreatments", matrix(piTreatments, ncol = 1), matrix(rep(NA_real_, gMax), ncol = 1) ) } else { if (is.matrix(piTreatments)) { results$piTreatments <- piTreatments } else { results$piTreatments <- matrix(piTreatments, ncol = 1) } } startTime <- Sys.time() results$.closedTestResults <- getClosedCombinationTestResults(stageResults = stageResults) .logProgress("Closed test calculated", startTime = startTime) if (design$kMax > 1) { # conditional power startTime <- Sys.time() if (.isTrialDesignFisher(design)) { results$.conditionalPowerResults <- .getConditionalPowerRatesEnrichment( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, piTreatments = piTreatments, piControls = piControls, iterations = iterations, seed = seed ) .synchronizeIterationsAndSeed(results) } else { results$.conditionalPowerResults <- .getConditionalPowerRatesEnrichment( stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, piTreatments = piTreatments, piControls = piControls ) results$conditionalPower <- results$.conditionalPowerResults$conditionalPower results$.setParameterType("conditionalPower", C_PARAM_GENERATED) } 
.logProgress("Conditional power calculated", startTime = startTime) # CRP - conditional rejection probabilities startTime <- Sys.time() results$conditionalRejectionProbabilities <- .getConditionalRejectionProbabilitiesEnrichment( stageResults = stageResults, stage = stage ) results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_GENERATED) .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime) } else { results$.setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE) results$.setParameterType("conditionalPowerSimulated", C_PARAM_NOT_APPLICABLE) results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_NOT_APPLICABLE) } # RCI - repeated confidence interval repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsRatesEnrichment( design = design, dataInput = dataInput, stratifiedAnalysis = stratifiedAnalysis, intersectionTest = intersectionTest, stage = stage, normalApproximation = normalApproximation, tolerance = tolerance ) results$repeatedConfidenceIntervalLowerBounds <- matrix(rep(NA_real_, gMax * design$kMax), nrow = gMax, ncol = design$kMax) results$repeatedConfidenceIntervalUpperBounds <- results$repeatedConfidenceIntervalLowerBounds for (k in 1:design$kMax) { for (population in 1:gMax) { results$repeatedConfidenceIntervalLowerBounds[population, k] <- repeatedConfidenceIntervals[population, 1, k] results$repeatedConfidenceIntervalUpperBounds[population, k] <- repeatedConfidenceIntervals[population, 2, k] } } results$.setParameterType("repeatedConfidenceIntervalLowerBounds", C_PARAM_GENERATED) results$.setParameterType("repeatedConfidenceIntervalUpperBounds", C_PARAM_GENERATED) # repeated p-value results$repeatedPValues <- .getRepeatedPValuesEnrichment(stageResults = stageResults, tolerance = tolerance) results$.setParameterType("repeatedPValues", C_PARAM_GENERATED) return(results) } .getRootThetaRatesEnrichment <- function(..., design, dataInput, population, stage, directionUpper, 
normalApproximation, stratifiedAnalysis, intersectionTest, thetaLow, thetaUp, firstParameterName, secondValue, tolerance) { result <- .getOneDimensionalRoot( function(theta) { stageResults <- .getStageResultsRatesEnrichment( design = design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper, intersectionTest = intersectionTest, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, calculateSingleStepAdjusted = TRUE ) firstValue <- stageResults[[firstParameterName]][population, stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- .getOneMinusQNorm(firstValue) } return(firstValue - secondValue) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance, callingFunctionInformation = ".getRootThetaRatesEnrichment" ) return(result) } .getRepeatedConfidenceIntervalsRatesEnrichmentAll <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) { .assertIsValidIntersectionTestEnrichment(design, intersectionTest) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) stageResults <- .getStageResultsRatesEnrichment( design = design, dataInput = dataInput, stage = stage, thetaH0 = 0, directionUpper = directionUpper, intersectionTest = intersectionTest, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, calculateSingleStepAdjusted = FALSE ) gMax <- stageResults$getGMax() repeatedConfidenceIntervals <- array(NA_real_, dim = c(gMax, 2, design$kMax)) # Repeated onfidence intervals when using combination tests if (.isTrialDesignFisher(design)) { bounds <- design$alpha0Vec border <- C_ALPHA_0_VEC_DEFAULT criticalValues <- design$criticalValues conditionFunction <- 
.isFirstValueSmallerThanSecondValue } else if (.isTrialDesignInverseNormal(design)) { bounds <- design$futilityBounds border <- C_FUTILITY_BOUNDS_DEFAULT criticalValues <- design$criticalValues criticalValues[is.infinite(criticalValues) & criticalValues > 0] <- C_QNORM_MAXIMUM criticalValues[is.infinite(criticalValues) & criticalValues < 0] <- C_QNORM_MINIMUM conditionFunction <- .isFirstValueGreaterThanSecondValue } # necessary for adjustment for binding futility boundaries futilityCorr <- rep(NA_real_, design$kMax) stages <- (1:stage) for (k in stages) { startTime <- Sys.time() for (population in 1:gMax) { if (!is.na(stageResults$testStatistics[population, k]) && criticalValues[k] < C_QNORM_MAXIMUM) { thetaLow <- -1 + tolerance thetaUp <- 1 - tolerance # finding upper and lower RCI limits through root function repeatedConfidenceIntervals[population, 1, k] <- .getRootThetaRatesEnrichment( design = design, dataInput = dataInput, population = population, stage = k, directionUpper = TRUE, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, thetaLow = thetaLow, thetaUp = thetaUp, intersectionTest = intersectionTest, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance ) repeatedConfidenceIntervals[population, 2, k] <- .getRootThetaRatesEnrichment( design = design, dataInput = dataInput, population = population, stage = k, directionUpper = FALSE, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, thetaLow = thetaLow, thetaUp = thetaUp, intersectionTest = intersectionTest, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance ) # adjustment for binding futility bounds if (k > 1 && !is.na(bounds[k - 1]) && conditionFunction(bounds[k - 1], border) && design$bindingFutility) { parameterName <- ifelse(.isTrialDesignFisher(design), "singleStepAdjustedPValues", firstParameterName ) futilityCorr[k] <- 
.getRootThetaRatesEnrichment( design = design, dataInput = dataInput, population = population, stage = k - 1, directionUpper = directionUpper, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, thetaLow = thetaLow, thetaUp = thetaUp, intersectionTest = intersectionTest, firstParameterName = parameterName, secondValue = bounds[k - 1], tolerance = tolerance ) if (directionUpper) { repeatedConfidenceIntervals[population, 1, k] <- min( min(futilityCorr[2:k]), repeatedConfidenceIntervals[population, 1, k] ) } else { repeatedConfidenceIntervals[population, 2, k] <- max( max(futilityCorr[2:k]), repeatedConfidenceIntervals[population, 2, k] ) } } if (!is.na(repeatedConfidenceIntervals[population, 1, k]) && !is.na(repeatedConfidenceIntervals[population, 2, k]) && repeatedConfidenceIntervals[population, 1, k] > repeatedConfidenceIntervals[population, 2, k]) { repeatedConfidenceIntervals[population, , k] <- rep(NA_real_, 2) } } } .logProgress("Repeated confidence intervals for stage %s calculated", startTime = startTime, k) } return(repeatedConfidenceIntervals) } #' #' RCIs based on inverse normal combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsRatesEnrichmentInverseNormal <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { if (!normalApproximation) { message("Repeated confidence intervals will be calculated under the normal approximation") normalApproximation <- TRUE } .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsRatesEnrichmentInverseNormal", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) return(.getRepeatedConfidenceIntervalsRatesEnrichmentAll( design = design, dataInput = dataInput, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, directionUpper = directionUpper, intersectionTest = intersectionTest, tolerance = tolerance, firstParameterName = "combInverseNormal", ... )) } #' #' RCIs based on Fisher's combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsRatesEnrichmentFisher <- function(..., design, dataInput, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { if (!normalApproximation) { message("Repeated confidence intervals will be calculated under the normal approximation") normalApproximation <- TRUE } .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsRatesEnrichmentFisher", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) return(.getRepeatedConfidenceIntervalsRatesEnrichmentAll( design = design, dataInput = dataInput, normalApproximation = normalApproximation, stratifiedAnalysis = stratifiedAnalysis, directionUpper = directionUpper, intersectionTest = intersectionTest, tolerance = tolerance, firstParameterName = "combFisher", ... 
))
}

#'
#' Calculation of repeated confidence intervals (RCIs) for Rates
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRatesEnrichment <- function(..., design) {
    # dispatch on design type; only combination test designs are supported
    if (.isTrialDesignInverseNormal(design)) {
        return(.getRepeatedConfidenceIntervalsRatesEnrichmentInverseNormal(design = design, ...))
    }

    if (.isTrialDesignFisher(design)) {
        return(.getRepeatedConfidenceIntervalsRatesEnrichmentFisher(design = design, ...))
    }

    .stopWithWrongDesignMessageEnrichment(design)
}

#'
#' Calculation of conditional power for Rates
#'
#' @noRd
#'
.getConditionalPowerRatesEnrichment <- function(..., stageResults, stage = stageResults$stage,
        nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        piTreatments = NA_real_, piControls = NA_real_, useAdjustment = TRUE,
        iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    design <- stageResults$.design
    gMax <- stageResults$getGMax()
    kMax <- design$kMax

    # optional alias: 'piTreatmentsH1' overrides 'piTreatments' if both are given
    piTreatmentsH1 <- .getOptionalArgument("piTreatmentsH1", ...)
    if (!is.null(piTreatmentsH1) && !is.na(piTreatmentsH1)) {
        if (!is.na(piTreatments)) {
            warning(sQuote("piTreatments"), " will be ignored because ",
                sQuote("piTreatmentsH1"), " is defined",
                call. = FALSE
            )
        }
        piTreatments <- piTreatmentsH1
    }
    if (is.matrix(piTreatments)) {
        piTreatments <- as.vector(piTreatments)
    }

    # optional alias: 'piControlH1' overrides 'piControls' if both are given.
    # Bug fix: this branch previously referenced the undefined variable 'piControl'
    # (the parameter is named 'piControls'), so supplying 'piControlH1' raised an
    # "object not found" error and the override never took effect.
    piControlH1 <- .getOptionalArgument("piControlH1", ...)
    if (!is.null(piControlH1) && !is.na(piControlH1)) {
        if (!all(is.na(piControls))) {
            warning(sQuote("piControls"), " will be ignored because ",
                sQuote("piControlH1"), " is defined",
                call. = FALSE
            )
        }
        piControls <- piControlH1
    }

    results <- ConditionalPowerResultsEnrichmentRates(
        .design = design,
        .stageResults = stageResults,
        piControls = piControls,
        piTreatments = piTreatments,
        nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned
    )

    # without planned sample sizes no conditional power can be computed
    if (any(is.na(nPlanned))) {
        return(results)
    }

    .assertIsValidStage(stage, kMax)
    if (stage == kMax) {
        .logDebug(
            "Conditional power will be calculated only for subsequent stages ",
            "(stage = ", stage, ", kMax = ", kMax, ")"
        )
        return(results)
    }

    if (!.isValidNPlanned(nPlanned = nPlanned, kMax = kMax, stage = stage)) {
        return(results)
    }

    .assertIsValidNPlanned(nPlanned, kMax, stage)
    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)
    results$.setParameterType("nPlanned", C_PARAM_USER_DEFINED)
    results$.setParameterType(
        "allocationRatioPlanned",
        ifelse(allocationRatioPlanned == C_ALLOCATION_RATIO_DEFAULT,
            C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
        )
    )

    piControls <- .assertIsValidPiControlForEnrichment(piControls, stageResults, stage, results = results)
    piTreatments <- .assertIsValidPiTreatmentsForEnrichment(piTreatments, stageResults, stage, results = results)

    # rates must be scalar (recycled) or one value per population
    if ((length(piTreatments) != 1) && (length(piTreatments) != gMax)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            sprintf(paste0(
                "length of 'piTreatments' (%s) ",
                "must be equal to 'gMax' (%s) or 1"
            ), .arrayToString(piTreatments), gMax)
        )
    }
    if ((length(piControls) != 1) && (length(piControls) != gMax)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            sprintf(paste0(
                "length of 'piControls' (%s) ",
                "must be equal to 'gMax' (%s) or 1"
            ), .arrayToString(piControls), gMax)
        )
    }

    if (.isTrialDesignInverseNormal(design)) {
        return(.getConditionalPowerRatesEnrichmentInverseNormal(
            results = results,
            design = design, stageResults = stageResults, stage = stage,
            nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
            piControls = piControls,
piTreatments = piTreatments, ... )) } else if (.isTrialDesignFisher(design)) { return(.getConditionalPowerRatesEnrichmentFisher( results = results, design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, useAdjustment = useAdjustment, piControls = piControls, piTreatments = piTreatments, iterations = iterations, seed = seed, ... )) } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'design' must be an instance of TrialDesignInverseNormal or TrialDesignFisher" ) } #' #' Calculation of conditional power based on inverse normal method #' #' @noRd #' .getConditionalPowerRatesEnrichmentInverseNormal <- function(..., results, design, stageResults, stage, allocationRatioPlanned, nPlanned, piTreatments, piControls) { .assertIsTrialDesignInverseNormal(design) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerRatesEnrichmentInverseNormal", ignore = c("piTreatmentsH1", "piControlH1"), ... ) kMax <- design$kMax gMax <- stageResults$getGMax() weights <- .getWeightsInverseNormal(design) informationRates <- design$informationRates nPlanned <- c(rep(NA_real_, stage), nPlanned) condError <- .getConditionalRejectionProbabilitiesEnrichment(design = design, stageResults = stageResults)[, stage] ml <- (allocationRatioPlanned * piTreatments + piControls) / (1 + allocationRatioPlanned) adjustment <- .getOneMinusQNorm(condError) * (1 - sqrt(ml * (1 - ml) * (1 + allocationRatioPlanned)) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControls * (1 - piControls))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * sum(nPlanned[(stage + 1):kMax])) adjustment[condError < 1e-12] <- 0 .setValueAndParameterType( results, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT ) results$.setParameterType("piControls", C_PARAM_DEFAULT_VALUE) if (length(piTreatments) == 1) { piTreatments <- rep(piTreatments, gMax) results$.setParameterType("piTreatments", 
C_PARAM_GENERATED) } else { results$.setParameterType("piTreatments", C_PARAM_DEFAULT_VALUE) } if (stageResults$directionUpper) { standardizedEffect <- (piTreatments - piControls - stageResults$thetaH0) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControls * (1 - piControls)) * sqrt(1 + allocationRatioPlanned) + adjustment } else { standardizedEffect <- -(piTreatments - piControls - stageResults$thetaH0) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControls * (1 - piControls)) * sqrt(1 + allocationRatioPlanned) + adjustment } nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned ctr <- .performClosedCombinationTest(stageResults = stageResults) criticalValues <- design$criticalValues for (population in 1:gMax) { if (!is.na(ctr$separatePValues[population, stage])) { # shifted decision region for use in getGroupSeqProbs # Inverse Normal Method shiftedDecisionRegionUpper <- criticalValues[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - min(ctr$overallAdjustedTestStatistics[ctr$indices[, population] == 1, stage], na.rm = TRUE) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - standardizedEffect[population] * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - min(ctr$overallAdjustedTestStatistics[ctr$indices[, population] == 1, stage], na.rm = TRUE) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) - standardizedEffect[population] * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) / 
sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) } # scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) / (1 - informationRates[stage]) decisionMatrix <- matrix(c( shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecisionRegionUpper ), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities( decisionMatrix = decisionMatrix, informationRates = scaledInformation ) results$conditionalPower[population, (stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ]) } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned results$nPlanned <- nPlanned results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$piTreatments <- piTreatments results$piControls <- piControls return(results) } #' #' Calculation of conditional power based on Fisher's combination test #' #' @noRd #' .getConditionalPowerRatesEnrichmentFisher <- function(..., results, design, stageResults, stage, allocationRatioPlanned, nPlanned, piTreatments, piControls, useAdjustment = TRUE, iterations, seed) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerRatesEnrichmentFisher", ignore = c("piTreatmentsH1", "piControlH1"), ... 
) kMax <- design$kMax gMax <- stageResults$getGMax() criticalValues <- design$criticalValues weightsFisher <- .getWeightsFisher(design) results$iterations <- as.integer(iterations) results$.setParameterType("iterations", C_PARAM_USER_DEFINED) results$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) results$seed <- .setSeed(seed) results$simulated <- FALSE results$.setParameterType("simulated", C_PARAM_DEFAULT_VALUE) nPlanned <- c(rep(NA_real_, stage), nPlanned) if (useAdjustment) { condError <- .getConditionalRejectionProbabilitiesEnrichment( design = design, stageResults = stageResults, iterations = iterations, seed = seed )[, stage] ml <- (allocationRatioPlanned * piTreatments + piControls) / (1 + allocationRatioPlanned) adjustment <- .getOneMinusQNorm(condError) * (1 - sqrt(ml * (1 - ml) * (1 + allocationRatioPlanned)) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControls * (1 - piControls))) * (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * sum(nPlanned[(stage + 1):kMax])) adjustment[condError < 1e-12] <- 0 } else { adjustment <- 0 } .setValueAndParameterType(results, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT) if (length(piTreatments) == 1) { piTreatments <- rep(piTreatments, gMax) results$.setParameterType("piTreatments", C_PARAM_GENERATED) } else { results$.setParameterType("piTreatments", C_PARAM_DEFAULT_VALUE) } if (stageResults$directionUpper) { standardizedEffect <- (piTreatments - piControls) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControls * (1 - piControls)) * sqrt(1 + allocationRatioPlanned) + adjustment } else { standardizedEffect <- -(piTreatments - piControls - stageResults$thetaH0) / sqrt(piTreatments * (1 - piTreatments) + allocationRatioPlanned * piControls * (1 - piControls)) * sqrt(1 + allocationRatioPlanned) + adjustment } nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * 
nPlanned ctr <- .performClosedCombinationTest(stageResults = stageResults) for (population in 1:gMax) { if (!is.na(ctr$separatePValues[population, stage])) { if (gMax == 1) { pValues <- ctr$adjustedStageWisePValues[ctr$indices[, population] == 1, ][1:stage] } else { pValues <- ctr$adjustedStageWisePValues[ctr$indices[, population] == 1, ][which.max( ctr$overallAdjustedTestStatistics[ctr$indices[, population] == 1, stage] ), 1:stage] } if (stage < kMax - 1) { for (k in (stage + 1):kMax) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueConditionalPowerFisher( kMax = kMax, alpha0Vec = design$alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, pValues = pValues, currentKMax = k, thetaH1 = standardizedEffect[population], stage = stage, nPlanned = nPlanned ) } results$conditionalPower[population, k] <- reject / iterations } results$simulated <- TRUE results$.setParameterType("simulated", C_PARAM_GENERATED) } else if (stage == kMax - 1) { divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)]) result <- 1 - (criticalValues[kMax] / divisor)^(1 / weightsFisher[kMax]) if (result <= 0 || result >= 1) { warning("Calculation not possible: could not calculate conditional power for stage ", kMax, call. 
= FALSE ) results$conditionalPower[population, kMax] <- NA_real_ } else { results$conditionalPower[population, kMax] <- 1 - stats::pnorm(.getQNorm(result) - standardizedEffect[population] * sqrt(nPlanned[kMax])) } } } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned results$nPlanned <- nPlanned results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$piTreatments <- piTreatments results$piControls <- piControls return(results) } #' #' Calculation of conditional power and likelihood values for plotting the graph #' #' @noRd #' .getConditionalPowerLikelihoodRatesEnrichment <- function(..., stageResults, stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, piTreatmentRange, piControls = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .associatedArgumentsAreDefined(nPlanned = nPlanned, piTreatmentRange = piTreatmentRange) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) design <- stageResults$.design kMax <- design$kMax gMax <- stageResults$getGMax() intersectionTest <- stageResults$intersectionTest piControls <- .assertIsValidPiControlForEnrichment(piControls, stageResults, stage) if (length(piControls) == 1) { piControls <- rep(piControls, gMax) } piTreatmentRange <- .assertIsValidPiTreatmentRange(piTreatmentRange = piTreatmentRange) populations <- numeric(gMax * length(piTreatmentRange)) effectValues <- numeric(gMax * length(piTreatmentRange)) condPowerValues <- numeric(gMax * length(piTreatmentRange)) likelihoodValues <- numeric(gMax * length(piTreatmentRange)) stdErr <- sqrt(stageResults$overallPisTreatment[, stage] * (1 - stageResults$overallPisTreatment[, stage])) / sqrt(stageResults$.overallSampleSizes2[, stage]) results <- ConditionalPowerResultsEnrichmentRates( .design = design, .stageResults = 
stageResults, piControls = piControls, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned ) j <- 1 for (i in seq(along = piTreatmentRange)) { for (population in (1:gMax)) { populations[j] <- population effectValues[j] <- piTreatmentRange[i] if (.isTrialDesignInverseNormal(design)) { condPowerValues[j] <- .getConditionalPowerRatesEnrichmentInverseNormal( results = results, design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, piControls = piControls, piTreatments = piTreatmentRange[i] )$conditionalPower[population, kMax] } else if (.isTrialDesignFisher(design)) { condPowerValues[j] <- .getConditionalPowerRatesEnrichmentFisher( results = results, design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, useAdjustment = FALSE, piControls = piControls, piTreatments = piTreatmentRange[i], iterations = iterations, seed = seed )$conditionalPower[population, kMax] } likelihoodValues[j] <- stats::dnorm(piTreatmentRange[i], stageResults$overallPisTreatment[population, stage], stdErr[population]) / stats::dnorm(0, 0, stdErr[population]) j <- j + 1 } } subtitle <- paste0( "Intersection test = ", intersectionTest, ", stage = ", stage, ", # of remaining subjects = ", sum(nPlanned), ", control rate = ", .formatSubTitleValue(piControls, "piControls"), ", allocation ratio = ", .formatSubTitleValue(allocationRatioPlanned, "allocationRatioPlanned") ) return(list( populations = populations, xValues = effectValues, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main = C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, xlab = "Treatment rate", ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD, sub = subtitle )) } rpact/R/f_analysis_enrichment_survival.R0000644000176200001440000014372514445307575020240 0ustar liggesusers## | ## | *Analysis of survival in enrichment designs with adaptive test* ## | 
## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | ## | #' @include f_logger.R NULL #' #' @title #' Get Analysis Results Survival #' #' @description #' Returns an analysis result object. #' #' @param design The trial design. #' #' @return Returns a \code{AnalysisResultsSurvival} object. #' #' @keywords internal #' #' @noRd #' .calcSurvivalTestStatistics <- function(dataInput, subset, stage, thetaH0, stratifiedAnalysis, directionUpper = TRUE) { overallEvents <- NA_real_ testStatistics <- NA_real_ separatePValues <- NA_real_ overallAllocationRatios <- NA_real_ overallTestStatistics <- NA_real_ if (!all(is.na(dataInput$getOverallEvents(stage = stage, subset = subset)))) { overallEvents <- sum(dataInput$getOverallEvents(stage = stage, subset = subset), na.rm = TRUE) if (dataInput$isStratified()) { overallAllocationRatios <- sum(dataInput$getOverallAllocationRatios(stage = stage, subset = subset) * dataInput$getOverallEvents(stage = stage, subset = subset), na.rm = TRUE) / sum(dataInput$getOverallEvents(stage = stage, subset = subset), na.rm = TRUE) overallTestStatistics <- (sum(dataInput$getOverallEvents(stage = stage, subset = subset), na.rm = TRUE) - sum(dataInput$getOverallExpectedEvents(stage = stage, subset = subset), na.rm = TRUE)) / sqrt(sum(dataInput$getOverallVarianceEvents(stage = stage, subset = subset), na.rm = TRUE)) - sqrt(sum(dataInput$getOverallEvents(stage = 
stage, subset = subset), na.rm = TRUE)) * sqrt(overallAllocationRatios) / (1 + overallAllocationRatios) * log(thetaH0) if (stage == 1) { testStatistics <- overallTestStatistics } else { testStatistics <- (sqrt(sum(dataInput$getOverallEvents(stage = stage, subset = subset), na.rm = TRUE)) * (sum(dataInput$getOverallEvents(stage = stage, subset = subset), na.rm = TRUE) - sum(dataInput$getOverallExpectedEvents(stage = stage, subset = subset), na.rm = TRUE)) / sqrt(sum(dataInput$getOverallVarianceEvents(stage = stage, subset = subset), na.rm = TRUE)) - sqrt(sum(dataInput$getOverallEvents(stage = stage - 1, subset = subset), na.rm = TRUE)) * (sum(dataInput$getOverallEvents(stage = stage - 1, subset = subset) - dataInput$getOverallExpectedEvents(stage = stage - 1, subset = subset), na.rm = TRUE)) / sqrt(sum(dataInput$getOverallVarianceEvents(stage = stage - 1, subset = subset), na.rm = TRUE))) / sqrt(sum(dataInput$getOverallEvents(stage = stage, subset = subset) - dataInput$getOverallEvents(stage = stage - 1, subset = subset), na.rm = TRUE)) - sqrt(sum(dataInput$getOverallEvents(stage = stage, subset = subset) - dataInput$getOverallEvents(stage = stage - 1, subset = subset), na.rm = TRUE)) * sqrt(overallAllocationRatios) / (1 + overallAllocationRatios) * log(thetaH0) } } # non-stratified data input else { overallTestStatistics <- dataInput$getOverallLogRanks(stage = stage, subset = subset) - sqrt(dataInput$getOverallEvents(stage = stage, subset = subset)) * sqrt(dataInput$getOverallAllocationRatios(stage = stage, subset = subset)) / (1 + dataInput$getOverallAllocationRatios(stage = stage, subset = subset)) * log(thetaH0) testStatistics <- dataInput$getLogRanks(stage = stage, subset = subset) - sqrt(dataInput$getEvents(stage = stage, subset = subset)) * sqrt(dataInput$getAllocationRatios(stage = stage, subset = subset)) / (1 + dataInput$getAllocationRatios(stage = stage, subset = subset)) * log(thetaH0) overallAllocationRatios <- dataInput$getOverallAllocationRatios(stage 
= stage, subset = subset) } if (directionUpper) { separatePValues <- 1 - stats::pnorm(testStatistics) } else { separatePValues <- stats::pnorm(testStatistics) } } if (("R" %in% subset) && is.na(dataInput$getOverallEvents(stage = stage, subset = "R")) || ("S1" %in% subset) && is.na(dataInput$getOverallEvents(stage = stage, subset = "S1")) || ("S2" %in% subset) && is.na(dataInput$getOverallEvents(stage = stage, subset = "S2")) || ("S3" %in% subset) && is.na(dataInput$getOverallEvents(stage = stage, subset = "S3")) || ("S4" %in% subset) && is.na(dataInput$getOverallEvents(stage = stage, subset = "S4")) ) { overallEvents <- NA_real_ separatePValues <- NA_real_ testStatistics <- NA_real_ overallAllocationRatios <- NA_real_ overallTestStatistics <- NA_real_ } return(list( overallEvents = overallEvents, separatePValues = separatePValues, testStatistics = testStatistics, overallAllocationRatios = overallAllocationRatios, overallTestStatistics = overallTestStatistics )) } .getStageResultsSurvivalEnrichment <- function(..., design, dataInput, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, calculateSingleStepAdjusted = FALSE, userFunctionCallEnabled = FALSE) { .assertIsTrialDesign(design) .assertIsDatasetSurvival(dataInput) .assertIsValidThetaH0DataInput(thetaH0, dataInput) .assertIsValidDirectionUpper(directionUpper, design$sided) .assertIsSingleLogical(calculateSingleStepAdjusted, "calculateSingleStepAdjusted") .warnInCaseOfUnknownArguments( functionName = ".getStageResultsSurvivalEnrichment", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    kMax <- design$kMax
    # for stratified input the subsets enumerate stratum combinations, hence the
    # number of populations is log2(#subset levels) + 1; otherwise it is the
    # number of subset levels directly
    if (dataInput$isStratified()) {
        gMax <- log(length(levels(factor(dataInput$subsets))), 2) + 1
    } else {
        gMax <- length(levels(factor(dataInput$subsets)))
    }
    .assertIsValidIntersectionTestEnrichment(design, intersectionTest)
    if (gMax > 2 && intersectionTest == "SpiessensDebois") {
        # bug fix: removed duplicated word "test" from the error message
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "gMax (", gMax, ") > 2: Spiessens & Debois intersection test can only be used for one subset"
        )
    }
    if (!stratifiedAnalysis) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "only stratified analysis can be performed for enrichment survival designs"
        )
    }
    if (dataInput$isStratified() && gMax > 4) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "gMax (", gMax, ") > 4: Stratified analysis not implemented"
        )
    }
    stageResults <- StageResultsEnrichmentSurvival(
        design = design,
        dataInput = dataInput,
        intersectionTest = intersectionTest,
        thetaH0 = thetaH0,
        direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER),
        directionUpper = directionUpper,
        stratifiedAnalysis = stratifiedAnalysis,
        stage = stage
    )
    .setValueAndParameterType(
        stageResults, "stratifiedAnalysis",
        stratifiedAnalysis, C_STRATIFIED_ANALYSIS_DEFAULT
    )
    .setValueAndParameterType(
        stageResults, "intersectionTest",
        intersectionTest, C_INTERSECTION_TEST_ENRICHMENT_DEFAULT
    )
    # population x stage matrices holding the per-population results
    effectSizes <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallEvents <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    dimnames(testStatistics) <- list(paste("population ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = ""))
    dimnames(separatePValues) <- list(paste("population ", 1:gMax, sep = ""), paste("stage ", (1:kMax), sep = ""))
    subsets <- .createSubsetsByGMax(gMax = gMax, stratifiedInput = dataInput$isStratified(),
        subsetIdPrefix = "S")
    for (k in 1:stage) {
        for (population in 1:gMax) {
            subset <- subsets[[population]]
            results <- .calcSurvivalTestStatistics(
                dataInput, subset, k, thetaH0, stratifiedAnalysis, directionUpper
            )
            # back-transform the overall test statistic to a hazard ratio estimate
            effectSizes[population, k] <- thetaH0 * exp(results$overallTestStatistics *
                (1 + results$overallAllocationRatios) /
                sqrt(results$overallAllocationRatios * results$overallEvents))
            overallTestStatistics[population, k] <- results$overallTestStatistics
            testStatistics[population, k] <- results$testStatistics
            separatePValues[population, k] <- results$separatePValues
            overallEvents[population, k] <- results$overallEvents
        }
    }
    .setWeightsToStageResults(design, stageResults)
    # calculation of single stage adjusted p-Values and overall test statistics
    # for determination of RCIs
    if (calculateSingleStepAdjusted) {
        singleStepAdjustedPValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
        combInverseNormal <- matrix(NA_real_, nrow = gMax, ncol = kMax)
        combFisher <- matrix(NA_real_, nrow = gMax, ncol = kMax)
        if (.isTrialDesignInverseNormal(design)) {
            weightsInverseNormal <- stageResults$weightsInverseNormal
        } else if (.isTrialDesignFisher(design)) {
            weightsFisher <- stageResults$weightsFisher
        }
        for (k in 1:stage) {
            # number of populations with a valid p-value at stage k
            selected <- sum(!is.na(separatePValues[, k]))
            for (population in 1:gMax) {
                if ((intersectionTest == "Bonferroni") || (intersectionTest == "Simes")) {
                    singleStepAdjustedPValues[population, k] <- min(1, separatePValues[population, k] * selected)
                } else if (intersectionTest == "Sidak") {
                    singleStepAdjustedPValues[population, k] <- 1 - (1 - separatePValues[population, k])^selected
                } else if (intersectionTest == "SpiessensDebois") {
                    if (!is.na(testStatistics[population, k])) {
                        df <- NA_real_
                        sigma <- 1
                        if (selected == 2) {
                            # correlation matrix of the two test statistics, driven by the
                            # fraction of subset events relative to the full population
                            if (dataInput$isStratified()) {
                                sigma <- matrix(rep(sqrt(dataInput$getEvents(stage = k, subset = "S1") /
                                    sum(dataInput$getEvents(stage = k))), 4), nrow = 2)
                            } else {
                                sigma <- matrix(rep(sqrt(dataInput$getEvents(stage = k, subset = "S1") /
                                    dataInput$getEvents(stage = k, subset = "F")), 4), nrow = 2)
                            }
                            diag(sigma) <- 1
                        }
                        singleStepAdjustedPValues[population, k] <- 1 - .getMultivariateDistribution(
                            type = "normal",
                            upper = ifelse(directionUpper, testStatistics[population, k],
                                -testStatistics[population, k]),
                            sigma = sigma, df = NA
                        )
                    }
                }
                # combination test statistics over stages 1..k
                if (.isTrialDesignInverseNormal(design)) {
                    combInverseNormal[population, k] <- (weightsInverseNormal[1:k] %*%
                        .getOneMinusQNorm(singleStepAdjustedPValues[population, 1:k])) /
                        sqrt(sum(weightsInverseNormal[1:k]^2))
                } else if (.isTrialDesignFisher(design)) {
                    combFisher[population, k] <- prod(singleStepAdjustedPValues[population, 1:k]^weightsFisher[1:k])
                }
            }
        }
        stageResults$overallTestStatistics <- overallTestStatistics
        stageResults$effectSizes <- effectSizes
        stageResults$testStatistics <- testStatistics
        stageResults$separatePValues <- separatePValues
        stageResults$singleStepAdjustedPValues <- singleStepAdjustedPValues
        stageResults$.setParameterType("singleStepAdjustedPValues", C_PARAM_GENERATED)
        if (.isTrialDesignFisher(design)) {
            stageResults$combFisher <- combFisher
            stageResults$.setParameterType("combFisher", C_PARAM_GENERATED)
        } else if (.isTrialDesignInverseNormal(design)) {
            stageResults$combInverseNormal <- combInverseNormal
            stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED)
        }
    } else {
        stageResults$overallTestStatistics <- overallTestStatistics
        stageResults$.overallEvents <- overallEvents
        stageResults$effectSizes <- effectSizes
        stageResults$testStatistics <- testStatistics
        stageResults$separatePValues <- separatePValues
    }
    return(stageResults)
}

#'
#' Dispatches the enrichment survival analysis to the implementation matching
#' the given design (inverse normal or Fisher combination test).
#'
#' @noRd
#'
.getAnalysisResultsSurvivalEnrichment <- function(..., design, dataInput) {
    if (.isTrialDesignInverseNormal(design)) {
        return(.getAnalysisResultsSurvivalInverseNormalEnrichment(
            design = design, dataInput = dataInput, ...
        ))
    }
    if (.isTrialDesignFisher(design)) {
        return(.getAnalysisResultsSurvivalFisherEnrichment(
            design = design, dataInput = dataInput, ...
)) } .stopWithWrongDesignMessageEnrichment(design) } .getAnalysisResultsSurvivalInverseNormalEnrichment <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsSurvivalInverseNormalEnrichment", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) results <- AnalysisResultsEnrichmentInverseNormal(design = design, dataInput = dataInput) results <- .getAnalysisResultsSurvivalEnrichmentAll( results = results, design = design, dataInput = dataInput, intersectionTest = intersectionTest, stage = stage, directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis, thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance ) return(results) } .getAnalysisResultsSurvivalFisherEnrichment <- function(..., design, dataInput, intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( 
functionName = ".getAnalysisResultsSurvivalFisherEnrichment",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design, powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )
    results <- AnalysisResultsEnrichmentFisher(design = design, dataInput = dataInput)
    .setValueAndParameterType(results, "iterations", as.integer(iterations), C_ITERATIONS_DEFAULT)
    .setValueAndParameterType(results, "seed", seed, NA_real_)
    results <- .getAnalysisResultsSurvivalEnrichmentAll(
        results = results, design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage,
        directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis,
        thetaH0 = thetaH0, thetaH1 = thetaH1, nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance,
        iterations = iterations, seed = seed
    )
    return(results)
}

#'
#' Common analysis pipeline for enrichment survival designs: stage results,
#' closed combination test, conditional power, CRP, repeated confidence
#' intervals and repeated p-values are computed and stored in 'results'.
#'
#' @noRd
#'
.getAnalysisResultsSurvivalEnrichmentAll <- function(..., results, design, dataInput,
        intersectionTest, stage, directionUpper, stratifiedAnalysis, thetaH0, thetaH1,
        nPlanned, allocationRatioPlanned, tolerance, iterations, seed) {
    startTime <- Sys.time()
    stageResults <- .getStageResultsSurvivalEnrichment(
        design = design, dataInput = dataInput,
        intersectionTest = intersectionTest, stage = stage, thetaH0 = thetaH0,
        directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis
    )
    results$.setStageResults(stageResults)
    .logProgress("Stage results calculated", startTime = startTime)
    thetaH1 <- .assertIsValidThetaH1ForEnrichment(thetaH1, stageResults, stage, results = results)
    .setValueAndParameterType(results, "intersectionTest", intersectionTest, C_INTERSECTION_TEST_ENRICHMENT_DEFAULT)
    .setValueAndParameterType(results, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT)
    .setValueAndParameterType(results, "stratifiedAnalysis", stratifiedAnalysis, C_STRATIFIED_ANALYSIS_DEFAULT)
    # NOTE(review): C_THETA_H0_MEANS_DEFAULT is used as the reference default here
    # although this is survival code — presumably C_THETA_H0_SURVIVAL_DEFAULT was
    # intended; affects only the reported parameter type, verify against other
    # enrichment analysis files
    .setValueAndParameterType(results, "thetaH0", thetaH0, C_THETA_H0_MEANS_DEFAULT)
    .setConditionalPowerArguments(results, dataInput, nPlanned,
        allocationRatioPlanned)
    .setNPlannedAndThetaH1(results, nPlanned, thetaH1)
    startTime <- Sys.time()
    results$.closedTestResults <- getClosedCombinationTestResults(stageResults = stageResults)
    .logProgress("Closed test calculated", startTime = startTime)
    if (design$kMax > 1) {
        # conditional power
        startTime <- Sys.time()
        if (.isTrialDesignFisher(design)) {
            # Fisher designs: conditional power is simulated (iterations/seed)
            results$.conditionalPowerResults <- .getConditionalPowerSurvivalEnrichment(
                stageResults = stageResults, stage = stage, nPlanned = nPlanned,
                allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1,
                iterations = iterations, seed = seed
            )
            .synchronizeIterationsAndSeed(results)
        } else {
            results$.conditionalPowerResults <- .getConditionalPowerSurvivalEnrichment(
                stageResults = stageResults, stage = stage, nPlanned = nPlanned,
                allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1
            )
            results$conditionalPower <- results$.conditionalPowerResults$conditionalPower
            results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
        }
        results$thetaH1 <- matrix(results$.conditionalPowerResults$thetaH1, ncol = 1)
        .logProgress("Conditional power calculated", startTime = startTime)
        # CRP - conditional rejection probabilities
        startTime <- Sys.time()
        results$conditionalRejectionProbabilities <- .getConditionalRejectionProbabilitiesEnrichment(
            stageResults = stageResults, stage = stage,
            iterations = iterations, seed = seed
        )
        results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_GENERATED)
        .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime)
    } else {
        # single-stage design: none of the sequential quantities apply
        results$.setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE)
        results$.setParameterType("conditionalPowerSimulated", C_PARAM_NOT_APPLICABLE)
        results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_NOT_APPLICABLE)
    }
    # RCI - repeated confidence interval
    repeatedConfidenceIntervalLowerBounds <- numeric(0)
    repeatedConfidenceIntervalUpperBounds <- numeric(0)
    startTime <- Sys.time()
repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsSurvivalEnrichment(
        design = design, dataInput = dataInput,
        stratifiedAnalysis = stratifiedAnalysis, intersectionTest = intersectionTest,
        stage = stage, tolerance = tolerance
    )
    gMax <- stageResults$getGMax()
    # unpack the (population x 2 x stage) RCI array into two population x stage matrices
    results$repeatedConfidenceIntervalLowerBounds <-
        matrix(rep(NA_real_, gMax * design$kMax), nrow = gMax, ncol = design$kMax)
    results$repeatedConfidenceIntervalUpperBounds <- results$repeatedConfidenceIntervalLowerBounds
    for (k in 1:design$kMax) {
        for (population in 1:gMax) {
            results$repeatedConfidenceIntervalLowerBounds[population, k] <-
                repeatedConfidenceIntervals[population, 1, k]
            results$repeatedConfidenceIntervalUpperBounds[population, k] <-
                repeatedConfidenceIntervals[population, 2, k]
        }
    }
    results$.setParameterType("repeatedConfidenceIntervalLowerBounds", C_PARAM_GENERATED)
    results$.setParameterType("repeatedConfidenceIntervalUpperBounds", C_PARAM_GENERATED)
    # repeated p-value
    results$repeatedPValues <- .getRepeatedPValuesEnrichment(stageResults = stageResults, tolerance = tolerance)
    results$.setParameterType("repeatedPValues", C_PARAM_GENERATED)
    # reminder to the user about the required form of the input test statistics
    message("Test statistics from full (and sub-populations) need to be stratified log-rank tests")
    return(results)
}

#'
#' Finds the value of thetaH0 for which the selected stage result
#' ('firstParameterName', e.g. the combination test statistic) equals
#' 'secondValue' (e.g. a critical value); used to invert the test into an RCI limit.
#'
#' @noRd
#'
.getRootThetaSurvivalEnrichment <- function(..., design, dataInput, treatmentArm, stage,
        directionUpper, stratifiedAnalysis, intersectionTest,
        thetaLow, thetaUp, firstParameterName, secondValue, tolerance) {
    result <- .getOneDimensionalRoot(
        function(theta) {
            stageResults <- .getStageResultsSurvivalEnrichment(
                design = design, dataInput = dataInput, stage = stage, thetaH0 = theta,
                directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis,
                intersectionTest = intersectionTest, calculateSingleStepAdjusted = TRUE
            )
            firstValue <- stageResults[[firstParameterName]][treatmentArm, stage]
            return(firstValue - secondValue)
        },
        lower = thetaLow, upper = thetaUp, tolerance = tolerance,
        callingFunctionInformation =
        ".getRootThetaSurvivalEnrichment"
    )
    return(result)
}

#'
#' Expands 'theta' (on log scale, doubled per step, at most 30 steps) until the
#' stage result 'firstParameterName' passes 'secondValue' according to
#' 'conditionFunction'; provides a search bracket for the RCI root finding.
#'
#' @noRd
#'
.getUpperLowerThetaSurvivalEnrichment <- function(..., design, dataInput, theta, treatmentArm,
        stage, directionUpper, conditionFunction, stratifiedAnalysis, intersectionTest,
        firstParameterName, secondValue) {
    stageResults <- .getStageResultsSurvivalEnrichment(
        design = design, dataInput = dataInput, stage = stage, thetaH0 = exp(theta),
        directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis,
        intersectionTest = intersectionTest, calculateSingleStepAdjusted = TRUE
    )
    firstValue <- stageResults[[firstParameterName]][treatmentArm, stage]
    maxSearchIterations <- 30
    while (conditionFunction(secondValue, firstValue)) {
        theta <- 2 * theta
        stageResults <- .getStageResultsSurvivalEnrichment(
            design = design, dataInput = dataInput, stage = stage, thetaH0 = exp(theta),
            directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis,
            intersectionTest = intersectionTest, calculateSingleStepAdjusted = TRUE
        )
        firstValue <- stageResults[[firstParameterName]][treatmentArm, stage]
        maxSearchIterations <- maxSearchIterations - 1
        if (maxSearchIterations < 0) {
            stop(
                C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                sprintf(
                    paste0(
                        "failed to find theta (k = %s, firstValue = %s, ",
                        "secondValue = %s, levels(firstValue) = %s, theta = %s)"
                    ),
                    stage, stageResults[[firstParameterName]][treatmentArm, stage],
                    secondValue, firstValue, theta
                )
            )
        }
    }
    return(theta)
}

#'
#' Calculation of repeated confidence intervals (RCIs) for all populations and
#' stages; 'firstParameterName' selects the combination test statistic to invert
#' ("combInverseNormal" or "combFisher").
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsSurvivalEnrichmentAll <- function(..., design, dataInput,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT,
        firstParameterName) {
    .assertIsValidIntersectionTestEnrichment(design, intersectionTest)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    stageResults <- .getStageResultsSurvivalEnrichment(
        design = design, dataInput = dataInput, stage = stage, thetaH0 = 1,
directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis,
        intersectionTest = intersectionTest, calculateSingleStepAdjusted = FALSE
    )
    gMax <- stageResults$getGMax()
    # dimensions: population x (lower, upper) x stage
    repeatedConfidenceIntervals <- array(NA_real_, dim = c(gMax, 2, design$kMax))
    # Repeated confidence intervals when using combination tests
    # (typo fix in the comment above: "onfidence" -> "confidence")
    if (.isTrialDesignFisher(design)) {
        bounds <- design$alpha0Vec
        border <- C_ALPHA_0_VEC_DEFAULT
        criticalValues <- design$criticalValues
        conditionFunction <- .isFirstValueSmallerThanSecondValue
    } else if (.isTrialDesignInverseNormal(design)) {
        bounds <- design$futilityBounds
        border <- C_FUTILITY_BOUNDS_DEFAULT
        criticalValues <- design$criticalValues
        # cap infinite boundaries at the largest finite normal quantiles used internally
        criticalValues[is.infinite(criticalValues) & criticalValues > 0] <- C_QNORM_MAXIMUM
        criticalValues[is.infinite(criticalValues) & criticalValues < 0] <- C_QNORM_MINIMUM
        conditionFunction <- .isFirstValueGreaterThanSecondValue
    }
    if (any(is.na(criticalValues[1:stage]))) {
        # bug fix: the warning was missing the word "calculated"
        # NOTE(review): the count uses the full criticalValues vector while the guard
        # checks only stages 1..stage — verify whether the count should be restricted
        warning("Repeated confidence intervals not calculated because ", sum(is.na(criticalValues)),
            " critical values are NA (", .arrayToString(criticalValues), ")",
            call. = FALSE
        )
        return(repeatedConfidenceIntervals)
    }
    # necessary for adjustment for binding futility boundaries
    futilityCorr <- rep(NA_real_, design$kMax)
    stages <- (1:stage)
    for (k in stages) {
        startTime <- Sys.time()
        for (population in 1:gMax) {
            if (!is.na(stageResults$testStatistics[population, k]) &&
                    criticalValues[k] < C_QNORM_MAXIMUM) {
                # Finding maximum upper and minimum lower bounds for RCIs
                thetaLow <- exp(.getUpperLowerThetaSurvivalEnrichment(
                    design = design, dataInput = dataInput, theta = -1,
                    treatmentArm = population, stage = k, directionUpper = TRUE,
                    stratifiedAnalysis = stratifiedAnalysis,
                    intersectionTest = intersectionTest,
                    conditionFunction = conditionFunction,
                    firstParameterName = firstParameterName,
                    secondValue = criticalValues[k]
                ))
                thetaUp <- exp(.getUpperLowerThetaSurvivalEnrichment(
                    design = design, dataInput = dataInput, theta = 1,
                    treatmentArm = population, stage = k, directionUpper = FALSE,
                    stratifiedAnalysis = stratifiedAnalysis,
                    intersectionTest = intersectionTest,
                    conditionFunction = conditionFunction,
                    firstParameterName = firstParameterName,
                    secondValue = criticalValues[k]
                ))
                # finding upper and lower RCI limits through root function
                repeatedConfidenceIntervals[population, 1, k] <- .getRootThetaSurvivalEnrichment(
                    design = design, dataInput = dataInput, treatmentArm = population,
                    stage = k, directionUpper = TRUE,
                    thetaLow = thetaLow, thetaUp = thetaUp,
                    stratifiedAnalysis = stratifiedAnalysis,
                    intersectionTest = intersectionTest,
                    firstParameterName = firstParameterName,
                    secondValue = criticalValues[k], tolerance = tolerance
                )
                repeatedConfidenceIntervals[population, 2, k] <- .getRootThetaSurvivalEnrichment(
                    design = design, dataInput = dataInput, treatmentArm = population,
                    stage = k, directionUpper = FALSE,
                    thetaLow = thetaLow, thetaUp = thetaUp,
                    stratifiedAnalysis = stratifiedAnalysis,
                    intersectionTest = intersectionTest,
                    firstParameterName = firstParameterName,
                    secondValue = criticalValues[k], tolerance = tolerance
                )
                # adjustment for binding futility bounds
                if (k > 1 && !is.na(bounds[k - 1]) &&
                        conditionFunction(bounds[k - 1], border) && design$bindingFutility) {
                    parameterName <- ifelse(.isTrialDesignFisher(design),
                        "singleStepAdjustedPValues",
                        firstParameterName
                    )
                    # Calculate new lower and upper bounds
                    if (directionUpper) {
                        thetaLow <- tolerance
                    } else {
                        thetaUp <- .getUpperLowerThetaSurvivalEnrichment(
                            design = design, dataInput = dataInput, theta = 1,
                            treatmentArm = population, stage = k - 1,
                            directionUpper = FALSE,
                            conditionFunction = conditionFunction,
                            stratifiedAnalysis = stratifiedAnalysis,
                            intersectionTest = intersectionTest,
                            firstParameterName = parameterName,
                            secondValue = bounds[k - 1]
                        )
                    }
                    futilityCorr[k] <- .getRootThetaSurvivalEnrichment(
                        design = design, dataInput = dataInput, treatmentArm = population,
                        stage = k - 1, directionUpper = directionUpper,
                        thetaLow = thetaLow, thetaUp = thetaUp,
                        stratifiedAnalysis = stratifiedAnalysis,
                        intersectionTest = intersectionTest,
                        firstParameterName = parameterName,
                        secondValue = bounds[k - 1], tolerance = tolerance
                    )
                    # widen the RCI by the most extreme futility correction so far
                    if (directionUpper) {
                        repeatedConfidenceIntervals[population, 1, k] <- min(
                            min(futilityCorr[2:k]),
                            repeatedConfidenceIntervals[population, 1, k]
                        )
                    } else {
                        repeatedConfidenceIntervals[population, 2, k] <- max(
                            max(futilityCorr[2:k]),
                            repeatedConfidenceIntervals[population, 2, k]
                        )
                    }
                }
                # invalidate crossed intervals (lower > upper)
                if (!is.na(repeatedConfidenceIntervals[population, 1, k]) &&
                        !is.na(repeatedConfidenceIntervals[population, 2, k]) &&
                        repeatedConfidenceIntervals[population, 1, k] >
                            repeatedConfidenceIntervals[population, 2, k]) {
                    repeatedConfidenceIntervals[population, , k] <- rep(NA_real_, 2)
                }
            }
        }
        .logProgress("Repeated confidence intervals for stage %s calculated", startTime = startTime, k)
    }
    return(repeatedConfidenceIntervals)
}

#'
#' RCIs based on inverse normal combination test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsSurvivalEnrichmentInverseNormal <- function(..., design, dataInput,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        stratifiedAnalysis =
C_STRATIFIED_ANALYSIS_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsSurvivalEnrichmentInverseNormal",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design, powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )
    # delegate to the shared RCI routine, inverting the inverse normal statistic
    return(.getRepeatedConfidenceIntervalsSurvivalEnrichmentAll(
        design = design, dataInput = dataInput, directionUpper = directionUpper,
        stratifiedAnalysis = stratifiedAnalysis, intersectionTest = intersectionTest,
        tolerance = tolerance, firstParameterName = "combInverseNormal", ...
    ))
}

#'
#' RCIs based on Fisher's combination test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsSurvivalEnrichmentFisher <- function(..., design, dataInput,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        stratifiedAnalysis = C_STRATIFIED_ANALYSIS_DEFAULT,
        intersectionTest = C_INTERSECTION_TEST_ENRICHMENT_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsSurvivalEnrichmentFisher",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design, powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )
    # delegate to the shared RCI routine, inverting Fisher's combination statistic
    return(.getRepeatedConfidenceIntervalsSurvivalEnrichmentAll(
        design = design, dataInput = dataInput, directionUpper = directionUpper,
        stratifiedAnalysis = stratifiedAnalysis, intersectionTest = intersectionTest,
        tolerance = tolerance, firstParameterName = "combFisher", ...
    ))
}

#'
#' Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Survival
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsSurvivalEnrichment <- function(..., design) {
    if (.isTrialDesignInverseNormal(design)) {
        return(.getRepeatedConfidenceIntervalsSurvivalEnrichmentInverseNormal(design = design, ...))
    }
    if (.isTrialDesignFisher(design)) {
        return(.getRepeatedConfidenceIntervalsSurvivalEnrichmentFisher(design = design, ...))
    }
    .stopWithWrongDesignMessageEnrichment(design)
}

#'
#' Calculation of conditional power for Survival
#'
#' @noRd
#'
.getConditionalPowerSurvivalEnrichment <- function(..., stageResults, stage = stageResults$stage,
        nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        thetaH1 = NA_real_, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    design <- stageResults$.design
    gMax <- stageResults$getGMax()
    kMax <- design$kMax
    results <- ConditionalPowerResultsEnrichmentSurvival(
        .design = design, .stageResults = stageResults, thetaH1 = thetaH1,
        nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned
    )
    # without planned events for future stages no conditional power can be computed
    if (any(is.na(nPlanned))) {
        return(results)
    }
    .assertIsValidStage(stage, kMax)
    if (stage == kMax) {
        .logDebug(
            "Conditional power will be calculated only for subsequent stages ",
            "(stage = ", stage, ", kMax = ", kMax, ")"
        )
        return(results)
    }
    if (!.isValidNPlanned(nPlanned = nPlanned, kMax = kMax, stage = stage)) {
        return(results)
    }
    .assertIsValidNPlanned(nPlanned, kMax, stage)
    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)
    results$.setParameterType("nPlanned", C_PARAM_USER_DEFINED)
    results$.setParameterType(
        "allocationRatioPlanned",
        ifelse(allocationRatioPlanned == C_ALLOCATION_RATIO_DEFAULT,
            C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)
    )
    thetaH1 <- .assertIsValidThetaH1ForEnrichment(thetaH1, stageResults, stage, results = results)
    # hazard ratios under the alternative must be strictly positive
    if (any(thetaH1 <= 0, na.rm = TRUE)) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'thetaH1' (", thetaH1, ") must be > 0")
    }
    # thetaH1 must either be scalar (recycled below) or one value per population
    if ((length(thetaH1) != 1) && (length(thetaH1) != gMax)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            sprintf(paste0(
                "length of 'thetaH1' (%s) must be ",
                "equal to 'gMax' (%s) or 1"
            ), .arrayToString(thetaH1), gMax)
        )
    }
    if (.isTrialDesignInverseNormal(design)) {
        return(.getConditionalPowerSurvivalEnrichmentInverseNormal(
            results = results, design = design, stageResults = stageResults,
            stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned,
            thetaH1 = thetaH1, ...
        ))
    } else if (.isTrialDesignFisher(design)) {
        return(.getConditionalPowerSurvivalEnrichmentFisher(
            results = results, design = design, stageResults = stageResults,
            stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned,
            thetaH1 = thetaH1, iterations = iterations, seed = seed, ...
        ))
    }
    stop(
        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
        "'design' must be an instance of TrialDesignInverseNormal or TrialDesignFisher"
    )
}

#'
#' Calculation of conditional power based on inverse normal method
#'
#' @noRd
#'
.getConditionalPowerSurvivalEnrichmentInverseNormal <- function(..., results, design,
        stageResults, stage, allocationRatioPlanned, nPlanned, thetaH1) {
    .assertIsTrialDesignInverseNormal(design)
    .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerSurvivalEnrichmentInverseNormal", ...)
    kMax <- design$kMax
    gMax <- stageResults$getGMax()
    weights <- .getWeightsInverseNormal(design)
    informationRates <- design$informationRates
    # pad past stages with NA, then rescale the planned events to the
    # "effective" scale r/(1+r)^2 used by the normal approximation
    nPlanned <- c(rep(NA_real_, stage), nPlanned)
    nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned
    .setValueAndParameterType(results, "allocationRatioPlanned",
        allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT)
    if (length(thetaH1) == 1) {
        # scalar thetaH1 is recycled across populations
        thetaH1 <- rep(thetaH1, gMax)
        results$.setParameterType("thetaH1", C_PARAM_GENERATED)
    } else {
        results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE)
    }
    # standardized effect on the log hazard ratio scale, signed by test direction
    if (stageResults$directionUpper) {
        standardizedEffect <- log(thetaH1 / stageResults$thetaH0)
    } else {
        standardizedEffect <- -log(thetaH1 / stageResults$thetaH0)
    }
    ctr <- .performClosedCombinationTest(stageResults = stageResults)
    criticalValues <- design$criticalValues
    for (population in 1:gMax) {
        if (!is.na(ctr$separatePValues[population, stage])) {
            # shifted decision region for use in getGroupSeqProbs
            # Inverse Normal Method
            shiftedDecisionRegionUpper <- criticalValues[(stage + 1):kMax] *
                sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) /
                sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
                min(ctr$overallAdjustedTestStatistics[ctr$indices[, population] == 1, stage],
                    na.rm = TRUE) *
                    sqrt(sum(weights[1:stage]^2)) /
                    sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
                standardizedEffect[population] *
                    cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) /
                    sqrt(cumsum(weights[(stage + 1):kMax]^2))
            if (stage == kMax - 1) {
                # no interim futility bounds remain before the final stage
                shiftedFutilityBounds <- c()
            } else {
                shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] *
                    sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) /
                    sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
                    min(ctr$overallAdjustedTestStatistics[ctr$indices[, population] == 1, stage],
                        na.rm = TRUE) *
                        sqrt(sum(weights[1:stage]^2)) /
                        sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
                    standardizedEffect[population] * cumsum(sqrt(nPlanned[(stage + 1):(kMax -
                        1)]) * weights[(stage + 1):(kMax - 1)]) /
                        sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2))
            }
            # scaled information for use in getGroupSeqProbs
            scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) /
                (1 - informationRates[stage])
            decisionMatrix <- matrix(c(
                shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
                shiftedDecisionRegionUpper
            ), nrow = 2, byrow = TRUE)
            probs <- .getGroupSequentialProbabilities(
                decisionMatrix = decisionMatrix,
                informationRates = scaledInformation
            )
            # conditional power = cumulative probability of crossing the upper bound
            results$conditionalPower[population, (stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ])
        }
    }
    # undo the r/(1+r)^2 rescaling before reporting nPlanned
    nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
    results$nPlanned <- nPlanned
    results$.setParameterType("nPlanned", C_PARAM_GENERATED)
    results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
    results$thetaH1 <- thetaH1
    return(results)
}

#'
#' Calculation of conditional power based on Fisher's combination test
#'
#' @noRd
#'
.getConditionalPowerSurvivalEnrichmentFisher <- function(..., results, design, stageResults, stage,
        allocationRatioPlanned, nPlanned, thetaH1, iterations, seed) {
    .assertIsTrialDesignFisher(design)
    .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE)
    .warnInCaseOfUnknownArguments(functionName = ".getConditionalPowerSurvivalEnrichmentFisher", ...)
kMax <- design$kMax
    gMax <- stageResults$getGMax()
    criticalValues <- design$criticalValues
    weightsFisher <- .getWeightsFisher(design)
    results$iterations <- as.integer(iterations)
    results$.setParameterType("iterations", C_PARAM_USER_DEFINED)
    results$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED))
    results$seed <- .setSeed(seed)
    results$simulated <- FALSE
    results$.setParameterType("simulated", C_PARAM_DEFAULT_VALUE)
    .setValueAndParameterType(results, "allocationRatioPlanned",
        allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT)
    if (length(thetaH1) == 1) {
        # scalar thetaH1 is recycled across populations
        thetaH1 <- rep(thetaH1, gMax)
        results$.setParameterType("thetaH1", C_PARAM_GENERATED)
    } else {
        results$.setParameterType("thetaH1", C_PARAM_DEFAULT_VALUE)
    }
    # standardized effect on the log hazard ratio scale, signed by test direction
    if (stageResults$directionUpper) {
        standardizedEffect <- log(thetaH1 / stageResults$thetaH0)
    } else {
        standardizedEffect <- -log(thetaH1 / stageResults$thetaH0)
    }
    # pad past stages with NA, then rescale planned events to the effective
    # scale r/(1+r)^2 (inverted again before reporting)
    nPlanned <- c(rep(NA_real_, stage), nPlanned)
    nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned
    ctr <- .performClosedCombinationTest(stageResults = stageResults)
    for (population in 1:gMax) {
        if (!is.na(ctr$separatePValues[population, stage])) {
            # pick the adjusted stage-wise p-values of the intersection hypothesis
            # with the largest overall adjusted test statistic containing this population
            if (gMax == 1) {
                pValues <- ctr$adjustedStageWisePValues[ctr$indices[, population] == 1, ][1:stage]
            } else {
                pValues <- ctr$adjustedStageWisePValues[ctr$indices[, population] == 1, ][which.max(
                    ctr$overallAdjustedTestStatistics[ctr$indices[, population] == 1, stage]
                ), 1:stage]
            }
            if (stage < kMax - 1) {
                # more than one stage remains: conditional power is simulated
                for (k in (stage + 1):kMax) {
                    reject <- 0
                    for (i in 1:iterations) {
                        reject <- reject + .getRejectValueConditionalPowerFisher(
                            kMax = kMax, alpha0Vec = design$alpha0Vec,
                            criticalValues = criticalValues, weightsFisher = weightsFisher,
                            pValues = pValues, currentKMax = k,
                            thetaH1 = standardizedEffect[population],
                            stage = stage, nPlanned = nPlanned
                        )
                    }
                    results$conditionalPower[population, k] <- reject / iterations
                }
                results$simulated <- TRUE
                results$.setParameterType("simulated", C_PARAM_GENERATED)
            } else if (stage == kMax - 1) {
                # exactly one stage remains: closed-form conditional power
                divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)])
                result <- 1 - (criticalValues[kMax] / divisor)^(1 / weightsFisher[kMax])
                if (result <= 0 || result >= 1) {
                    warning("Calculation not possible: could not calculate conditional power for stage ",
                        kMax, call. = FALSE)
                    results$conditionalPower[population, kMax] <- NA_real_
                } else {
                    results$conditionalPower[population, kMax] <- 1 - stats::pnorm(.getQNorm(result) -
                        standardizedEffect[population] * sqrt(nPlanned[kMax]))
                }
            }
        }
    }
    # undo the r/(1+r)^2 rescaling before reporting nPlanned
    nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
    results$nPlanned <- nPlanned
    results$.setParameterType("nPlanned", C_PARAM_GENERATED)
    results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
    results$thetaH1 <- thetaH1
    return(results)
}

#'
#' Calculation of conditional power and likelihood values for plotting the graph
#'
#' @noRd
#'
.getConditionalPowerLikelihoodSurvivalEnrichment <- function(..., stageResults, stage, nPlanned,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        thetaRange, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)
    .associatedArgumentsAreDefined(nPlanned = nPlanned, thetaRange = thetaRange)
    design <- stageResults$.design
    kMax <- design$kMax
    gMax <- stageResults$getGMax()
    intersectionTest <- stageResults$intersectionTest
    thetaRange <- .assertIsValidThetaH1ForEnrichment(thetaH1 = thetaRange)
    # a single value cannot span a plotting range
    if (length(thetaRange) == 1) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "length of 'thetaRange' (", .arrayToString(thetaRange), ") must be at least 2"
        )
    }
    # flat result vectors: one entry per (theta value, population) pair
    populations <- numeric(gMax * length(thetaRange))
    effectValues <- numeric(gMax * length(thetaRange))
    condPowerValues <- numeric(gMax * length(thetaRange))
    likelihoodValues <- numeric(gMax * length(thetaRange))
    # standard error of the log hazard ratio: 2 / sqrt(events)
    stdErr <- 2 / sqrt(stageResults$.overallEvents[, stage])
    results
<- ConditionalPowerResultsEnrichmentSurvival( .design = design, .stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned ) j <- 1 for (i in seq(along = thetaRange)) { for (population in (1:gMax)) { populations[j] <- population effectValues[j] <- thetaRange[i] if (.isTrialDesignInverseNormal(design)) { condPowerValues[j] <- .getConditionalPowerSurvivalEnrichmentInverseNormal( results = results, design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], ... )$conditionalPower[population, kMax] } else if (.isTrialDesignFisher(design)) { condPowerValues[j] <- .getConditionalPowerSurvivalEnrichmentFisher( results = results, design = design, stageResults = stageResults, stage = stage, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaRange[i], iterations = iterations, seed = seed, ... )$conditionalPower[population, kMax] } likelihoodValues[j] <- stats::dnorm( log(thetaRange[i]), log(stageResults$effectSizes[population, stage]), stdErr[population] ) / stats::dnorm(0, 0, stdErr[population]) j <- j + 1 } } subtitle <- paste0( "Intersection test = ", intersectionTest, ", Stage = ", stage, ", # of remaining events = ", sum(nPlanned), ", allocation ratio = ", .formatSubTitleValue(allocationRatioPlanned, "allocationRatioPlanned") ) return(list( populations = populations, xValues = effectValues, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main = C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, xlab = "Hazard ratio", ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD, sub = subtitle )) } rpact/R/f_design_sample_size_calculator.R0000644000176200001440000070145314445307575020321 0ustar liggesusers## | ## | *Sample size calculator* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, 
and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_utilities.R NULL .addEffectScaleBoundaryDataToDesignPlan <- function(designPlan) { .assertIsTrialDesignPlan(designPlan) design <- designPlan$.design if (.isTrialDesignPlanMeans(designPlan)) { if (design$kMax == 1 && designPlan$.isSampleSizeObject()) { designPlan$maxNumberOfSubjects <- designPlan$nFixed } boundaries <- .getEffectScaleBoundaryDataMeans(designPlan) } else if (.isTrialDesignPlanRates(designPlan)) { if (designPlan$.isSampleSizeObject()) { # comes from getSampleSize if (designPlan$groups == 1) { designPlan$directionUpper <- (designPlan$pi1 > designPlan$thetaH0) } else { if (designPlan$riskRatio) { designPlan$directionUpper <- (designPlan$pi1 / designPlan$pi2 > designPlan$thetaH0) } else { designPlan$directionUpper <- (designPlan$pi1 - designPlan$pi2 > designPlan$thetaH0) } } designPlan$.setParameterType("directionUpper", C_PARAM_GENERATED) } if (design$kMax == 1 && designPlan$.isSampleSizeObject()) { designPlan$maxNumberOfSubjects <- designPlan$nFixed } boundaries <- .getEffectScaleBoundaryDataRates(designPlan) } else if (.isTrialDesignPlanSurvival(designPlan)) { if (designPlan$.isSampleSizeObject()) { # comes from getSampleSize designPlan$directionUpper <- (designPlan$hazardRatio > designPlan$thetaH0) designPlan$.setParameterType("directionUpper", C_PARAM_GENERATED) } if (design$kMax == 1 && designPlan$.isSampleSizeObject()) { designPlan$eventsPerStage <- matrix(designPlan$eventsFixed, nrow = 1) } boundaries <- 
.getEffectScaleBoundaryDataSurvival(designPlan) } if (designPlan$.design$sided == 1) { designPlan$criticalValuesEffectScale <- boundaries$criticalValuesEffectScaleUpper designPlan$.setParameterType("criticalValuesEffectScale", C_PARAM_GENERATED) } else { if (all(boundaries$criticalValuesEffectScaleLower < boundaries$criticalValuesEffectScaleUpper, na.rm = TRUE)) { designPlan$criticalValuesEffectScaleLower <- boundaries$criticalValuesEffectScaleLower designPlan$criticalValuesEffectScaleUpper <- boundaries$criticalValuesEffectScaleUpper } else { designPlan$criticalValuesEffectScaleLower <- boundaries$criticalValuesEffectScaleUpper designPlan$criticalValuesEffectScaleUpper <- boundaries$criticalValuesEffectScaleLower } designPlan$.setParameterType("criticalValuesEffectScaleUpper", C_PARAM_GENERATED) designPlan$.setParameterType("criticalValuesEffectScaleLower", C_PARAM_GENERATED) } if (!.isTrialDesignFisher(design) && any(design$futilityBounds > C_FUTILITY_BOUNDS_DEFAULT)) { if (design$sided == 1) { designPlan$futilityBoundsEffectScale <- round(boundaries$futilityBoundsEffectScaleUpper, 8) designPlan$.setParameterType("futilityBoundsEffectScale", C_PARAM_GENERATED) } else { if (all(designPlan$futilityBoundsEffectScaleLower < designPlan$futilityBoundsEffectScaleUpper, na.rm = TRUE)) { designPlan$futilityBoundsEffectScaleLower <- round(boundaries$futilityBoundsEffectScaleLower, 8) designPlan$futilityBoundsEffectScaleUpper <- round(boundaries$futilityBoundsEffectScaleUpper, 8) } else { designPlan$futilityBoundsEffectScaleLower <- round(boundaries$futilityBoundsEffectScaleUpper, 8) designPlan$futilityBoundsEffectScaleUpper <- round(boundaries$futilityBoundsEffectScaleLower, 8) } designPlan$.setParameterType("futilityBoundsEffectScaleLower", C_PARAM_GENERATED) designPlan$.setParameterType("futilityBoundsEffectScaleUpper", C_PARAM_GENERATED) } } } .getEffectScaleBoundaryDataMeans <- function(designPlan) { design <- designPlan$.design thetaH0 <- designPlan$thetaH0 stDev <- 
designPlan$stDev maxNumberOfSubjects <- designPlan$maxNumberOfSubjects allocationRatioPlanned <- designPlan$allocationRatioPlanned directionUpper <- designPlan$directionUpper # initialize effect scale matrix futilityBoundsEffectScaleUpper <- rep(NA_real_, design$kMax - 1) futilityBoundsEffectScaleLower <- rep(NA_real_, design$kMax - 1) if (designPlan$normalApproximation) { criticalValues <- design$criticalValues futilityBounds <- design$futilityBounds } else { criticalValues <- stats::qt( 1 - design$stageLevels, design$informationRates %*% t(maxNumberOfSubjects) - designPlan$groups ) # outside validated range numberOfNAs <- sum(as.vector(criticalValues) > 50, na.rm = TRUE) criticalValues[criticalValues > 50] <- NA_real_ if (any(is.na(criticalValues) & (design$criticalValues < 8))) { warning("The computation of ", .integerToWrittenNumber(numberOfNAs), " efficacy boundar", ifelse(numberOfNAs == 1, "y", "ies"), " on ", "treatment effect scale not performed presumably due to too small df", call. 
= FALSE ) } futilityBounds <- stats::qt( stats::pnorm(design$futilityBounds), design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects) - designPlan$groups ) # outside validated range futilityBounds[futilityBounds < -50] <- NA_real_ } futilityBounds[!is.na(futilityBounds) & futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <- NA_real_ if (designPlan$groups == 1) { criticalValuesEffectScaleUpper <- thetaH0 + criticalValues * stDev / sqrt(design$informationRates %*% t(maxNumberOfSubjects)) criticalValuesEffectScaleLower <- thetaH0 - criticalValues * stDev / sqrt(design$informationRates %*% t(maxNumberOfSubjects)) if (!.isTrialDesignFisher(design) && !all(is.na(futilityBounds))) { futilityBoundsEffectScaleUpper <- thetaH0 + futilityBounds * stDev / sqrt(design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects)) } if (!.isTrialDesignFisher(design) && design$sided == 2 && design$kMax > 1 && (design$typeOfDesign == C_TYPE_OF_DESIGN_PT || !is.null(design$typeBetaSpending) && design$typeBetaSpending != "none")) { futilityBoundsEffectScaleLower <- thetaH0 - futilityBounds * stDev / sqrt(design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects)) } } else if (!designPlan$meanRatio) { criticalValuesEffectScaleUpper <- thetaH0 + criticalValues * stDev * (1 + allocationRatioPlanned) / (sqrt(allocationRatioPlanned * design$informationRates %*% t(maxNumberOfSubjects))) criticalValuesEffectScaleLower <- thetaH0 - criticalValues * stDev * (1 + allocationRatioPlanned) / (sqrt(allocationRatioPlanned * design$informationRates %*% t(maxNumberOfSubjects))) if (!.isTrialDesignFisher(design) && !all(is.na(futilityBounds))) { futilityBoundsEffectScaleUpper <- thetaH0 + futilityBounds * stDev * (1 + allocationRatioPlanned) / (sqrt(allocationRatioPlanned * design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects))) } if (!.isTrialDesignFisher(design) && design$sided == 2 && design$kMax > 1 && (design$typeOfDesign == C_TYPE_OF_DESIGN_PT || 
!is.null(design$typeBetaSpending) && design$typeBetaSpending != "none")) { futilityBoundsEffectScaleLower <- thetaH0 - futilityBounds * stDev * (1 + allocationRatioPlanned) / (sqrt(allocationRatioPlanned * design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects))) } } else { criticalValuesEffectScaleUpper <- thetaH0 + criticalValues * stDev * sqrt(1 + 1 / allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned)) / (sqrt(design$informationRates %*% t(maxNumberOfSubjects))) criticalValuesEffectScaleLower <- thetaH0 - criticalValues * stDev * sqrt(1 + 1 / allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned)) / (sqrt(design$informationRates %*% t(maxNumberOfSubjects))) if (!.isTrialDesignFisher(design) && !all(is.na(futilityBounds))) { futilityBoundsEffectScaleUpper <- thetaH0 + futilityBounds * stDev * sqrt(1 + 1 / allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned)) / (sqrt(design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects))) } if (!.isTrialDesignFisher(design) && design$sided == 2 && design$kMax > 1 && (design$typeOfDesign == C_TYPE_OF_DESIGN_PT || !is.null(design$typeBetaSpending) && design$typeBetaSpending != "none")) { futilityBoundsEffectScaleLower <- thetaH0 - futilityBounds * stDev * sqrt(1 + 1 / allocationRatioPlanned + thetaH0^2 * (1 + allocationRatioPlanned)) / (sqrt(design$informationRates[1:(design$kMax - 1)] %*% t(maxNumberOfSubjects))) } } directionUpper[is.na(directionUpper)] <- TRUE if (length(directionUpper) > 0 && all(!directionUpper)) { criticalValuesEffectScaleUpper <- -criticalValuesEffectScaleUpper + 2 * thetaH0 criticalValuesEffectScaleLower <- -criticalValuesEffectScaleLower + 2 * thetaH0 if (!all(is.na(futilityBoundsEffectScaleUpper))) { futilityBoundsEffectScaleUpper <- -futilityBoundsEffectScaleUpper + 2 * thetaH0 futilityBoundsEffectScaleLower <- -futilityBoundsEffectScaleLower + 2 * thetaH0 } } if (designPlan$meanRatio) { 
criticalValuesEffectScaleUpper[!is.na(criticalValuesEffectScaleUpper) & criticalValuesEffectScaleUpper <= 0] <- NA_real_ criticalValuesEffectScaleLower[!is.na(criticalValuesEffectScaleLower) & criticalValuesEffectScaleLower <= 0] <- NA_real_ futilityBoundsEffectScaleUpper[!is.na(futilityBoundsEffectScaleUpper) & futilityBoundsEffectScaleUpper <= 0] <- NA_real_ futilityBoundsEffectScaleLower[!is.na(futilityBoundsEffectScaleLower) & futilityBoundsEffectScaleLower <= 0] <- NA_real_ } return(list( criticalValuesEffectScaleUpper = matrix(criticalValuesEffectScaleUpper, nrow = design$kMax), criticalValuesEffectScaleLower = matrix(criticalValuesEffectScaleLower, nrow = design$kMax), futilityBoundsEffectScaleUpper = matrix(futilityBoundsEffectScaleUpper, nrow = design$kMax - 1), futilityBoundsEffectScaleLower = matrix(futilityBoundsEffectScaleLower, nrow = design$kMax - 1) )) } .getEffectScaleBoundaryDataRates <- function(designPlan) { design <- designPlan$.design thetaH0 <- designPlan$thetaH0 pi2 <- designPlan$pi2 maxNumberOfSubjects <- designPlan$maxNumberOfSubjects allocationRatioPlanned <- designPlan$allocationRatioPlanned directionUpper <- designPlan$directionUpper nParameters <- length(maxNumberOfSubjects) directionUpper[is.na(directionUpper)] <- TRUE criticalValuesEffectScaleUpper <- matrix(, nrow = design$kMax, ncol = nParameters) criticalValuesEffectScaleLower <- matrix(, nrow = design$kMax, ncol = nParameters) futilityBoundsEffectScaleUpper <- matrix(, nrow = design$kMax - 1, ncol = nParameters) futilityBoundsEffectScaleLower <- matrix(, nrow = design$kMax - 1, ncol = nParameters) if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, nParameters) } futilityBounds <- design$futilityBounds futilityBounds[!is.na(futilityBounds) & futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <- NA_real_ if (designPlan$groups == 1) { n1 <- design$informationRates %*% t(maxNumberOfSubjects) for (j in (1:nParameters)) { 
criticalValuesEffectScaleUpper[, j] <- thetaH0 + (2 * directionUpper[j] - 1) * design$criticalValues * sqrt(thetaH0 * (1 - thetaH0)) / sqrt(n1[, j]) if (design$sided == 2) { criticalValuesEffectScaleLower[, j] <- thetaH0 - (2 * directionUpper[j] - 1) * design$criticalValues * sqrt(thetaH0 * (1 - thetaH0)) / sqrt(n1[, j]) } if (!.isTrialDesignFisher(design) && !all(is.na(futilityBounds))) { futilityBoundsEffectScaleUpper[, j] <- thetaH0 + (2 * directionUpper[j] - 1) * futilityBounds * sqrt(thetaH0 * (1 - thetaH0)) / sqrt(n1[1:(design$kMax - 1), j]) } if (!.isTrialDesignFisher(design) && design$sided == 2 && design$kMax > 1 && (design$typeOfDesign == C_TYPE_OF_DESIGN_PT || !is.null(design$typeBetaSpending) && design$typeBetaSpending != "none")) { futilityBoundsEffectScaleLower[, j] <- thetaH0 - (2 * directionUpper[j] - 1) * futilityBounds * sqrt(thetaH0 * (1 - thetaH0)) / sqrt(n1[1:(design$kMax - 1), j]) } } } else if (!designPlan$riskRatio) { boundaries <- design$criticalValues # calculate pi1 that solves (pi1 - pi2 - thetaH0) / SE(pi1 - pi2 - thetaH0) # = crit by using Farrington & Manning approach for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1 / allocationRatioPlanned[j] for (i in (1:length(boundaries))) { tryCatch( { pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues( rate1 = x, rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned[j], method = "diff" ) (x - pi2 - thetaH0) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5 )$root }, error = function(e) { pi1Bound <<- NA_real_ } ) # difference to pi2 criticalValuesEffectScaleUpper[i, j] <- pi1Bound - pi2 } if (design$sided == 2) { for (i in (1:length(boundaries))) { tryCatch( { pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues( rate1 = x, 
rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned[j], method = "diff" ) (x - pi2 - thetaH0) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + fm$ml2 * (1 - fm$ml2) / n2[i]) + (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5 )$root }, error = function(e) { pi1Bound <<- NA_real_ } ) # difference to pi2 criticalValuesEffectScaleLower[i, j] <- pi1Bound - pi2 } } } if (!.isTrialDesignFisher(design) && !all(is.na(futilityBounds))) { boundaries <- futilityBounds for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1 / allocationRatioPlanned[j] for (i in (1:length(boundaries))) { tryCatch( { pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues( rate1 = x, rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned[j], method = "diff" ) (x - pi2 - thetaH0) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5 )$root }, error = function(e) { pi1Bound <<- NA_real_ } ) # difference to pi2 futilityBoundsEffectScaleUpper[i, j] <- pi1Bound - pi2 } } } if (!.isTrialDesignFisher(design) && design$sided == 2 && design$kMax > 1 && (design$typeOfDesign == C_TYPE_OF_DESIGN_PT || !is.null(design$typeBetaSpending) && design$typeBetaSpending != "none")) { boundaries <- -futilityBounds for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1 / allocationRatioPlanned[j] for (i in (1:length(boundaries))) { tryCatch( { pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues( rate1 = x, rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned[j], method = "diff" ) (x - pi2 - thetaH0) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * 
boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5 )$root }, error = function(e) { pi1Bound <<- NA_real_ } ) futilityBoundsEffectScaleLower[i, j] <- pi1Bound - pi2 # difference to pi2 } } } } else { boundaries <- design$criticalValues # calculate pi1 that solves (pi1 - thetaH0 * pi2) / SE(pi1 - thetaH0 * pi2) # = crit by using Farrington & Manning approach for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1 / allocationRatioPlanned[j] for (i in (1:length(boundaries))) { tryCatch( { pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues( rate1 = x, rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned[j], method = "ratio" ) (x - thetaH0 * pi2) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + thetaH0^2 * fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5 )$root }, error = function(e) { pi1Bound <<- NA_real_ } ) # ratio to pi2 criticalValuesEffectScaleUpper[i, j] <- pi1Bound / pi2 } if (design$sided == 2) { for (i in (1:length(boundaries))) { tryCatch( { pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues( rate1 = x, rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned[j], method = "ratio" ) (x - thetaH0 * pi2) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + thetaH0^2 * fm$ml2 * (1 - fm$ml2) / n2[i]) + (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5 )$root }, error = function(e) { pi1Bound <<- NA_real_ } ) # ratio to pi2 criticalValuesEffectScaleLower[i, j] <- pi1Bound / pi2 } } } if (!.isTrialDesignFisher(design) && !all(is.na(futilityBounds))) { boundaries <- futilityBounds for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1 / allocationRatioPlanned[j] for (i in 
(1:length(boundaries))) { tryCatch( { pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues( rate1 = x, rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned[j], method = "ratio" ) (x - thetaH0 * pi2) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + thetaH0^2 * fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5 )$root }, error = function(e) { pi1Bound <<- NA_real_ } ) # ratio to pi2 futilityBoundsEffectScaleUpper[i, j] <- pi1Bound / pi2 } } } if (!.isTrialDesignFisher(design) && design$sided == 2 && design$kMax > 1 && (design$typeOfDesign == C_TYPE_OF_DESIGN_PT || !is.null(design$typeBetaSpending) && design$typeBetaSpending != "none")) { boundaries <- -futilityBounds for (j in (1:nParameters)) { n1 <- allocationRatioPlanned[j] * design$informationRates * maxNumberOfSubjects[j] / (1 + allocationRatioPlanned[j]) n2 <- n1 / allocationRatioPlanned[j] for (i in (1:length(boundaries))) { tryCatch( { pi1Bound <- uniroot( function(x) { fm <- .getFarringtonManningValues( rate1 = x, rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned[j], method = "ratio" ) (x - thetaH0 * pi2) / sqrt(fm$ml1 * (1 - fm$ml1) / n1[i] + thetaH0^2 * fm$ml2 * (1 - fm$ml2) / n2[i]) - (2 * directionUpper[j] - 1) * boundaries[i] }, lower = 0, upper = 1, tol = .Machine$double.eps^0.5 )$root }, error = function(e) { pi1Bound <<- NA_real_ } ) # ratio to pi2 futilityBoundsEffectScaleLower[i, j] <- pi1Bound / pi2 } } } } return(list( criticalValuesEffectScaleUpper = matrix(criticalValuesEffectScaleUpper, nrow = design$kMax), criticalValuesEffectScaleLower = matrix(criticalValuesEffectScaleLower, nrow = design$kMax), futilityBoundsEffectScaleUpper = matrix(futilityBoundsEffectScaleUpper, nrow = design$kMax - 1), futilityBoundsEffectScaleLower = matrix(futilityBoundsEffectScaleLower, nrow = design$kMax - 1) )) } .getEffectScaleBoundaryDataSurvival <- function(designPlan) { design <- 
designPlan$.design thetaH0 <- designPlan$thetaH0 eventsPerStage <- designPlan$eventsPerStage allocationRatioPlanned <- designPlan$allocationRatioPlanned directionUpper <- designPlan$directionUpper if (design$kMax == 1) { nParameters <- length(eventsPerStage) } else { nParameters <- ncol(eventsPerStage) } directionUpper[is.na(directionUpper)] <- TRUE if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, nParameters) } futilityBounds <- design$futilityBounds futilityBounds[!is.na(futilityBounds) & futilityBounds <= C_FUTILITY_BOUNDS_DEFAULT] <- NA_real_ criticalValues <- design$criticalValues criticalValuesEffectScaleUpper <- matrix(, nrow = design$kMax, ncol = nParameters) criticalValuesEffectScaleLower <- matrix(, nrow = design$kMax, ncol = nParameters) futilityBoundsEffectScaleUpper <- matrix(, nrow = design$kMax - 1, ncol = nParameters) futilityBoundsEffectScaleLower <- matrix(, nrow = design$kMax - 1, ncol = nParameters) for (j in (1:nParameters)) { if (design$sided == 1) { criticalValuesEffectScaleUpper[, j] <- thetaH0 * (exp((2 * directionUpper[j] - 1) * criticalValues * (1 + allocationRatioPlanned[j]) / sqrt(allocationRatioPlanned[j] * eventsPerStage[, j]))) } else { criticalValuesEffectScaleUpper[, j] <- thetaH0 * (exp((2 * directionUpper[j] - 1) * criticalValues * (1 + allocationRatioPlanned[j]) / sqrt(allocationRatioPlanned[j] * eventsPerStage[, j]))) criticalValuesEffectScaleLower[, j] <- thetaH0 * (exp(-(2 * directionUpper[j] - 1) * criticalValues * (1 + allocationRatioPlanned[j]) / sqrt(allocationRatioPlanned[j] * eventsPerStage[, j]))) } if (!.isTrialDesignFisher(design) && !all(is.na(futilityBounds))) { futilityBoundsEffectScaleUpper[, j] <- thetaH0 * (exp((2 * directionUpper[j] - 1) * futilityBounds * (1 + allocationRatioPlanned[j]) / sqrt(allocationRatioPlanned[j] * eventsPerStage[1:(design$kMax - 1), j]))) } if (!.isTrialDesignFisher(design) && design$sided == 2 && design$kMax > 1 && (design$typeOfDesign 
== C_TYPE_OF_DESIGN_PT || !is.null(design$typeBetaSpending) && design$typeBetaSpending != "none")) { futilityBoundsEffectScaleLower[, j] <- thetaH0 * (exp(-(2 * directionUpper[j] - 1) * futilityBounds * (1 + allocationRatioPlanned[j]) / sqrt(allocationRatioPlanned[j] * eventsPerStage[1:(design$kMax - 1), j]))) } } return(list( criticalValuesEffectScaleUpper = matrix(criticalValuesEffectScaleUpper, nrow = design$kMax), criticalValuesEffectScaleLower = matrix(criticalValuesEffectScaleLower, nrow = design$kMax), futilityBoundsEffectScaleUpper = matrix(futilityBoundsEffectScaleUpper, nrow = design$kMax - 1), futilityBoundsEffectScaleLower = matrix(futilityBoundsEffectScaleLower, nrow = design$kMax - 1) )) } #' @title #' Get Sample Size Means #' #' @description #' Returns the sample size for testing means in one or two samples. #' #' @inheritParams param_design_with_default #' @inheritParams param_groups #' @param normalApproximation The type of computation of the p-values. If \code{TRUE}, the variance is #' assumed to be known, default is \code{FALSE}, i.e., the calculations are performed #' with the t distribution. #' @param meanRatio If \code{TRUE}, the sample size for #' one-sided testing of H0: \code{mu1 / mu2 = thetaH0} is calculated, default is \code{FALSE}. #' @inheritParams param_thetaH0 #' @inheritParams param_alternative #' @inheritParams param_stDev #' @inheritParams param_allocationRatioPlanned_sampleSize #' @inheritParams param_three_dots #' #' @details #' At given design the function calculates the stage-wise (non-cumulated) and maximum #' sample size for testing means. #' In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. #' A null hypothesis value thetaH0 != 0 for testing the difference of two means or #' thetaH0 != 1 for testing the ratio of two means can be specified. 
#' Critical bounds and stopping for futility bounds are provided at the effect scale #' (mean, mean difference, or mean ratio, respectively) for each sample size calculation separately. #' #' @template return_object_trial_design_plan #' @template how_to_get_help_for_generics #' #' @family sample size functions #' #' @template examples_get_sample_size_means #' #' @export #' getSampleSizeMeans <- function(design = NULL, ..., groups = 2, normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = seq(0.2, 1, 0.2), # C_ALTERNATIVE_DEFAULT stDev = 1, # C_STDEV_DEFAULT allocationRatioPlanned = NA_real_ # C_ALLOCATION_RATIO_DEFAULT ) { if (is.null(design)) { design <- .getDefaultDesign(..., type = "sampleSize") .warnInCaseOfUnknownArguments( functionName = "getSampleSizeMeans", ignore = .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = FALSE), ... ) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments(functionName = "getSampleSizeMeans", ...) .warnInCaseOfTwoSidedPowerArgument(...) } designPlan <- .createDesignPlanMeans( objectType = "sampleSize", design = design, normalApproximation = normalApproximation, meanRatio = meanRatio, thetaH0 = thetaH0, alternative = alternative, stDev = stDev, groups = groups, allocationRatioPlanned = allocationRatioPlanned, ... ) return(.getSampleSize(designPlan)) } .warnInCaseOfTwoSidedPowerArgument <- function(...) { args <- list(...) argNames <- names(args) if ("twoSidedPower" %in% argNames) { warning("'twoSidedPower' can only be defined in 'design'", call. = FALSE) } } .warnInCaseOfTwoSidedPowerIsDisabled <- function(design) { if (design$sided == 2 && !is.na(design$twoSidedPower) && !design$twoSidedPower && design$.getParameterType("twoSidedPower") == C_PARAM_USER_DEFINED) { warning("design$twoSidedPower = FALSE will be ignored because design$sided = 2", call. 
= FALSE) } } #' @title #' Get Sample Size Rates #' #' @description #' Returns the sample size for testing rates in one or two samples. #' #' @inheritParams param_design_with_default #' @inheritParams param_groups #' @param normalApproximation If \code{FALSE}, the sample size #' for the case of one treatment group is calculated exactly using the binomial distribution, #' default is \code{TRUE}. #' @param riskRatio If \code{TRUE}, the sample size for one-sided #' testing of H0: \code{pi1 / pi2 = thetaH0} is calculated, default is \code{FALSE}. #' @inheritParams param_thetaH0 #' @inheritParams param_pi1_rates #' @inheritParams param_pi2_rates #' @inheritParams param_allocationRatioPlanned_sampleSize #' @inheritParams param_three_dots #' #' @details #' At given design the function calculates the stage-wise (non-cumulated) and maximum sample size for testing rates. #' In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified. #' If a null hypothesis value thetaH0 != 0 for testing the difference of two rates #' thetaH0 != 1 for testing the risk ratio is specified, the sample size #' formula according to Farrington & Manning (Statistics in Medicine, 1990) is used. #' Critical bounds and stopping for futility bounds are provided at the effect scale #' (rate, rate difference, or rate ratio, respectively) for each sample size calculation separately. #' For the two-sample case, the calculation here is performed at fixed pi2 as given as argument #' in the function. 
#' #' @template return_object_trial_design_plan #' @template how_to_get_help_for_generics #' #' @family sample size functions #' #' @template examples_get_sample_size_rates #' #' @export #' getSampleSizeRates <- function(design = NULL, ..., groups = 2, normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = c(0.4, 0.5, 0.6), # C_PI_1_SAMPLE_SIZE_DEFAULT pi2 = 0.2, # C_PI_2_DEFAULT allocationRatioPlanned = NA_real_ # C_ALLOCATION_RATIO_DEFAULT ) { if (is.null(design)) { design <- .getDefaultDesign(..., type = "sampleSize") .warnInCaseOfUnknownArguments( functionName = "getSampleSizeRates", ignore = .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = FALSE), ... ) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments(functionName = "getSampleSizeRates", ...) .warnInCaseOfTwoSidedPowerArgument(...) } designPlan <- .createDesignPlanRates( objectType = "sampleSize", design = design, normalApproximation = normalApproximation, riskRatio = riskRatio, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, groups = groups, allocationRatioPlanned = allocationRatioPlanned, ... ) return(.getSampleSize(designPlan)) } # Hidden parameter: # @param accountForObservationTimes If \code{accountForObservationTimes = TRUE}, the number of # subjects is calculated assuming specific accrual and follow-up time, default is \code{TRUE} # (see details). # If \code{accountForObservationTimes = FALSE}, only the event rates are used for the calculation # of the maximum number of subjects. # \code{accountForObservationTimes} can be selected as \code{FALSE}. In this case, # the number of subjects is calculated from the event probabilities only. # This kind of computation does not account for the specific accrual pattern and survival distribution. #' @title #' Get Sample Size Survival #' #' @description #' Returns the sample size for testing the hazard ratio in a two treatment groups survival design. 
#'
#' @inheritParams param_design_with_default
#' @inheritParams param_typeOfComputation
#' @inheritParams param_allocationRatioPlanned_sampleSize
#' @inheritParams param_thetaH0
#' @inheritParams param_lambda1
#' @inheritParams param_lambda2
#' @inheritParams param_pi1_survival
#' @inheritParams param_pi2_survival
#' @inheritParams param_median1
#' @inheritParams param_median2
#' @inheritParams param_piecewiseSurvivalTime
#' @inheritParams param_accrualTime
#' @inheritParams param_accrualIntensity
#' @inheritParams param_accrualIntensityType
#' @inheritParams param_eventTime
#' @inheritParams param_hazardRatio
#' @inheritParams param_kappa
#' @inheritParams param_dropoutRate1
#' @inheritParams param_dropoutRate2
#' @inheritParams param_dropoutTime
#' @param followUpTime The assumed (additional) follow-up time for the study, default is \code{6}.
#' The total study duration is \code{accrualTime + followUpTime}.
#' @param maxNumberOfSubjects If \code{maxNumberOfSubjects > 0} is specified,
#' the follow-up time for the required number of events is determined.
#' @inheritParams param_three_dots
#'
#' @details
#' At given design the function calculates the number of events and an estimate for the
#' necessary number of subjects for testing the hazard ratio in a survival design.
#' It also calculates the time when the required events are expected under the given
#' assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times
#' and constant or non-constant piecewise accrual).
#' Additionally, an allocation ratio = \code{n1 / n2} can be specified where \code{n1} and \code{n2} are the number
#' of subjects in the two treatment groups.
#'
#' Optional argument \code{accountForObservationTimes}: if \code{accountForObservationTimes = TRUE}, the number of
#' subjects is calculated assuming specific accrual and follow-up time, default is \code{TRUE}.
#'
#' The formula of Kim & Tsiatis (Biometrics, 1990)
#' is used to calculate the expected number of events under the alternative
#' (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized
#' to piecewise survival times and non-constant piecewise accrual over time.\cr
#'
#' Optional argument \code{accountForObservationTimes}: if \code{accountForObservationTimes = FALSE},
#' only the event rates are used for the calculation of the maximum number of subjects.
#'
#' @template details_piecewise_survival
#'
#' @template details_piecewise_accrual
#'
#' @template return_object_trial_design_plan
#' @template how_to_get_help_for_generics
#'
#' @family sample size functions
#'
#' @template examples_get_sample_size_survival
#'
#' @export
#'
getSampleSizeSurvival <- function(design = NULL, ...,
        typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"),
        thetaH0 = 1, # C_THETA_H0_SURVIVAL_DEFAULT
        pi1 = NA_real_,
        pi2 = NA_real_,
        lambda1 = NA_real_,
        lambda2 = NA_real_,
        median1 = NA_real_,
        median2 = NA_real_,
        kappa = 1,
        hazardRatio = NA_real_,
        piecewiseSurvivalTime = NA_real_,
        allocationRatioPlanned = NA_real_, # C_ALLOCATION_RATIO_DEFAULT
        eventTime = 12, # C_EVENT_TIME_DEFAULT
        accrualTime = c(0, 12), # C_ACCRUAL_TIME_DEFAULT
        accrualIntensity = 0.1, # C_ACCRUAL_INTENSITY_DEFAULT
        accrualIntensityType = c("auto", "absolute", "relative"),
        followUpTime = NA_real_,
        maxNumberOfSubjects = NA_real_,
        dropoutRate1 = 0, # C_DROP_OUT_RATE_1_DEFAULT
        dropoutRate2 = 0, # C_DROP_OUT_RATE_2_DEFAULT
        dropoutTime = 12 # C_DROP_OUT_TIME_DEFAULT
        ) {
    # Resolve the design: either build a default one from '...' or validate
    # the user-supplied design object.
    if (is.null(design)) {
        design <- .getDefaultDesign(..., type = "sampleSize", ignore = c("accountForObservationTimes"))
        .warnInCaseOfUnknownArguments(
            functionName = "getSampleSizeSurvival",
            ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
                design,
                powerCalculationEnabled = FALSE
            ), "accountForObservationTimes"), ...
        )
    } else {
        .assertIsTrialDesign(design)
        .warnInCaseOfUnknownArguments(
            functionName = "getSampleSizeSurvival", ...,
            ignore = c("accountForObservationTimes")
        )
        .warnInCaseOfTwoSidedPowerArgument(...)
    }

    # maxNumberOfSubjects == 0 is treated the same as "not specified"
    if (!is.na(maxNumberOfSubjects) && maxNumberOfSubjects == 0) {
        maxNumberOfSubjects <- NA_real_
    }

    # identify accrual time case
    accrualSetup <- getAccrualTime(
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        accrualIntensityType = accrualIntensityType,
        maxNumberOfSubjects = maxNumberOfSubjects,
        showWarnings = FALSE
    )
    accrualSetup$.validate()

    # 'accountForObservationTimes' is a hidden argument passed via '...';
    # default is TRUE when not supplied
    accountForObservationTimes <- .getOptionalArgument("accountForObservationTimes", ...)
    if (is.null(accountForObservationTimes)) {
        accountForObservationTimes <- TRUE
    }

    # Branch 1: maxNumberOfSubjects cannot be derived directly from the accrual
    # specification, so it must be searched for from a user-defined followUpTime.
    if (!accrualSetup$maxNumberOfSubjectsCanBeCalculatedDirectly &&
            accrualSetup$followUpTimeMustBeUserDefined) {
        if (is.na(followUpTime)) {
            if (accrualSetup$piecewiseAccrualEnabled && !accrualSetup$endOfAccrualIsUserDefined) {
                stop(
                    C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                    "'followUpTime', 'maxNumberOfSubjects' or end of accrual must be defined"
                )
            }
            stop(
                C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                "'followUpTime' or 'maxNumberOfSubjects' must be defined"
            )
        }
        # Inf is replaced by a large finite sentinel for the numeric search;
        # it is mapped back to Inf before the result is reported below
        if (followUpTime == Inf) {
            followUpTime <- 1e12
        }
        if (!any(is.na(hazardRatio)) && !is.na(thetaH0)) {
            .assertIsValidHazardRatio(hazardRatio, thetaH0)
        }
        pwst <- getPiecewiseSurvivalTime(
            piecewiseSurvivalTime = piecewiseSurvivalTime,
            lambda1 = lambda1, lambda2 = lambda2,
            pi1 = pi1, pi2 = pi2,
            median1 = median1, median2 = median2,
            hazardRatio = hazardRatio, eventTime = eventTime,
            kappa = kappa, .silent = TRUE
        )

        # The search below only supports a single effect size value; identify
        # which user-facing parameter defines the effect and reject vectors.
        paramName <- NULL
        if (!pwst$piecewiseSurvivalEnabled) {
            if (pwst$.getParameterType("pi1") == C_PARAM_USER_DEFINED ||
                    pwst$.getParameterType("pi1") == C_PARAM_DEFAULT_VALUE ||
                    pwst$.getParameterType("pi2") == C_PARAM_USER_DEFINED) {
                paramName <- "pi1"
            } else if (pwst$.getParameterType("lambda1") == C_PARAM_USER_DEFINED ||
                    pwst$.getParameterType("lambda2") == C_PARAM_USER_DEFINED) {
                paramName <- "lambda1"
            } else if (pwst$.getParameterType("hazardRatio") == C_PARAM_USER_DEFINED) {
                paramName <- "hazardRatio"
            } else if (pwst$.getParameterType("median1") == C_PARAM_USER_DEFINED ||
                    pwst$.getParameterType("median2") == C_PARAM_USER_DEFINED) {
                paramName <- "median1"
            }
        } else if (pwst$.getParameterType("hazardRatio") == C_PARAM_USER_DEFINED) {
            paramName <- "hazardRatio"
        }
        if (!is.null(paramName)) {
            paramValue <- pwst[[paramName]]
            if (!is.null(paramValue) && length(paramValue) > 1) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "the calculation of 'maxNumberOfSubjects' for given 'followUpTime' ",
                    "is only available for a single '", paramName, "'; ",
                    paramName, " = ", .arrayToString(
                        paramValue,
                        vectorLookAndFeelEnabled = TRUE
                    )
                )
            }
        }
        hr <- hazardRatio
        if (all(is.na(hazardRatio))) {
            hr <- pwst$hazardRatio
        }
        if (all(is.na(hazardRatio))) {
            .assertIsValidHazardRatio(hr, thetaH0)
        }
        maxNumberOfSubjectsTarget <- NA_real_
        # All warnings raised during the numeric search are muffled; only the
        # final calculation below is allowed to surface warnings.
        withCallingHandlers(
            {
                # search for accrual time that provides a result
                at <- accrualSetup$accrualTime
                additionalAccrual <- 1
                searchAccrualTimeEnabled <- TRUE
                maxSearchIterations <- 50
                maxNumberOfSubjectsLower <- NA_real_
                maxNumberOfSubjectsLowerBefore <- 0
                sampleSize <- NULL
                # NOTE(review): 'expectionMessage' is a misspelling of
                # 'exceptionMessage' (local variable; kept unchanged here)
                expectionMessage <- NA_character_
                # Phase 1: double 'additionalAccrual' until a sample size
                # calculation succeeds (doubling search for a feasible point)
                while (searchAccrualTimeEnabled && maxSearchIterations >= 0 &&
                        (is.na(maxNumberOfSubjectsLower) ||
                            maxNumberOfSubjectsLower < maxNumberOfSubjectsLowerBefore ||
                            maxNumberOfSubjectsLower < 1e8)) {
                    tryCatch(
                        {
                            maxNumberOfSubjectsLowerBefore <- ifelse(is.na(maxNumberOfSubjectsLower),
                                0, maxNumberOfSubjectsLower
                            )
                            maxNumberOfSubjectsLower <- getAccrualTime(
                                accrualTime = c(at, at[length(at)] + additionalAccrual),
                                accrualIntensity = accrualSetup$accrualIntensity,
                                accrualIntensityType = accrualIntensityType
                            )$maxNumberOfSubjects
                            additionalAccrual <- 2 * additionalAccrual
                            sampleSize <- .getSampleSizeSurvival(
                                design = design,
                                typeOfComputation = typeOfComputation,
                                thetaH0 = thetaH0,
                                pi2 = pi2,
                                pi1 = pi1,
                                allocationRatioPlanned = allocationRatioPlanned,
                                accountForObservationTimes = accountForObservationTimes,
                                eventTime = eventTime,
                                accrualTime = accrualSetup$accrualTime,
                                accrualIntensity = accrualSetup$accrualIntensity,
                                kappa = kappa,
                                piecewiseSurvivalTime = piecewiseSurvivalTime,
                                lambda2 = lambda2,
                                lambda1 = lambda1,
                                median1 = median1,
                                median2 = median2,
                                followUpTime = NA_real_,
                                maxNumberOfSubjects = maxNumberOfSubjectsLower,
                                dropoutRate1 = dropoutRate1,
                                dropoutRate2 = dropoutRate2,
                                dropoutTime = dropoutTime,
                                hazardRatio = hazardRatio
                            )
                            searchAccrualTimeEnabled <- FALSE
                        },
                        error = function(e) {
                            expectionMessage <<- e$message
                        }
                    )
                    maxSearchIterations <- maxSearchIterations - 1
                }
                if (is.null(sampleSize) || is.na(sampleSize$followUpTime)) {
                    # re-raise allocation ratio errors verbatim; everything else
                    # becomes a generic "search failed" runtime error
                    if (!is.na(expectionMessage) &&
                            grepl("'allocationRatioPlanned' > 0", expectionMessage)) {
                        stop(expectionMessage, call. = FALSE)
                    }
                    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                        "'additionalAccrual' could not be found, change accrual time specification",
                        call. = FALSE
                    )
                }

                # define lower bound for maxNumberOfSubjects
                maxNumberOfSubjectsLower <- ceiling(max(na.omit(c(
                    sampleSize$eventsFixed,
                    as.vector(sampleSize$eventsPerStage)
                ))))
                if (is.na(maxNumberOfSubjectsLower)) {
                    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "'maxNumberOfSubjectsLower' could not be found",
                        call. = FALSE
                    )
                }

                # check whether accrual time already fulfills requirement
                # (followUpTime < given value) or need to be increased,
                # then define upper bound for maxNumberOfSubjects
                maxSearchIterations <- 50
                maxNumberOfSubjectsUpper <- NA_real_
                fut <- sampleSize$followUpTime
                iterations <- 1
                while (fut <= followUpTime) {
                    fut <- 2 * abs(fut)
                    iterations <- iterations + 1
                    if (iterations > 50) {
                        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                            "search algorithm failed to end",
                            call. = FALSE
                        )
                    }
                }
                # Phase 2: keep doubling the extra accrual until the resulting
                # follow-up time drops below the requested value -> upper bound
                while (!is.na(fut) && fut > followUpTime && maxSearchIterations >= 0) {
                    maxNumberOfSubjectsUpper <- getAccrualTime(
                        accrualTime = c(at, at[length(at)] + additionalAccrual),
                        accrualIntensity = accrualSetup$accrualIntensity,
                        accrualIntensityType = accrualIntensityType
                    )$maxNumberOfSubjects
                    additionalAccrual <- 2 * additionalAccrual
                    fut <- .getSampleSizeSurvival(
                        design = design,
                        typeOfComputation = typeOfComputation,
                        thetaH0 = thetaH0,
                        pi2 = pi2,
                        pi1 = pi1,
                        allocationRatioPlanned = allocationRatioPlanned,
                        accountForObservationTimes = accountForObservationTimes,
                        eventTime = eventTime,
                        accrualTime = accrualSetup$accrualTime,
                        accrualIntensity = accrualSetup$accrualIntensity,
                        kappa = kappa,
                        piecewiseSurvivalTime = piecewiseSurvivalTime,
                        lambda2 = lambda2,
                        lambda1 = lambda1,
                        median1 = median1,
                        median2 = median2,
                        followUpTime = NA_real_,
                        maxNumberOfSubjects = maxNumberOfSubjectsUpper,
                        dropoutRate1 = dropoutRate1,
                        dropoutRate2 = dropoutRate2,
                        dropoutTime = dropoutTime,
                        hazardRatio = hazardRatio
                    )$followUpTime
                    maxSearchIterations <- maxSearchIterations - 1
                }
                if (is.na(maxNumberOfSubjectsUpper)) {
                    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                        "'maxNumberOfSubjectsUpper' could not be found ",
                        "(fut = ", fut, ", followUpTime = ", followUpTime, ")",
                        call. = FALSE
                    )
                }

                # use maxNumberOfSubjectsLower and maxNumberOfSubjectsUpper to find end of accrual
                if (dropoutRate1 != 0 || dropoutRate2 != 0) {
                    # Adjust lower bound for given dropouts assuming exponential distribution
                    if (is.na(allocationRatioPlanned)) {
                        allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT
                    }
                    maxNumberOfSubjectsLower <- maxNumberOfSubjectsLower /
                        ((allocationRatioPlanned * (1 - dropoutRate1)^(
                            accrualSetup$accrualTime[length(accrualSetup$accrualTime)] / dropoutTime) +
                            (1 - dropoutRate2)^(accrualSetup$accrualTime[length(accrualSetup$accrualTime)] /
                                dropoutTime)) / (allocationRatioPlanned + 1))
                    # Manual bisection between the two bounds on maxNumberOfSubjects
                    prec <- 1
                    maxSearchIterations <- 50
                    while (prec > 1e-04 && maxSearchIterations >= 0) {
                        maxNumberOfSubjectsTarget <- (maxNumberOfSubjectsLower + maxNumberOfSubjectsUpper) / 2
                        fut <- .getSampleSizeSurvival(
                            design = design,
                            typeOfComputation = typeOfComputation,
                            thetaH0 = thetaH0,
                            pi2 = pi2,
                            pi1 = pi1,
                            allocationRatioPlanned = allocationRatioPlanned,
                            accountForObservationTimes = accountForObservationTimes,
                            eventTime = eventTime,
                            accrualTime = accrualSetup$accrualTime,
                            accrualIntensity = accrualSetup$accrualIntensity,
                            kappa = kappa,
                            piecewiseSurvivalTime = piecewiseSurvivalTime,
                            lambda2 = lambda2,
                            lambda1 = lambda1,
                            median1 = median1,
                            median2 = median2,
                            followUpTime = NA_real_,
                            maxNumberOfSubjects = maxNumberOfSubjectsTarget,
                            dropoutRate1 = dropoutRate1,
                            dropoutRate2 = dropoutRate2,
                            dropoutTime = dropoutTime,
                            hazardRatio = hazardRatio
                        )$followUpTime
                        ifelse(fut <= followUpTime,
                            maxNumberOfSubjectsUpper <- maxNumberOfSubjectsTarget,
                            maxNumberOfSubjectsLower <- maxNumberOfSubjectsTarget
                        )
                        prec <- maxNumberOfSubjectsUpper - maxNumberOfSubjectsLower
                        maxSearchIterations <- maxSearchIterations - 1
                    }
                } else {
                    # No dropouts: use the generic bisection helper to find the
                    # maxNumberOfSubjects whose follow-up time matches the target
                    maxNumberOfSubjectsTarget <- .getOneDimensionalRootBisectionMethod(
                        fun = function(x) {
                            fut <- .getSampleSizeSurvival(
                                design = design,
                                typeOfComputation = typeOfComputation,
                                thetaH0 = thetaH0,
                                pi2 = pi2,
                                pi1 = pi1,
                                allocationRatioPlanned = allocationRatioPlanned,
                                accountForObservationTimes = accountForObservationTimes,
                                eventTime = eventTime,
                                accrualTime = accrualSetup$accrualTime,
                                accrualIntensity = accrualSetup$accrualIntensity,
                                kappa = kappa,
                                piecewiseSurvivalTime = piecewiseSurvivalTime,
                                lambda2 = lambda2,
                                lambda1 = lambda1,
                                median1 = median1,
                                median2 = median2,
                                followUpTime = NA_real_,
                                maxNumberOfSubjects = x,
                                dropoutRate1 = dropoutRate1,
                                dropoutRate2 = dropoutRate2,
                                dropoutTime = dropoutTime,
                                hazardRatio = hazardRatio
                            )$followUpTime
                            return(followUpTime - fut)
                        },
                        lower = maxNumberOfSubjectsLower,
                        upper = maxNumberOfSubjectsUpper,
                        tolerance = 1e-04,
                        acceptResultsOutOfTolerance = TRUE,
                        maxSearchIterations = 50,
                        direction = 0,
                        suppressWarnings = FALSE,
                        callingFunctionInformation = "getSampleSizeSurvival"
                    )
                }
            },
            warning = function(w) {
                invokeRestart("muffleWarning")
            }
        )
        if (is.na(maxNumberOfSubjectsTarget)) {
            stop(
                C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                "failed to calculate 'maxNumberOfSubjects' by given 'followUpTime' ",
                "(lower = ", maxNumberOfSubjectsLower, ", upper = ", maxNumberOfSubjectsUpper, ")"
            )
        }

        # Final calculation with the maxNumberOfSubjects found by the search
        sampleSizeSurvival <- .getSampleSizeSurvival(
            design = design,
            typeOfComputation = typeOfComputation,
            thetaH0 = thetaH0,
            pi2 = pi2,
            pi1 = pi1,
            allocationRatioPlanned = allocationRatioPlanned,
            accountForObservationTimes = accountForObservationTimes,
            eventTime = eventTime,
            accrualTime = accrualSetup$accrualTime,
            accrualIntensity = accrualSetup$accrualIntensity,
            kappa = kappa,
            piecewiseSurvivalTime = piecewiseSurvivalTime,
            lambda2 = lambda2,
            lambda1 = lambda1,
            median1 = median1,
            median2 = median2,
            followUpTime = NA_real_,
            maxNumberOfSubjects = maxNumberOfSubjectsTarget,
            dropoutRate1 = dropoutRate1,
            dropoutRate2 = dropoutRate2,
            dropoutTime = dropoutTime,
            hazardRatio = hazardRatio
        )
        sampleSizeSurvival$.setParameterType("followUpTime", C_PARAM_USER_DEFINED)
        sampleSizeSurvival$.accrualTime <- accrualSetup
        if (!is.na(sampleSizeSurvival$followUpTime)) {
            if (followUpTime == 1e12) {
                followUpTime <- Inf
            }
            # snap numerically-zero follow-up times to exactly 0
            if (sampleSizeSurvival$followUpTime >= -1e-02 && sampleSizeSurvival$followUpTime <= 1e-02) {
                sampleSizeSurvival$followUpTime <- 0
            }
            # warn if the achieved follow-up time deviates from the requested one
            if (sampleSizeSurvival$followUpTime < followUpTime - 1e-02 ||
                    sampleSizeSurvival$followUpTime > followUpTime + 1e-02) {
                sampleSizeSurvival$.setParameterType("followUpTime", C_PARAM_GENERATED)
                warning("User defined 'followUpTime' (", followUpTime, ") ignored because ",
                    "follow-up time is ", round(sampleSizeSurvival$followUpTime, 4),
                    call. = FALSE
                )
            }
        }
        sampleSizeSurvival$.setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED)
        sampleSizeSurvival$.setParameterType("accrualTime", C_PARAM_GENERATED)
        return(sampleSizeSurvival)
    }

    # Branch 2: maxNumberOfSubjects is available directly; delegate
    return(.getSampleSizeSurvival(
        design = design,
        typeOfComputation = typeOfComputation,
        thetaH0 = thetaH0,
        pi2 = pi2,
        pi1 = pi1,
        allocationRatioPlanned = allocationRatioPlanned,
        accountForObservationTimes = accountForObservationTimes,
        eventTime = eventTime,
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        accrualIntensityType = accrualIntensityType,
        kappa = kappa,
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        lambda2 = lambda2,
        lambda1 = lambda1,
        median1 = median1,
        median2 = median2,
        followUpTime = followUpTime,
        maxNumberOfSubjects = maxNumberOfSubjects,
        dropoutRate1 = dropoutRate1,
        dropoutRate2 = dropoutRate2,
        dropoutTime = dropoutTime,
        hazardRatio = hazardRatio
    ))
}

# Internal workhorse: builds the survival design plan and runs the
# sample size calculation (continued below).
.getSampleSizeSurvival <- function(...,
        design = NULL,
        typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"),
        thetaH0 = 1,
        pi2 = NA_real_,
        pi1 = NA_real_,
        allocationRatioPlanned = NA_real_,
        accountForObservationTimes = TRUE,
        eventTime = C_EVENT_TIME_DEFAULT,
        accrualTime = C_ACCRUAL_TIME_DEFAULT,
        accrualIntensity = C_ACCRUAL_INTENSITY_DEFAULT,
        accrualIntensityType = c("auto", "absolute", "relative"),
        kappa = 1,
        piecewiseSurvivalTime = NA_real_,
        lambda2 = NA_real_,
        lambda1 = NA_real_,
        median1 = NA_real_,
        median2 = NA_real_,
        followUpTime = NA_real_,
        maxNumberOfSubjects = NA_real_,
        dropoutRate1 = 0,
        dropoutRate2 = dropoutRate1,
dropoutTime = NA_real_, hazardRatio = NA_real_) {
    # Build the survival design plan and delegate to the shared
    # .getSampleSize() workhorse.
    designPlan <- .createDesignPlanSurvival(
        objectType = "sampleSize",
        design = design,
        typeOfComputation = typeOfComputation,
        thetaH0 = thetaH0,
        pi2 = pi2,
        pi1 = pi1,
        allocationRatioPlanned = allocationRatioPlanned,
        accountForObservationTimes = accountForObservationTimes,
        eventTime = eventTime,
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        accrualIntensityType = accrualIntensityType,
        kappa = kappa,
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        lambda2 = lambda2,
        lambda1 = lambda1,
        median1 = median1,
        median2 = median2,
        followUpTime = followUpTime,
        maxNumberOfSubjects = maxNumberOfSubjects,
        dropoutRate1 = dropoutRate1,
        dropoutRate2 = dropoutRate2,
        dropoutTime = dropoutTime,
        hazardRatio = hazardRatio
    )
    return(.getSampleSize(designPlan))
}

# Creates and validates a TrialDesignPlanSurvival object shared by the
# sample size ('objectType = "sampleSize"') and power ('objectType = "power"')
# calculation paths. Performs all argument validation (thetaH0, event rates,
# dropout rates, accrual/follow-up time consistency), resolves the accrual
# setup and the piecewise survival time model, and initializes all derived
# parameter types on the plan. Returns the initialized design plan object.
.createDesignPlanSurvival <- function(...,
        objectType = c("sampleSize", "power"),
        design,
        typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"),
        thetaH0,
        pi2,
        pi1,
        allocationRatioPlanned,
        accountForObservationTimes,
        eventTime,
        accrualTime,
        accrualIntensity,
        accrualIntensityType,
        kappa,
        piecewiseSurvivalTime,
        lambda2,
        lambda1,
        median1,
        median2,
        followUpTime = NA_real_,
        directionUpper = NA,
        maxNumberOfEvents = NA_real_,
        maxNumberOfSubjects,
        dropoutRate1,
        dropoutRate2,
        dropoutTime,
        hazardRatio) {
    objectType <- match.arg(objectType)
    typeOfComputation <- .matchArgument(typeOfComputation, "Schoenfeld")

    # --- basic design and scalar argument validation ---
    .assertIsTrialDesignInverseNormalOrGroupSequential(design)
    .assertIsValidAlphaAndBeta(design$alpha, design$beta)
    .assertIsValidSidedParameter(design$sided)
    .assertIsSingleLogical(accountForObservationTimes, "accountForObservationTimes", naAllowed = TRUE)
    .assertIsSingleNumber(thetaH0, "thetaH0")
    .assertIsValidThetaH0(thetaH0, endpoint = "survival", groups = 2)
    .assertIsValidKappa(kappa)
    .assertIsValidDirectionUpper(directionUpper, design$sided, objectType, userFunctionCallEnabled = TRUE)
    if (objectType == "power") {
        .assertIsSingleNumber(maxNumberOfEvents, "maxNumberOfEvents")
        .assertIsInClosedInterval(maxNumberOfEvents, "maxNumberOfEvents",
            lower = 1, upper = maxNumberOfSubjects
        )
    }
    if (!any(is.na(pi1)) && (any(pi1 <= 0) || any(pi1 >= 1))) {
        stop(
            C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
            "event rate 'pi1' (", .arrayToString(pi1), ") is out of bounds (0; 1)"
        )
    }
    if (!any(is.na(pi2)) && (any(pi2 <= 0) || any(pi2 >= 1))) {
        stop(
            C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
            "event rate 'pi2' (", .arrayToString(pi2), ") is out of bounds (0; 1)"
        )
    }
    if (design$sided == 2 && thetaH0 != 1) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "two-sided case is implemented for superiority testing only (i.e., thetaH0 = 1)"
        )
    }
    if (thetaH0 <= 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            # fixed grammar: "allowed be" -> "allowed to be"
            "null hypothesis hazard ratio is not allowed to be negative or zero"
        )
    }
    if (!(typeOfComputation %in% c("Schoenfeld", "Freedman", "HsiehFreedman"))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "computation type ('", typeOfComputation, "') must be one of the following: ",
            "'Schoenfeld', 'Freedman', or 'HsiehFreedman' "
        )
    }
    if (typeOfComputation != "Schoenfeld" && thetaH0 != 1) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            # fixed message: the requirement is thetaH0 = 1 (the old text said
            # "thetaH0 != 1", contradicting the guard and the parallel
            # two-sided message above)
            "Freedman test calculation is possible only for superiority testing (thetaH0 = 1)"
        )
    }

    # --- accrual setup ---
    if (is.numeric(accrualTime) && all(is.na(accrualTime))) {
        accrualTime <- C_ACCRUAL_TIME_DEFAULT
    }
    if (all(is.na(accrualIntensity))) {
        accrualIntensity <- C_ACCRUAL_INTENSITY_DEFAULT
    }
    accrualSetup <- getAccrualTime(
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        accrualIntensityType = accrualIntensityType,
        maxNumberOfSubjects = maxNumberOfSubjects
    )
    accrualSetup$.validate()
    if (is.na(allocationRatioPlanned)) {
        allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT
    }
    .assertIsValidAllocationRatioPlannedSampleSize(allocationRatioPlanned, maxNumberOfSubjects)

    # --- create the plan object and record parameter provenance ---
    designPlan <- TrialDesignPlanSurvival(
        design = design,
        typeOfComputation = typeOfComputation,
        thetaH0 = thetaH0,
        allocationRatioPlanned = allocationRatioPlanned,
        accountForObservationTimes = accountForObservationTimes,
        eventTime = eventTime,
        accrualTime = accrualSetup$.getAccrualTimeWithoutLeadingZero(),
        accrualIntensity = accrualSetup$accrualIntensity,
        kappa = kappa,
        followUpTime = followUpTime,
        maxNumberOfSubjects = maxNumberOfSubjects,
        dropoutRate1 = dropoutRate1,
        dropoutRate2 = dropoutRate2,
        dropoutTime = dropoutTime,
        hazardRatio = hazardRatio
    )
    .setValueAndParameterType(
        designPlan, "allocationRatioPlanned",
        allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT
    )
    .setValueAndParameterType(designPlan, "dropoutRate1", dropoutRate1, C_DROP_OUT_RATE_1_DEFAULT)
    .setValueAndParameterType(designPlan, "dropoutRate2", dropoutRate2, C_DROP_OUT_RATE_2_DEFAULT)
    .setValueAndParameterType(designPlan, "dropoutTime", dropoutTime, C_DROP_OUT_TIME_DEFAULT)
    .setValueAndParameterType(designPlan, "kappa", kappa, 1)
    designPlan$.setSampleSizeObject(objectType)

    # critical values / futility bounds on the p-value scale
    designPlan$criticalValuesPValueScale <- matrix(design$stageLevels, ncol = 1)
    if (design$sided == 2) {
        designPlan$criticalValuesPValueScale <- designPlan$criticalValuesPValueScale * 2
        designPlan$.setParameterType("criticalValuesPValueScale", C_PARAM_GENERATED)
    }
    if (any(design$futilityBounds > C_FUTILITY_BOUNDS_DEFAULT)) {
        designPlan$futilityBoundsPValueScale <- matrix(1 - stats::pnorm(design$futilityBounds), ncol = 1)
        designPlan$.setParameterType("futilityBoundsPValueScale", C_PARAM_GENERATED)
    }

    designPlan$.accrualTime <- accrualSetup
    designPlan$totalAccrualTime <- accrualSetup$accrualTime[length(accrualSetup$accrualTime)]
    if (length(accrualSetup$accrualTime) > 2) {
        designPlan$.setParameterType("totalAccrualTime", C_PARAM_GENERATED)
    } else {
        designPlan$.setParameterType("totalAccrualTime", C_PARAM_NOT_APPLICABLE)
    }
    if (is.na(maxNumberOfSubjects)) {
        if (!is.na(accrualSetup$maxNumberOfSubjects)) {
            designPlan$maxNumberOfSubjects <- accrualSetup$maxNumberOfSubjects
            designPlan$.setParameterType(
                "maxNumberOfSubjects",
                accrualSetup$.getParameterType("maxNumberOfSubjects")
            )
        }
    } else if (maxNumberOfSubjects == 0) {
        designPlan$.setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED)
    } else {
        designPlan$.setParameterType("maxNumberOfSubjects", C_PARAM_USER_DEFINED)
    }
    if (identical(as.integer(accrualSetup$accrualTime), C_ACCRUAL_TIME_DEFAULT) ||
            identical(
                as.integer(c(0L, accrualSetup$.getAccrualTimeWithoutLeadingZero())),
                C_ACCRUAL_TIME_DEFAULT
            )) {
        designPlan$.setParameterType("accrualTime", C_PARAM_DEFAULT_VALUE)
    } else {
        designPlan$.setParameterType("accrualTime", accrualSetup$.getParameterType("accrualTime"))
    }
    if (length(designPlan$accrualIntensity) == 1 &&
            designPlan$accrualIntensity == C_ACCRUAL_INTENSITY_DEFAULT) {
        designPlan$.setParameterType("accrualIntensity", C_PARAM_DEFAULT_VALUE)
    } else {
        designPlan$.setParameterType(
            "accrualIntensity",
            accrualSetup$.getParameterType("accrualIntensity")
        )
    }
    .assertIsSingleNumber(designPlan$eventTime, "eventTime")
    .assertIsSingleNumber(designPlan$allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsSingleNumber(designPlan$kappa, "kappa")
    if (objectType == "power") {
        .assertIsValidMaxNumberOfSubjects(designPlan$maxNumberOfSubjects)
    }
    .assertIsSingleNumber(designPlan$dropoutRate1, "dropoutRate1")
    .assertIsSingleNumber(designPlan$dropoutRate2, "dropoutRate2")
    .assertIsSingleNumber(designPlan$dropoutTime, "dropoutTime")

    # --- piecewise survival time model ---
    if (objectType == "power") {
        pi1Default <- C_PI_1_DEFAULT
    } else {
        pi1Default <- C_PI_1_SAMPLE_SIZE_DEFAULT
    }
    designPlan$.piecewiseSurvivalTime <- getPiecewiseSurvivalTime(
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        lambda2 = lambda2,
        lambda1 = lambda1,
        median1 = median1,
        median2 = median2,
        hazardRatio = hazardRatio,
        pi1 = pi1,
        pi2 = pi2,
        eventTime = eventTime,
        kappa = kappa,
        .pi1Default = pi1Default
    )
    designPlan$.setParameterType("kappa", designPlan$.piecewiseSurvivalTime$.getParameterType("kappa"))
    # With relative accrual intensities (all < 1) only a single default pi1 is supported
    if (designPlan$.piecewiseSurvivalTime$.getParameterType("pi1") == C_PARAM_DEFAULT_VALUE &&
            length(designPlan$.piecewiseSurvivalTime$pi1) > 1 &&
            length(accrualSetup$accrualIntensity) > 1 &&
            all(accrualSetup$accrualIntensity < 1)) {
        designPlan$.piecewiseSurvivalTime$pi1 <- designPlan$.piecewiseSurvivalTime$pi1[1]
        warning("Only the first default 'pi1' (", designPlan$.piecewiseSurvivalTime$pi1, ") was used ",
            "because the accrual intensities (", .arrayToString(accrualSetup$accrualIntensity), ") ",
            "were defined relative (all accrual intensities are < 1)",
            call. = FALSE
        )
    }
    .initDesignPlanSurvival(designPlan)

    # --- follow-up time handling, depending on accountForObservationTimes ---
    designPlan$.setParameterType("followUpTime", C_PARAM_NOT_APPLICABLE)
    if (designPlan$accountForObservationTimes) {
        .assertIsSingleNumber(dropoutRate1, "dropoutRate1")
        .assertIsSingleNumber(dropoutRate2, "dropoutRate2")
        .assertIsSingleNumber(dropoutTime, "dropoutTime")
        if (!is.na(dropoutTime) && dropoutTime <= 0) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dropoutTime' (", dropoutTime, ") must be > 0")
        }
        if (dropoutRate1 < 0 || dropoutRate1 >= 1) {
            stop(
                C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
                "'dropoutRate1' (", dropoutRate1, ") is out of bounds [0; 1)"
            )
        }
        if (dropoutRate2 < 0 || dropoutRate2 >= 1) {
            stop(
                C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
                "'dropoutRate2' (", dropoutRate2, ") is out of bounds [0; 1)"
            )
        }
        if (!is.na(eventTime) && eventTime <= 0) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'eventTime' (", eventTime, ") must be > 0")
        }
        .assertIsValidAccrualTime(accrualSetup$.getAccrualTimeWithoutLeadingZero())
        .assertIsValidFollowUpTime(followUpTime)
        .setValueAndParameterType(designPlan, "followUpTime", followUpTime, C_FOLLOW_UP_TIME_DEFAULT)
        if (.isUserDefinedMaxNumberOfSubjects(designPlan) &&
                !is.null(followUpTime) && length(followUpTime) == 1 && !is.na(followUpTime)) {
            # with a user-defined maxNumberOfSubjects the follow-up time is derived
            warning("Follow-up time will be calculated, value entered (", followUpTime,
                ") is not taken into account",
                call. = FALSE
            )
        } else if (is.na(followUpTime)) {
            designPlan$followUpTime <- C_FOLLOW_UP_TIME_DEFAULT
            designPlan$.setParameterType("followUpTime", C_PARAM_DEFAULT_VALUE)
        }
        if (objectType == "power") {
            designPlan$followUpTime <- NA_real_
            designPlan$.setParameterType("followUpTime", C_PARAM_NOT_APPLICABLE)
        }
    } else {
        # event-probability-only calculation: observation-time related
        # parameters are not applicable; warn about any the user supplied
        for (p in c(
            "accrualTime", "accrualIntensity", "eventTime",
            "dropoutRate1", "dropoutRate2", "dropoutTime",
            "followUpTime", "analysisTime", "studyDuration"
        )) {
            designPlan$.setParameterType(p, C_PARAM_NOT_APPLICABLE)
        }
        if (designPlan$.getParameterType("accrualTime") == C_PARAM_USER_DEFINED ||
                !identical(accrualTime, C_ACCRUAL_TIME_DEFAULT)) {
            designPlan$.warnInCaseArgumentExists(accrualSetup$accrualTime, "accrualTime")
        }
        designPlan$.warnInCaseArgumentExists(dropoutRate1, "dropoutRate1")
        designPlan$.warnInCaseArgumentExists(dropoutRate2, "dropoutRate2")
        if (!identical(dropoutTime, C_DROP_OUT_TIME_DEFAULT)) {
            designPlan$.warnInCaseArgumentExists(dropoutTime, "dropoutTime")
        }
        designPlan$.warnInCaseArgumentExists(maxNumberOfSubjects, "maxNumberOfSubjects")
        if (!identical(followUpTime, C_FOLLOW_UP_TIME_DEFAULT)) {
            designPlan$.warnInCaseArgumentExists(followUpTime, "followUpTime")
        }
    }
    .setValueAndParameterType(designPlan, "directionUpper", directionUpper, TRUE)
    if (objectType == "power") {
        .setValueAndParameterType(designPlan, "maxNumberOfEvents", maxNumberOfEvents, NA_real_)
        designPlan$.setParameterType("accountForObservationTimes", C_PARAM_NOT_APPLICABLE)
    }
    return(designPlan)
}

# Returns TRUE if 'maxNumberOfSubjects' on the design plan was explicitly
# provided by the user (by parameter type, or by a positive scalar value).
.isUserDefinedMaxNumberOfSubjects <- function(designPlan) {
    if (!is.null(designPlan) && length(designPlan$.getParameterType("maxNumberOfSubjects")) > 0) {
        if (designPlan$.getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED) {
            return(TRUE)
        }
    }
    return(!is.null(designPlan$maxNumberOfSubjects) &&
        length(designPlan$maxNumberOfSubjects) == 1 &&
        !is.na(designPlan$maxNumberOfSubjects) &&
        designPlan$maxNumberOfSubjects > 0)
}

.initDesignPlanSurvivalByPiecewiseSurvivalTimeObject <-
function(designPlan, pwstSetup) {
    # Copies the effect-size parameters (pi1/pi2, hazardRatio, lambdas,
    # medians, piecewise survival times, event time) and their parameter
    # types from the piecewise survival time setup onto the design plan.
    # Returns the number of result rows: length of hazardRatio for
    # lambda-based models, length of pi1 otherwise.
    designPlan$pi1 <- pwstSetup$pi1
    designPlan$.setParameterType("pi1", pwstSetup$.getParameterType("pi1"))
    designPlan$pi2 <- pwstSetup$pi2
    designPlan$.setParameterType("pi2", pwstSetup$.getParameterType("pi2"))
    designPlan$hazardRatio <- pwstSetup$hazardRatio
    designPlan$.setParameterType("hazardRatio", pwstSetup$.getParameterType("hazardRatio"))
    designPlan$lambda1 <- pwstSetup$lambda1
    designPlan$.setParameterType("lambda1", pwstSetup$.getParameterType("lambda1"))
    designPlan$lambda2 <- pwstSetup$lambda2
    designPlan$.setParameterType("lambda2", pwstSetup$.getParameterType("lambda2"))
    designPlan$median1 <- pwstSetup$median1
    designPlan$.setParameterType("median1", pwstSetup$.getParameterType("median1"))
    designPlan$median2 <- pwstSetup$median2
    designPlan$.setParameterType("median2", pwstSetup$.getParameterType("median2"))
    designPlan$piecewiseSurvivalTime <- pwstSetup$piecewiseSurvivalTime
    designPlan$.setParameterType(
        "piecewiseSurvivalTime",
        pwstSetup$.getParameterType("piecewiseSurvivalTime")
    )
    designPlan$eventTime <- pwstSetup$eventTime
    designPlan$.setParameterType("eventTime", pwstSetup$.getParameterType("eventTime"))
    if (pwstSetup$.isLambdaBased()) {
        return(length(designPlan$hazardRatio))
    }
    return(length(designPlan$pi1))
}

# Initializes the survival design plan: copies the piecewise survival time
# parameters onto the plan and resolves 'accountForObservationTimes'
# (forced to TRUE for lambda-based models and for user-defined
# maxNumberOfSubjects). Returns the number of result rows.
.initDesignPlanSurvival <- function(designPlan) {
    numberOfResults <- .initDesignPlanSurvivalByPiecewiseSurvivalTimeObject(
        designPlan,
        designPlan$.piecewiseSurvivalTime
    )
    if (designPlan$.piecewiseSurvivalTime$.isLambdaBased()) {
        # Lambda-based models always account for observation times.
        # Bug fix: the first condition previously also matched
        # '!accountForObservationTimes', silently coercing a user-defined
        # FALSE to TRUE and making the warning branch below unreachable.
        # Now an unset/NA value defaults to TRUE without a warning, while an
        # explicit FALSE is overridden with a warning, as intended.
        if (length(designPlan$accountForObservationTimes) == 0 ||
                is.na(designPlan$accountForObservationTimes)) {
            designPlan$accountForObservationTimes <- TRUE
            designPlan$.setParameterType("accountForObservationTimes", C_PARAM_DEFAULT_VALUE)
        }
        if (!designPlan$accountForObservationTimes) {
            designPlan$accountForObservationTimes <- TRUE
            warning("'accountForObservationTimes' was set to TRUE ",
                "because piecewise exponential survival function is enabled",
                call. = FALSE
            )
        }
    } else {
        if (.isUserDefinedMaxNumberOfSubjects(designPlan)) {
            # A user-defined maxNumberOfSubjects requires observation times
            if (length(designPlan$accountForObservationTimes) != 0 &&
                    !is.na(designPlan$accountForObservationTimes) &&
                    !designPlan$accountForObservationTimes) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'accountForObservationTimes' must be TRUE because 'maxNumberOfSubjects' is > 0"
                )
            }
            designPlan$.setParameterType("accountForObservationTimes", C_PARAM_GENERATED)
            designPlan$accountForObservationTimes <- TRUE
        } else {
            if (length(designPlan$accountForObservationTimes) == 0 ||
                    is.na(designPlan$accountForObservationTimes)) {
                designPlan$accountForObservationTimes <- FALSE
                designPlan$.setParameterType("accountForObservationTimes", C_PARAM_DEFAULT_VALUE)
            } else {
                designPlan$.setParameterType(
                    "accountForObservationTimes",
                    ifelse(designPlan$accountForObservationTimes,
                        C_PARAM_DEFAULT_VALUE,
                        C_PARAM_USER_DEFINED
                    )
                )
            }
        }
    }
    designPlan$.setParameterType("chi", C_PARAM_NOT_APPLICABLE)
    if (designPlan$.isSampleSizeObject()) {
        designPlan$.setParameterType("directionUpper", C_PARAM_NOT_APPLICABLE)
        designPlan$.setParameterType("maxNumberOfEvents", C_PARAM_NOT_APPLICABLE)
    }
    return(numberOfResults = numberOfResults)
}

# Warns that the pi value named by 'piValueName' ("pi1" or "pi2") will be
# ignored because the piecewise exponential survival function is enabled,
# and marks the parameter as not applicable.
.warnInCaseOfDefinedPiValue <- function(designPlan, piValueName) {
    piValue <- designPlan[[piValueName]]
    # NOTE(review): '!is.na(piValue)' is evaluated before the length check;
    # for vectors of length != 1 this relies on `&&` semantics — verify.
    if (!is.null(piValue) && !is.na(piValue) && length(piValue) > 0) {
        designPlan$.setParameterType(piValueName, C_PARAM_NOT_APPLICABLE)
        # Bug fix: the message previously hardcoded "'pi2'" even though this
        # helper is generic over 'piValueName' (e.g. would mislabel 'pi1').
        warning("'", piValueName, "' (", .arrayToString(piValue), ") will be ignored ",
            "because piecewise exponential survival function is enabled",
            call.
= FALSE ) designPlan[[piValueName]] <- NA_real_ } } .getSampleSize <- function(designPlan) { if (.isTrialDesignPlanMeans(designPlan) || .isTrialDesignPlanRates(designPlan)) { if (identical(designPlan$allocationRatioPlanned, 0)) { designPlan$optimumAllocationRatio <- TRUE designPlan$.setParameterType("optimumAllocationRatio", C_PARAM_USER_DEFINED) } if (.isTrialDesignPlanMeans(designPlan)) { sampleSizeFixed <- .getSampleSizeFixedMeans( alpha = designPlan$getAlpha(), beta = designPlan$getBeta(), sided = designPlan$getSided(), twoSidedPower = designPlan$getTwoSidedPower(), normalApproximation = designPlan$normalApproximation, meanRatio = designPlan$meanRatio, thetaH0 = designPlan$thetaH0, alternative = designPlan$alternative, stDev = designPlan$stDev, groups = designPlan$groups, allocationRatioPlanned = designPlan$allocationRatioPlanned ) } else { sampleSizeFixed <- .getSampleSizeFixedRates( alpha = designPlan$getAlpha(), beta = designPlan$getBeta(), sided = designPlan$getSided(), normalApproximation = designPlan$normalApproximation, riskRatio = designPlan$riskRatio, thetaH0 = designPlan$thetaH0, pi1 = designPlan$pi1, pi2 = designPlan$pi2, groups = designPlan$groups, allocationRatioPlanned = designPlan$allocationRatioPlanned ) } # Fixed designPlan$nFixed <- sampleSizeFixed$nFixed designPlan$.setParameterType("nFixed", C_PARAM_GENERATED) if (designPlan$groups == 2) { designPlan$nFixed1 <- sampleSizeFixed$n1Fixed designPlan$nFixed2 <- sampleSizeFixed$n2Fixed designPlan$.setParameterType("nFixed1", C_PARAM_GENERATED) designPlan$.setParameterType("nFixed2", C_PARAM_GENERATED) designPlan$numberOfSubjects1 <- matrix(designPlan$nFixed1, nrow = 1) designPlan$numberOfSubjects2 <- matrix(designPlan$nFixed2, nrow = 1) } designPlan$numberOfSubjects <- matrix(designPlan$nFixed, nrow = 1) if (!is.null(sampleSizeFixed$allocationRatioPlanned) && (length(designPlan$allocationRatioPlanned) != length(sampleSizeFixed$allocationRatioPlanned) || sum(designPlan$allocationRatioPlanned == 
sampleSizeFixed$allocationRatioPlanned) != length(designPlan$allocationRatioPlanned))) {
            designPlan$allocationRatioPlanned <- sampleSizeFixed$allocationRatioPlanned
            designPlan$.setParameterType("allocationRatioPlanned", C_PARAM_GENERATED)
        }

        # Sequential design results
        if (designPlan$.design$kMax > 1) {
            designCharacteristics <- getDesignCharacteristics(designPlan$.design)
            if (.isTrialDesignPlanMeans(designPlan)) {
                sampleSizeSequential <- .getSampleSizeSequentialMeans(
                    sampleSizeFixed, designCharacteristics
                )
            } else {
                sampleSizeSequential <- .getSampleSizeSequentialRates(
                    sampleSizeFixed, designCharacteristics
                )
            }

            designPlan$informationRates <- sampleSizeSequential$informationRates
            if (ncol(designPlan$informationRates) == 1 &&
                    identical(designPlan$informationRates[, 1], designPlan$.design$informationRates)) {
                designPlan$.setParameterType("informationRates", C_PARAM_NOT_APPLICABLE)
            } else {
                designPlan$.setParameterType("informationRates", C_PARAM_GENERATED)
            }

            designPlan$maxNumberOfSubjects <- sampleSizeSequential$maxNumberOfSubjects
            designPlan$.setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED)
            if (designPlan$groups == 2) {
                designPlan$maxNumberOfSubjects1 <- .getNumberOfSubjects1(
                    designPlan$maxNumberOfSubjects, designPlan$allocationRatioPlanned
                )
                designPlan$maxNumberOfSubjects2 <- .getNumberOfSubjects2(
                    designPlan$maxNumberOfSubjects, designPlan$allocationRatioPlanned
                )
                designPlan$.setParameterType("maxNumberOfSubjects1", C_PARAM_GENERATED)
                designPlan$.setParameterType("maxNumberOfSubjects2", C_PARAM_GENERATED)
            }
            designPlan$numberOfSubjects <- sampleSizeSequential$numberOfSubjects
            designPlan$.setParameterType("numberOfSubjects", C_PARAM_GENERATED)
            if (designPlan$groups == 2) {
                designPlan$numberOfSubjects1 <- sampleSizeSequential$numberOfSubjects1
                designPlan$numberOfSubjects2 <- sampleSizeSequential$numberOfSubjects2
                designPlan$.setParameterType("numberOfSubjects1", C_PARAM_GENERATED)
                designPlan$.setParameterType("numberOfSubjects2", C_PARAM_GENERATED)
            }
            designPlan$expectedNumberOfSubjectsH0 <- sampleSizeSequential$expectedNumberOfSubjectsH0
            designPlan$expectedNumberOfSubjectsH01 <- sampleSizeSequential$expectedNumberOfSubjectsH01
            designPlan$expectedNumberOfSubjectsH1 <- sampleSizeSequential$expectedNumberOfSubjectsH1
            designPlan$.setParameterType("expectedNumberOfSubjectsH0", C_PARAM_GENERATED)
            designPlan$.setParameterType("expectedNumberOfSubjectsH01", C_PARAM_GENERATED)
            designPlan$.setParameterType("expectedNumberOfSubjectsH1", C_PARAM_GENERATED)

            designPlan$.setParameterType("eventsFixed", C_PARAM_NOT_APPLICABLE)
            designPlan$.setParameterType("nFixed1", C_PARAM_NOT_APPLICABLE)
            designPlan$.setParameterType("nFixed2", C_PARAM_NOT_APPLICABLE)
            designPlan$.setParameterType("nFixed", C_PARAM_NOT_APPLICABLE)

            if (designPlan$allocationRatioPlanned[1] == 1) {
                designPlan$.setParameterType("numberOfSubjects1", C_PARAM_NOT_APPLICABLE)
                designPlan$.setParameterType("numberOfSubjects2", C_PARAM_NOT_APPLICABLE)
            }

            if (!is.null(sampleSizeSequential$rejectPerStage)) {
                designPlan$rejectPerStage <- matrix(sampleSizeSequential$rejectPerStage,
                    nrow = designPlan$.design$kMax
                )
                designPlan$.setParameterType("rejectPerStage", C_PARAM_GENERATED)
                designPlan$earlyStop <- sum(designPlan$rejectPerStage[1:(designPlan$.design$kMax - 1), ])
                designPlan$.setParameterType("earlyStop", C_PARAM_GENERATED)
            }
            if (!is.null(sampleSizeSequential$futilityPerStage) &&
                    any(designPlan$.design$futilityBounds != C_FUTILITY_BOUNDS_DEFAULT)) {
                designPlan$futilityPerStage <- matrix(sampleSizeSequential$futilityPerStage,
                    nrow = designPlan$.design$kMax - 1
                )
                designPlan$.setParameterType("futilityPerStage", C_PARAM_GENERATED)
                designPlan$futilityStop <- sum(designPlan$futilityPerStage)
                designPlan$.setParameterType("futilityStop", C_PARAM_GENERATED)
                designPlan$earlyStop <- designPlan$earlyStop + sum(designPlan$futilityPerStage)
            }
        }

        .addEffectScaleBoundaryDataToDesignPlan(designPlan)

        return(designPlan)
    } else if (.isTrialDesignPlanSurvival(designPlan)) {
        # Fixed design results
        designPlan <- .getSampleSizeFixedSurvival(designPlan)

        # Sequential design results
        if (designPlan$.design$kMax > 1) {
            designCharacteristics <- getDesignCharacteristics(designPlan$.design)
            designPlan <- .getSampleSizeSequentialSurvival(designPlan, designCharacteristics)
        }

        if (designPlan$accountForObservationTimes && !any(is.na(designPlan$followUpTime)) &&
                all(designPlan$followUpTime == C_FOLLOW_UP_TIME_DEFAULT)) {
            designPlan$.setParameterType("followUpTime", C_PARAM_DEFAULT_VALUE)
        }

        .addEffectScaleBoundaryDataToDesignPlan(designPlan)

        # All accrual intensities < 1 are interpreted as relative intensities that
        # must be rescaled to absolute values once maxNumberOfSubjects is known.
        if (designPlan$.getParameterType("maxNumberOfSubjects") == C_PARAM_GENERATED &&
                designPlan$.accrualTime$.getParameterType("maxNumberOfSubjects") != C_PARAM_GENERATED &&
                all(designPlan$accrualIntensity < 1)) {
            numberOfDefinedAccrualIntensities <- length(designPlan$accrualIntensity)
            accrualTime <- designPlan$accrualTime
            if (length(accrualTime) > 0 && accrualTime[1] != 0) {
                accrualTime <- c(0, accrualTime)
            }
            if (any(designPlan$accrualIntensity < 0)) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'accrualIntensityRelative' (",
                    .arrayToString(designPlan$accrualIntensity), ") must be >= 0"
                )
            }
            designPlan$accrualIntensityRelative <- designPlan$accrualIntensity
            if (identical(designPlan$accrualIntensityRelative, C_ACCRUAL_INTENSITY_DEFAULT)) {
                designPlan$.setParameterType("accrualIntensityRelative", C_PARAM_NOT_APPLICABLE)
            } else {
                designPlan$.setParameterType(
                    "accrualIntensityRelative",
                    designPlan$.getParameterType("accrualIntensity")
                )
            }
            accrualIntensityAbsolute <- c()
            for (maxNumberOfSubjects in designPlan$maxNumberOfSubjects) {
                accrualSetup <- getAccrualTime(
                    accrualTime = accrualTime,
                    accrualIntensity = designPlan$accrualIntensityRelative,
                    accrualIntensityType = "relative",
                    maxNumberOfSubjects = maxNumberOfSubjects
                )
                accrualIntensityAbsolute <- c(accrualIntensityAbsolute, accrualSetup$accrualIntensity)
            }
            designPlan$accrualIntensity <- accrualIntensityAbsolute
            designPlan$.setParameterType("accrualIntensity", C_PARAM_GENERATED)

            if (numberOfDefinedAccrualIntensities > 1) {
                paramName <- NULL
                if (designPlan$.getParameterType("pi1") == C_PARAM_USER_DEFINED ||
                        designPlan$.getParameterType("pi1") == C_PARAM_DEFAULT_VALUE ||
                        designPlan$.getParameterType("pi2") == C_PARAM_USER_DEFINED) {
                    paramName <- "pi1"
                } else if (designPlan$.getParameterType("median1") == C_PARAM_USER_DEFINED ||
                        designPlan$.getParameterType("median2") == C_PARAM_USER_DEFINED) {
                    paramName <- "median1"
                }
                if (!is.null(paramName)) {
                    paramValue <- designPlan[[paramName]]
                    if (!is.null(paramValue) && length(paramValue) > 1) {
                        stop(
                            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                            "the definition of relative accrual intensities ",
                            "(all 'accrualIntensity' values < 1) ",
                            "is only available for a single value ",
                            "(", paramName, " = ", .arrayToString(
                                paramValue,
                                vectorLookAndFeelEnabled = TRUE
                            ), ")"
                        )
                    }
                }
            }
        }

        designPlan$maxNumberOfEvents <- designPlan$eventsPerStage[designPlan$.design$kMax, ]
        designPlan$.setParameterType("maxNumberOfEvents", C_PARAM_GENERATED)

        if (!any(is.na(designPlan$followUpTime))) {
            if (any(designPlan$followUpTime < -1e-02)) {
                warning("Accrual duration longer than maximal study ",
                    "duration (time to maximal number of events); followUpTime = ",
                    .arrayToString(designPlan$followUpTime),
                    call. = FALSE
                )
            }
        } else {
            # Fix: the previous revision indexed 'designPlan$hazardRatio[indices]'
            # with an undefined variable 'indices', which made this warning path
            # fail with an object-not-found error; report all hazard ratios instead.
            warning("Follow-up time could not be calculated for hazardRatio = ",
                .arrayToString(designPlan$hazardRatio),
                call. = FALSE
            )
        }

        if (designPlan$.getParameterType("accountForObservationTimes") != C_PARAM_USER_DEFINED) {
            designPlan$.setParameterType("accountForObservationTimes", C_PARAM_NOT_APPLICABLE)
        }

        designPlan$.setParameterType("chi", C_PARAM_NOT_APPLICABLE)

        .addStudyDurationToDesignPlan(designPlan)

        return(designPlan)
    }

    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "unknown trial plan class '", .getClassName(designPlan), "'")
}

#
# Fixed (single stage) sample size for one or two group designs with continuous
# endpoint; exact t-test based calculation by default, normal approximation on
# request. Returns a list with the input settings and nFixed (plus the per-group
# n1Fixed/n2Fixed for two groups).
#
.getSampleSizeFixedMeans <- function(..., alpha = 0.025, beta = 0.2, sided = 1,
        twoSidedPower = C_TWO_SIDED_POWER_DEFAULT, normalApproximation = FALSE,
        meanRatio = FALSE, thetaH0 = 0, alternative = C_ALTERNATIVE_DEFAULT,
        stDev = C_STDEV_DEFAULT, groups = 2,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT) {
    nFixed <- rep(NA_real_, length(alternative))
    for (i in 1:length(alternative)) {
        theta <- alternative[i]
        if (groups == 1) {
            if (sided == 1 || !twoSidedPower) {
                if (normalApproximation == FALSE) {
                    # double the upper bracket until the t-based power exceeds 1 - beta,
                    # then solve for n by one-dimensional root finding
                    up <- 2
                    while (stats::pt(
                        stats::qt(1 - alpha / sided, up - 1),
                        max(0.001, up - 1), sqrt(up) * abs(theta - thetaH0) / stDev
                    ) > beta) {
                        up <- 2 * up
                    }
                    nFixed[i] <- .getOneDimensionalRoot(
                        function(n) {
                            return(stats::pt(
                                stats::qt(1 - alpha / sided, max(0.001, n - 1)),
                                max(0.001, n - 1), sqrt(n) * abs(theta - thetaH0) / stDev
                            ) - beta)
                        },
                        lower = 0.001, upper = up, tolerance = 1e-04,
                        callingFunctionInformation = ".getSampleSizeFixedMeans"
                    )
                } else {
                    nFixed[i] <- (.getOneMinusQNorm(alpha / sided) + .getOneMinusQNorm(beta))^2 /
                        ((theta - thetaH0) / stDev)^2
                }
            } else {
                # two-sided power: both rejection regions contribute
                up <- 2
                while (stats::pt(
                    stats::qt(1 - alpha / 2, max(0.001, up - 1)),
                    max(0.001, up - 1), sqrt(up) * (theta - thetaH0) / stDev
                ) - stats::pt(
                    -stats::qt(1 - alpha / 2, max(0.001, up - 1)),
                    max(0.001, up - 1), sqrt(up) * (theta - thetaH0) / stDev
                ) > beta) {
                    up <- 2 * up
                }
                if (normalApproximation == FALSE) {
                    nFixed[i] <- .getOneDimensionalRoot(
                        function(n) {
                            return(stats::pt(
                                stats::qt(1 - alpha / 2, max(0.001, n - 1)),
                                max(0.001, n - 1), sqrt(n) * (theta - thetaH0) / stDev
                            ) - stats::pt(
                                -stats::qt(1
- alpha / 2, max(0.001, n - 1)),
                                max(0.001, n - 1), sqrt(n) * (theta - thetaH0) / stDev
                            ) - beta)
                        },
                        lower = 0.001, upper = up, tolerance = 1e-04,
                        callingFunctionInformation = ".getSampleSizeFixedMeans"
                    )
                } else {
                    nFixed[i] <- .getOneDimensionalRoot(
                        function(n) {
                            return(stats::pnorm(.getOneMinusQNorm(alpha / 2) -
                                sqrt(n) * (theta - thetaH0) / stDev) -
                                stats::pnorm(-.getOneMinusQNorm(alpha / 2) -
                                    sqrt(n) * (theta - thetaH0) / stDev) - beta)
                        },
                        lower = 0.001, upper = up, tolerance = 1e-04,
                        callingFunctionInformation = ".getSampleSizeFixedMeans"
                    )
                }
            }
        } else if (groups == 2) {
            if (sided == 1 || !twoSidedPower) {
                if (!meanRatio) {
                    # allocationRatioPlanned = 0 provides optimum sample size
                    if (allocationRatioPlanned == 0) {
                        allocationRatioPlanned <- 1
                    }
                    if (normalApproximation == FALSE) {
                        # bracket n2, then solve; total n follows from the allocation ratio
                        up <- 2
                        while (stats::pt(
                            stats::qt(1 - alpha / sided, up * (1 + allocationRatioPlanned) - 2),
                            up * (1 + allocationRatioPlanned) - 2,
                            sqrt(up) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) *
                                abs(theta - thetaH0) / stDev
                        ) > beta) {
                            up <- 2 * up
                        }
                        n2Fixed <- .getOneDimensionalRoot(
                            function(x) {
                                return(stats::pt(
                                    stats::qt(1 - alpha / sided, max(
                                        0.001,
                                        x * (1 + allocationRatioPlanned) - 2
                                    )),
                                    max(0.001, x * (1 + allocationRatioPlanned) - 2),
                                    sqrt(x) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) *
                                        abs(theta - thetaH0) / stDev
                                ) - beta)
                            },
                            lower = 0.001, upper = up, tolerance = 1e-04,
                            callingFunctionInformation = ".getSampleSizeFixedMeans"
                        )
                        nFixed[i] <- n2Fixed * (1 + allocationRatioPlanned)
                    } else {
                        nFixed[i] <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned *
                            (.getOneMinusQNorm(alpha / sided) + .getOneMinusQNorm(beta))^2 /
                            ((theta - thetaH0) / stDev)^2
                    }
                } else {
                    # mean ratio test
                    # allocationRatioPlanned = 0 provides optimum sample size
                    if (allocationRatioPlanned == 0) {
                        allocationRatioPlanned <- 1 / thetaH0
                    }
                    if (!normalApproximation) {
                        up <- 2
                        while (stats::pt(
                            stats::qt(
                                1 - alpha / sided,
                                up * (1 + allocationRatioPlanned) - 2
                            ),
                            up * (1 + allocationRatioPlanned) - 2,
                            sqrt(up * allocationRatioPlanned / (1 + allocationRatioPlanned * thetaH0^2)) *
                                abs(theta - thetaH0) / stDev
                        ) > beta) {
                            up <- 2 * up
                        }
                        n2Fixed <- .getOneDimensionalRoot(
                            function(n2) {
                                return(stats::pt(
                                    stats::qt(1 - alpha / sided, max(
                                        0.001,
                                        n2 * (1 + allocationRatioPlanned) - 2
                                    )),
                                    max(0.001, n2 * (1 + allocationRatioPlanned) - 2),
                                    sqrt(n2 * allocationRatioPlanned / (1 + allocationRatioPlanned * thetaH0^2)) *
                                        abs(theta - thetaH0) / stDev
                                ) - beta)
                            },
                            lower = 0.001, upper = up, tolerance = 1e-04,
                            callingFunctionInformation = ".getSampleSizeFixedMeans"
                        )
                        nFixed[i] <- n2Fixed * (1 + allocationRatioPlanned)
                    } else {
                        nFixed[i] <- (1 + 1 / allocationRatioPlanned +
                            thetaH0^2 * (1 + allocationRatioPlanned)) *
                            (.getOneMinusQNorm(alpha / sided) + .getOneMinusQNorm(beta))^2 /
                            ((theta - thetaH0) / stDev)^2
                    }
                }
            } else {
                # two-sided power, two groups
                if (!normalApproximation) {
                    if (allocationRatioPlanned == 0) {
                        allocationRatioPlanned <- 1
                    }
                    up <- 2
                    while (stats::pt(
                        stats::qt(1 - alpha / 2, max(0.001, up * (1 + allocationRatioPlanned) - 2)),
                        max(0.001, up * (1 + allocationRatioPlanned) - 2),
                        sqrt(up) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) *
                            (theta - thetaH0) / stDev
                    ) - stats::pt(
                        -stats::qt(
                            1 - alpha / 2,
                            up * (1 + allocationRatioPlanned) - 2
                        ),
                        up * (1 + allocationRatioPlanned) - 2,
                        sqrt(up) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) *
                            (theta - thetaH0) / stDev
                    ) > beta) {
                        up <- 2 * up
                    }
                    n2Fixed <- .getOneDimensionalRoot(
                        function(n2) {
                            return(stats::pt(
                                stats::qt(1 - alpha / 2, max(0.001, n2 * (1 + allocationRatioPlanned) - 2)),
                                max(0.001, n2 * (1 + allocationRatioPlanned) - 2),
                                sqrt(n2) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) *
                                    (theta - thetaH0) / stDev
                            ) - stats::pt(
                                -stats::qt(
                                    1 - alpha / 2,
                                    max(0.001, n2 * (1 + allocationRatioPlanned) - 2)
                                ),
                                max(0.001, n2 * (1 + allocationRatioPlanned) - 2),
                                sqrt(n2) * sqrt(allocationRatioPlanned / (1 + allocationRatioPlanned)) *
                                    (theta - thetaH0) / stDev
                            ) - beta)
                        },
                        lower = 0.001, upper = up, tolerance = 1e-04,
                        callingFunctionInformation = ".getSampleSizeFixedMeans"
                    )
                    nFixed[i] <- n2Fixed * (1 + allocationRatioPlanned)
                } else {
                    up <- 2
                    while (stats::pnorm(.getOneMinusQNorm(alpha / 2) -
                        sqrt(up / 4) * (theta - thetaH0) / stDev) -
                        stats::pnorm(-.getOneMinusQNorm(alpha / 2) -
                            sqrt(up / 4) * (theta - thetaH0) / stDev) > beta) {
                        up <- 2 * up
                    }
                    nFixed[i] <- (1 + allocationRatioPlanned)^2 / (4 * allocationRatioPlanned) *
                        .getOneDimensionalRoot(
                            function(n) {
                                return(stats::pnorm(.getOneMinusQNorm(alpha / 2) -
                                    sqrt(n / 4) * (theta - thetaH0) / stDev) -
                                    stats::pnorm(-.getOneMinusQNorm(alpha / 2) -
                                        sqrt(n / 4) * (theta - thetaH0) / stDev) - beta)
                            },
                            lower = 0.001, upper = up, tolerance = 1e-04,
                            callingFunctionInformation = ".getSampleSizeFixedMeans"
                        )
                }
            }
        }
    }
    if (groups == 1) {
        return(list(
            alpha = alpha, beta = beta, sided = sided, groups = groups,
            thetaH0 = thetaH0, alternative = alternative, stDev = stDev,
            normalApproximation = normalApproximation, nFixed = nFixed
        ))
    } else if (groups == 2) {
        n1Fixed <- nFixed * allocationRatioPlanned / (1 + allocationRatioPlanned)
        n2Fixed <- n1Fixed / allocationRatioPlanned
        return(list(
            alpha = alpha, beta = beta, sided = sided, groups = groups,
            allocationRatioPlanned = allocationRatioPlanned, thetaH0 = thetaH0,
            meanRatio = meanRatio, alternative = alternative, stDev = stDev,
            normalApproximation = normalApproximation,
            n1Fixed = n1Fixed, n2Fixed = n2Fixed, nFixed = nFixed
        ))
    }
}

#
# Expands a fixed means sample size result to a group sequential design:
# scales nFixed by the inflation factor, distributes subjects over the stages
# according to the information rates, and attaches expected sample sizes under
# H0, H01, and H1 from the design characteristics.
#
.getSampleSizeSequentialMeans <- function(fixedSampleSize, designCharacteristics) {
    kMax <- designCharacteristics$.design$kMax
    numberOfSubjects <- matrix(NA_real_, kMax, length(fixedSampleSize$alternative))
    numberOfSubjects1 <- matrix(NA_real_, kMax, length(fixedSampleSize$alternative))
    numberOfSubjects2 <- matrix(NA_real_, kMax, length(fixedSampleSize$alternative))
    maxNumberOfSubjects <- rep(NA_real_, length(fixedSampleSize$alternative))
    expectedNumberOfSubjectsH0 <- rep(NA_real_, length(fixedSampleSize$alternative))
expectedNumberOfSubjectsH01 <- rep(NA_real_, length(fixedSampleSize$alternative)) expectedNumberOfSubjectsH1 <- rep(NA_real_, length(fixedSampleSize$alternative)) informationRates <- designCharacteristics$information / designCharacteristics$shift for (i in (1:length(fixedSampleSize$alternative))) { maxNumberOfSubjects[i] <- fixedSampleSize$nFixed[i] * designCharacteristics$inflationFactor numberOfSubjects[, i] <- maxNumberOfSubjects[i] * c(informationRates[1], (informationRates[2:kMax] - informationRates[1:(kMax - 1)])) expectedNumberOfSubjectsH0[i] <- designCharacteristics$averageSampleNumber0 * fixedSampleSize$nFixed[i] expectedNumberOfSubjectsH01[i] <- designCharacteristics$averageSampleNumber01 * fixedSampleSize$nFixed[i] expectedNumberOfSubjectsH1[i] <- designCharacteristics$averageSampleNumber1 * fixedSampleSize$nFixed[i] if (fixedSampleSize$groups == 2) { if (length(fixedSampleSize$allocationRatioPlanned) > 1) { allocationRatioPlanned <- fixedSampleSize$allocationRatioPlanned[i] } else { allocationRatioPlanned <- fixedSampleSize$allocationRatioPlanned } numberOfSubjects1[, i] <- numberOfSubjects[, i] * allocationRatioPlanned / (1 + allocationRatioPlanned) numberOfSubjects2[, i] <- numberOfSubjects[, i] / (1 + allocationRatioPlanned) } } if (fixedSampleSize$groups == 1) { return(list( alpha = fixedSampleSize$alpha, beta = fixedSampleSize$beta, sided = fixedSampleSize$sided, groups = fixedSampleSize$groups, thetaH0 = fixedSampleSize$thetaH0, alternative = fixedSampleSize$alternative, stDev = fixedSampleSize$stDev, normalApproximation = fixedSampleSize$normalApproximation, informationRates = matrix(informationRates, ncol = 1), maxNumberOfSubjects = maxNumberOfSubjects, numberOfSubjects = .getColumnCumSum(numberOfSubjects), expectedNumberOfSubjectsH0 = expectedNumberOfSubjectsH0, expectedNumberOfSubjectsH01 = expectedNumberOfSubjectsH01, expectedNumberOfSubjectsH1 = expectedNumberOfSubjectsH1, rejectPerStage = designCharacteristics$rejectionProbabilities, 
futilityPerStage = designCharacteristics$futilityProbabilities )) } else { return(list( alpha = fixedSampleSize$alpha, beta = fixedSampleSize$beta, sided = fixedSampleSize$sided, groups = fixedSampleSize$groups, allocationRatioPlanned = fixedSampleSize$allocationRatioPlanned, thetaH0 = fixedSampleSize$thetaH0, alternative = fixedSampleSize$alternative, stDev = fixedSampleSize$stDev, normalApproximation = fixedSampleSize$normalApproximation, meanRatio = fixedSampleSize$meanRatio, informationRates = matrix(informationRates, ncol = 1), maxNumberOfSubjects = maxNumberOfSubjects, numberOfSubjects = .getColumnCumSum(numberOfSubjects), numberOfSubjects1 = .getColumnCumSum(numberOfSubjects1), numberOfSubjects2 = .getColumnCumSum(numberOfSubjects2), expectedNumberOfSubjectsH0 = expectedNumberOfSubjectsH0, expectedNumberOfSubjectsH01 = expectedNumberOfSubjectsH01, expectedNumberOfSubjectsH1 = expectedNumberOfSubjectsH1, rejectPerStage = designCharacteristics$rejectionProbabilities, futilityPerStage = designCharacteristics$futilityProbabilities )) } } .getColumnCumSum <- function(x) { if (is.matrix(x)) { result <- x for (i in 1:ncol(x)) { result[, i] <- cumsum(x[, i]) } return(result) } return(cumsum(x)) } .getFarringtonManningValuesDiff <- function(..., rate1, rate2, theta, allocation) { if (theta == 0) { ml1 <- (allocation * rate1 + rate2) / (1 + allocation) ml2 <- ml1 return(c(ml1, ml2)) } a <- 1 + 1 / allocation b <- -(1 + 1 / allocation + rate1 + rate2 / allocation + theta * (1 / allocation + 2)) c <- theta^2 + theta * (2 * rate1 + 1 / allocation + 1) + rate1 + rate2 / allocation d <- -theta * (1 + theta) * rate1 v <- b^3 / (3 * a)^3 - b * c / (6 * a^2) + d / (2 * a) if (!is.na(v) && (v == 0)) { u <- sqrt(b^2 / (3 * a)^2 - c / (3 * a)) w <- acos(-1) / 2 } else { u <- sign(v) * sqrt(b^2 / (3 * a)^2 - c / (3 * a)) w <- 1 / 3 * (acos(-1) + acos(v / u^3)) } ml1 <- min(max(0, 2 * u * cos(w) - b / (3 * a)), 1) ml2 <- min(max(0, ml1 - theta), 1) return(c(ml1, ml2)) } 
#
# Restricted maximum likelihood estimates (ml1, ml2) under
# H0: pi1 / pi2 = theta, following Farrington & Manning (1990):
# the smaller root of the corresponding quadratic.
#
.getFarringtonManningValuesRatio <- function(..., rate1, rate2, theta, allocation) {
    if (theta == 1) {
        ml1 <- (allocation * rate1 + rate2) / (1 + allocation)
        ml2 <- ml1
        return(c(ml1, ml2))
    }
    a <- 1 + 1 / allocation
    b <- -((1 + rate2 / allocation) * theta + 1 / allocation + rate1)
    c <- (rate1 + rate2 / allocation) * theta
    ml1 <- (-b - sqrt(b^2 - 4 * a * c)) / (2 * a)
    ml2 <- ml1 / theta
    return(c(ml1, ml2))
}

#
# @title
# Get Farrington Manning Values
#
# @description
# Calculates and returns the maximum likelihood estimates under H0.
#
# @details
# Calculation of maximum likelihood estimates under
# H0: pi1 - pi2 = theta or H0: pi1 / pi2 = theta
#
# @references
# Farrington & Manning (1990)
# Wassmer (2003)
#
# @keywords internal
#
.getFarringtonManningValues <- function(rate1, rate2, theta, allocation, method = c("diff", "ratio")) {
    method <- match.arg(method)
    if (method == "diff") {
        ml <- .getFarringtonManningValuesDiff(rate1 = rate1, rate2 = rate2, theta = theta, allocation = allocation)
    } else {
        ml <- .getFarringtonManningValuesRatio(rate1 = rate1, rate2 = rate2, theta = theta, allocation = allocation)
    }
    return(list(theta = theta, method = method, ml1 = ml[1], ml2 = ml[2]))
}

#
# Fixed (single stage) sample size for one or two group designs with binary
# endpoint. One group: exact binomial search (or normal approximation);
# two groups: Farrington-Manning score approach for rate difference or ratio.
# allocationRatioPlanned = 0 requests the optimal allocation per pi1 value.
#
# Note: the former revision performed the branch assignments via
# 'ifelse(cond, x <- A, x <- B)', which relies on ifelse's lazy evaluation of
# its yes/no arguments for a side effect; rewritten as plain assignments /
# if-else with identical behavior.
#
.getSampleSizeFixedRates <- function(..., alpha = 0.025, beta = 0.2, sided = 1,
        normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = 0,
        pi1 = seq(0.4, 0.6, 0.1), pi2 = 0.2, groups = 2, allocationRatioPlanned = 1) {
    if (groups == 1) {
        nFixed <- rep(NA_real_, length(pi1))
        for (i in 1:length(pi1)) {
            if (normalApproximation) {
                nFixed[i] <- (.getOneMinusQNorm(alpha / sided) * sqrt(thetaH0 * (1 - thetaH0)) +
                    .getOneMinusQNorm(beta) * sqrt(pi1[i] * (1 - pi1[i])))^2 / (pi1[i] - thetaH0)^2
            } else {
                # exact binomial test: upper-tailed if pi1 > thetaH0, else lower-tailed
                lower.tail <- pi1[i] <= thetaH0
                iterations <- 1
                if (lower.tail) {
                    # bracket by doubling, then bisect on n; give up after 50 doublings
                    nup <- 2
                    while ((stats::pbinom(stats::qbinom(alpha, nup, thetaH0, lower.tail = lower.tail) - 1,
                        nup, pi1[i],
                        lower.tail = lower.tail
                    ) < 1 - beta) && (iterations <= 50)) {
                        nup <- 2 * nup
                        iterations <- iterations + 1
                    }
                    if (iterations > 50) {
                        nFixed[i] <- Inf
                    } else {
                        prec <- 2
                        nlow <- 2
                        while (prec > 1) {
                            nFixed[i] <- round((nlow + nup) / 2)
                            if (stats::pbinom(stats::qbinom(alpha, nFixed[i], thetaH0, lower.tail = lower.tail) - 1,
                                nFixed[i], pi1[i],
                                lower.tail = lower.tail
                            ) < 1 - beta) {
                                nlow <- nFixed[i]
                            } else {
                                nup <- nFixed[i]
                            }
                            prec <- nup - nlow
                        }
                        if (stats::pbinom(stats::qbinom(alpha, nFixed[i], thetaH0, lower.tail = lower.tail) - 1,
                            nFixed[i], pi1[i],
                            lower.tail = lower.tail
                        ) < 1 - beta) {
                            nFixed[i] <- nFixed[i] + 1
                        }
                    }
                } else {
                    nup <- 2
                    while ((stats::pbinom(stats::qbinom(alpha, nup, thetaH0, lower.tail = lower.tail),
                        nup, pi1[i],
                        lower.tail = lower.tail
                    ) < 1 - beta) && (iterations <= 50)) {
                        nup <- 2 * nup
                        iterations <- iterations + 1
                    }
                    if (iterations > 50) {
                        nFixed[i] <- Inf
                    } else {
                        prec <- 2
                        nlow <- 2
                        while (prec > 1) {
                            nFixed[i] <- round((nlow + nup) / 2)
                            if (stats::pbinom(stats::qbinom(alpha, nFixed[i], thetaH0, lower.tail = lower.tail),
                                nFixed[i], pi1[i],
                                lower.tail = lower.tail
                            ) < 1 - beta) {
                                nlow <- nFixed[i]
                            } else {
                                nup <- nFixed[i]
                            }
                            prec <- nup - nlow
                        }
                        if (stats::pbinom(stats::qbinom(alpha, nFixed[i], thetaH0, lower.tail = lower.tail),
                            nFixed[i], pi1[i],
                            lower.tail = lower.tail
                        ) < 1 - beta) {
                            nFixed[i] <- nFixed[i] + 1
                        }
                    }
                }
            }
        }
        return(list(
            alpha = alpha, beta = beta, sided = sided, groups = groups,
            thetaH0 = thetaH0, pi1 = pi1,
            normalApproximation = normalApproximation, nFixed = nFixed
        ))
    }
    if (groups == 2) {
        n1Fixed <- rep(NA_real_, length(pi1))
        n2Fixed <- rep(NA_real_, length(pi1))
        nFixed <- rep(NA_real_, length(pi1))
        if (allocationRatioPlanned == 0) {
            allocationRatioPlannedVec <- rep(NA_real_, length(pi1))
        }
        for (i in 1:length(pi1)) {
            if (!riskRatio) {
                # allocationRatioPlanned = 0 provides optimum sample size
                if (allocationRatioPlanned == 0) {
                    allocationRatioPlannedVec[i] <- stats::optimize(function(x) {
                        fm <- .getFarringtonManningValues(
                            rate1 = pi1[i], rate2 = pi2,
                            theta = thetaH0, allocation = x, method = "diff"
                        )
                        n1 <- (.getOneMinusQNorm(alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) +
                            fm$ml2 * (1 - fm$ml2) * x) +
                            .getOneMinusQNorm(beta) * sqrt(pi1[i] * (1 - pi1[i]) +
                                pi2 * (1 - pi2) * x))^2 / (pi1[i] - pi2 - thetaH0)^2
                        return((1 + x) / x * n1)
                    }, interval = c(0, 5), tol = 0.0001)$minimum
                    fm <- .getFarringtonManningValues(
                        rate1 = pi1[i], rate2 = pi2, theta = thetaH0,
                        allocation = allocationRatioPlannedVec[i], method = "diff"
                    )
                    n1Fixed[i] <- (.getOneMinusQNorm(alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) +
                        fm$ml2 * (1 - fm$ml2) * allocationRatioPlannedVec[i]) +
                        .getOneMinusQNorm(beta) * sqrt(pi1[i] * (1 - pi1[i]) +
                            pi2 * (1 - pi2) * allocationRatioPlannedVec[i]))^2 /
                        (pi1[i] - pi2 - thetaH0)^2
                } else {
                    fm <- .getFarringtonManningValues(
                        rate1 = pi1[i], rate2 = pi2, theta = thetaH0,
                        allocation = allocationRatioPlanned, method = "diff"
                    )
                    n1Fixed[i] <- (.getOneMinusQNorm(alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) +
                        fm$ml2 * (1 - fm$ml2) * allocationRatioPlanned) +
                        .getOneMinusQNorm(beta) * sqrt(pi1[i] * (1 - pi1[i]) +
                            pi2 * (1 - pi2) * allocationRatioPlanned))^2 /
                        (pi1[i] - pi2 - thetaH0)^2
                }
            } else {
                if (allocationRatioPlanned == 0) {
                    # allocationRatioPlanned = 0 provides optimum sample size
                    allocationRatioPlannedVec[i] <- stats::optimize(function(x) {
                        fm <- .getFarringtonManningValues(
                            rate1 = pi1[i], rate2 = pi2,
                            theta = thetaH0, allocation = x, method = "ratio"
                        )
                        n1 <- (.getOneMinusQNorm(alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) +
                            fm$ml2 * (1 - fm$ml2) * x * thetaH0^2) +
                            .getOneMinusQNorm(beta) * sqrt(pi1[i] * (1 - pi1[i]) +
                                pi2 * (1 - pi2) * x * thetaH0^2))^2 /
                            (pi1[i] - thetaH0 * pi2)^2
                        return((1 + x) / x * n1)
                    }, interval = c(0, 5), tol = 0.0001)$minimum
                    fm <- .getFarringtonManningValues(
                        rate1 = pi1[i], rate2 = pi2, theta = thetaH0,
                        allocation = allocationRatioPlannedVec[i], method = "ratio"
                    )
                    n1Fixed[i] <- (.getOneMinusQNorm(alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) +
                        fm$ml2 * (1 - fm$ml2) * allocationRatioPlannedVec[i] * thetaH0^2) +
                        .getOneMinusQNorm(beta) * sqrt(pi1[i] * (1 - pi1[i]) +
                            pi2 * (1 - pi2) * allocationRatioPlannedVec[i] * thetaH0^2))^2 /
                        (pi1[i] - thetaH0 * pi2)^2
                } else {
                    fm <- .getFarringtonManningValues(
                        rate1 = pi1[i], rate2 = pi2, theta = thetaH0,
                        allocation = allocationRatioPlanned, method = "ratio"
                    )
                    n1Fixed[i] <- (.getOneMinusQNorm(alpha / sided) * sqrt(fm$ml1 * (1 - fm$ml1) +
                        fm$ml2 * (1 - fm$ml2) * allocationRatioPlanned * thetaH0^2) +
                        .getOneMinusQNorm(beta) * sqrt(pi1[i] * (1 - pi1[i]) +
                            pi2 * (1 - pi2) * allocationRatioPlanned * thetaH0^2))^2 /
                        (pi1[i] - thetaH0 * pi2)^2
                }
            }
        }
        if (allocationRatioPlanned == 0) {
            allocationRatioPlanned <- allocationRatioPlannedVec
        }
        n2Fixed <- n1Fixed / allocationRatioPlanned
        nFixed <- n1Fixed + n2Fixed
        return(list(
            alpha = alpha, beta = beta, sided = sided, groups = groups,
            allocationRatioPlanned = allocationRatioPlanned,
            thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2,
            normalApproximation = normalApproximation, riskRatio = riskRatio,
            n1Fixed = n1Fixed, n2Fixed = n2Fixed, nFixed = nFixed
        ))
    }
}

#
# Expands a fixed rates sample size result to a group sequential design;
# analogous to .getSampleSizeSequentialMeans, vectorized over pi1.
#
.getSampleSizeSequentialRates <- function(fixedSampleSize, designCharacteristics) {
    kMax <- designCharacteristics$.design$kMax
    numberOfSubjects <- matrix(NA_real_, kMax, length(fixedSampleSize$pi1))
    numberOfSubjects1 <- matrix(NA_real_, kMax, length(fixedSampleSize$pi1))
    numberOfSubjects2 <- matrix(NA_real_, kMax, length(fixedSampleSize$pi1))
    maxNumberOfSubjects <- rep(NA_real_, length(fixedSampleSize$pi1))
    expectedNumberOfSubjectsH0 <- rep(NA_real_, length(fixedSampleSize$pi1))
    expectedNumberOfSubjectsH01 <- rep(NA_real_, length(fixedSampleSize$pi1))
    expectedNumberOfSubjectsH1 <- rep(NA_real_, length(fixedSampleSize$pi1))
    informationRates <- designCharacteristics$information / designCharacteristics$shift
    for (i in 1:length(fixedSampleSize$pi1)) {
        maxNumberOfSubjects[i] <- fixedSampleSize$nFixed[i] * designCharacteristics$inflationFactor
        numberOfSubjects[, i] <- maxNumberOfSubjects[i] * c(
            informationRates[1],
            (informationRates[2:kMax] - informationRates[1:(kMax - 1)])
        )
        expectedNumberOfSubjectsH0[i] <-
designCharacteristics$averageSampleNumber0 * fixedSampleSize$nFixed[i]
        expectedNumberOfSubjectsH01[i] <- designCharacteristics$averageSampleNumber01 * fixedSampleSize$nFixed[i]
        expectedNumberOfSubjectsH1[i] <- designCharacteristics$averageSampleNumber1 * fixedSampleSize$nFixed[i]
        if (fixedSampleSize$groups == 2) {
            if (length(fixedSampleSize$allocationRatioPlanned) > 1) {
                allocationRatioPlanned <- fixedSampleSize$allocationRatioPlanned[i]
            } else {
                allocationRatioPlanned <- fixedSampleSize$allocationRatioPlanned
            }
            numberOfSubjects1[, i] <- numberOfSubjects[, i] * allocationRatioPlanned / (1 + allocationRatioPlanned)
            numberOfSubjects2[, i] <- numberOfSubjects[, i] / (1 + allocationRatioPlanned)
        }
    }
    if (fixedSampleSize$groups == 1) {
        return(list(
            alpha = fixedSampleSize$alpha, beta = fixedSampleSize$beta,
            sided = fixedSampleSize$sided, groups = fixedSampleSize$groups,
            thetaH0 = fixedSampleSize$thetaH0, pi1 = fixedSampleSize$pi1,
            normalApproximation = fixedSampleSize$normalApproximation,
            informationRates = matrix(informationRates, ncol = 1),
            maxNumberOfSubjects = maxNumberOfSubjects,
            numberOfSubjects = .getColumnCumSum(numberOfSubjects),
            expectedNumberOfSubjectsH0 = expectedNumberOfSubjectsH0,
            expectedNumberOfSubjectsH01 = expectedNumberOfSubjectsH01,
            expectedNumberOfSubjectsH1 = expectedNumberOfSubjectsH1,
            rejectPerStage = designCharacteristics$rejectionProbabilities,
            futilityPerStage = designCharacteristics$futilityProbabilities
        ))
    } else {
        return(list(
            alpha = fixedSampleSize$alpha, beta = fixedSampleSize$beta,
            sided = fixedSampleSize$sided, groups = fixedSampleSize$groups,
            allocationRatioPlanned = fixedSampleSize$allocationRatioPlanned,
            thetaH0 = fixedSampleSize$thetaH0,
            pi1 = fixedSampleSize$pi1, pi2 = fixedSampleSize$pi2,
            normalApproximation = fixedSampleSize$normalApproximation,
            riskRatio = fixedSampleSize$riskRatio,
            informationRates = matrix(informationRates, ncol = 1),
            maxNumberOfSubjects = maxNumberOfSubjects,
            numberOfSubjects = .getColumnCumSum(numberOfSubjects),
            numberOfSubjects1 = .getColumnCumSum(numberOfSubjects1),
            numberOfSubjects2 = .getColumnCumSum(numberOfSubjects2),
            expectedNumberOfSubjectsH0 = expectedNumberOfSubjectsH0,
            expectedNumberOfSubjectsH01 = expectedNumberOfSubjectsH01,
            expectedNumberOfSubjectsH1 = expectedNumberOfSubjectsH1,
            rejectPerStage = designCharacteristics$rejectionProbabilities,
            futilityPerStage = designCharacteristics$futilityProbabilities
        ))
    }
}

#
# Strips the mandatory leading 0 from a piecewise survival time definition and
# returns the remaining start times; returns NA_real_ for an undefined input
# and numeric(0) when only the leading 0 was given.
#
.getPiecewiseExpStartTimesWithoutLeadingZero <- function(piecewiseSurvivalTime) {
    if (is.null(piecewiseSurvivalTime) || length(piecewiseSurvivalTime) == 0 ||
            all(is.na(piecewiseSurvivalTime))) {
        return(NA_real_)
    }
    if (piecewiseSurvivalTime[1] != 0) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "the first value of 'piecewiseSurvivalTime' (",
            .arrayToString(piecewiseSurvivalTime), ") must be 0",
            call. = FALSE
        )
    }
    if (length(piecewiseSurvivalTime) == 1) {
        return(numeric(0))
    }
    # NOTE(review): unreachable - length is >= 2 whenever this point is reached
    # (length 0 and 1 are handled above); kept for defensive symmetry.
    if (length(piecewiseSurvivalTime) < 2) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "length of 'piecewiseSurvivalTime' (",
            length(piecewiseSurvivalTime), ") must be > 1"
        )
    }
    return(piecewiseSurvivalTime[2:length(piecewiseSurvivalTime)])
}

#
# Cumulative event probability at 'time' for a (piecewise) exponential or
# Weibull survival model with exponential dropout at rate 'phi'.
# A single lambda yields a closed-form Weibull/exponential expression; a
# piecewise definition is accumulated segment by segment below.
#
.getEventProbabilityFunction <- function(..., time, piecewiseLambda, piecewiseSurvivalTime, phi, kappa) {
    if (length(piecewiseLambda) == 1) {
        if (kappa != 1 && phi > 0) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Weibull distribution cannot ",
                "be used together with specified dropout rate (use simulation instead)",
                call.
= FALSE
            )
        }
        return(piecewiseLambda / (piecewiseLambda + phi) *
            pweibull(time, shape = kappa, scale = 1 / (piecewiseLambda + phi),
                lower.tail = TRUE, log.p = FALSE))
    }
    if (length(piecewiseSurvivalTime) != length(piecewiseLambda)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "length of 'piecewiseSurvivalTime' (", .arrayToString(piecewiseSurvivalTime),
            ") must be equal to length of 'piecewiseLambda' (",
            .arrayToString(piecewiseLambda), ")"
        )
    }
    piecewiseSurvivalTime <- .getPiecewiseExpStartTimesWithoutLeadingZero(piecewiseSurvivalTime)
    if (kappa != 1) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "Weibull distribution cannot be used for piecewise survival definition",
            call. = FALSE
        )
    }
    # Walk the segments, accumulating the CDF contribution of every completed
    # segment in 'cdfPart' (with 'cdfFactor' the cumulated hazard at the segment
    # start); return as soon as 'time' falls into the current segment.
    len <- length(piecewiseSurvivalTime)
    for (i in 1:len) {
        if (i == 1) {
            if (time <= piecewiseSurvivalTime[1]) {
                return(piecewiseLambda[1] / (piecewiseLambda[1] + phi) *
                    (1 - exp(-((piecewiseLambda[1] + phi) * time))))
            }
        } else if (i == 2) {
            cdfPart <- piecewiseLambda[1] / (piecewiseLambda[1] + phi) *
                (1 - exp(-((piecewiseLambda[1] + phi) * piecewiseSurvivalTime[1])))
            if (time <= piecewiseSurvivalTime[2]) {
                cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1]
                cdf <- cdfPart + piecewiseLambda[2] / (piecewiseLambda[2] + phi) * exp(-cdfFactor) * (
                    exp(-phi * piecewiseSurvivalTime[1]) -
                        exp(-piecewiseLambda[2] * (time - piecewiseSurvivalTime[1]) - phi * time))
                return(cdf)
            }
        } else if (i == 3) {
            cdfPart <- cdfPart + piecewiseLambda[2] / (piecewiseLambda[2] + phi) *
                exp(-piecewiseLambda[1] * piecewiseSurvivalTime[1]) * (
                    exp(-phi * piecewiseSurvivalTime[1]) -
                        exp(-piecewiseLambda[2] * (piecewiseSurvivalTime[2] - piecewiseSurvivalTime[1]) -
                            phi * piecewiseSurvivalTime[2]))
            if (time <= piecewiseSurvivalTime[3]) {
                cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1] +
                    piecewiseLambda[2] * (piecewiseSurvivalTime[2] - piecewiseSurvivalTime[1])
                cdf <- cdfPart + piecewiseLambda[3] / (piecewiseLambda[3] + phi) * exp(-cdfFactor) * (
                    exp(-phi * piecewiseSurvivalTime[2]) -
                        exp(-piecewiseLambda[3] * (time - piecewiseSurvivalTime[2]) - phi * time))
                return(cdf)
            }
        } else if (i > 3) {
            cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1] +
                sum(piecewiseLambda[2:(i - 2)] *
                    (piecewiseSurvivalTime[2:(i - 2)] - piecewiseSurvivalTime[1:(i - 3)]))
            cdfPart <- cdfPart + piecewiseLambda[i - 1] / (piecewiseLambda[i - 1] + phi) *
                exp(-cdfFactor) * (
                    exp(-phi * piecewiseSurvivalTime[i - 2]) -
                        exp(-piecewiseLambda[i - 1] *
                            (piecewiseSurvivalTime[i - 1] - piecewiseSurvivalTime[i - 2]) -
                            phi * piecewiseSurvivalTime[i - 1]))
            if (time <= piecewiseSurvivalTime[i]) {
                cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1] +
                    sum(piecewiseLambda[2:(i - 1)] *
                        (piecewiseSurvivalTime[2:(i - 1)] - piecewiseSurvivalTime[1:(i - 2)]))
                cdf <- cdfPart + piecewiseLambda[i] / (piecewiseLambda[i] + phi) * exp(-cdfFactor) * (
                    exp(-phi * piecewiseSurvivalTime[i - 1]) -
                        exp(-piecewiseLambda[i] * (time - piecewiseSurvivalTime[i - 1]) - phi * time))
                return(cdf)
            }
        }
    }
    # 'time' lies beyond the last start time: add the final open-ended segment
    if (len == 1) {
        cdfPart <- piecewiseLambda[1] / (piecewiseLambda[1] + phi) *
            (1 - exp(-((piecewiseLambda[1] + phi) * piecewiseSurvivalTime[1])))
    } else if (len == 2) {
        cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1]
        cdfPart <- cdfPart + piecewiseLambda[len] / (piecewiseLambda[len] + phi) * exp(-cdfFactor) * (
            exp(-phi * piecewiseSurvivalTime[len - 1]) -
                exp(-piecewiseLambda[len] * (piecewiseSurvivalTime[len] - piecewiseSurvivalTime[len - 1]) -
                    phi * piecewiseSurvivalTime[len]))
    } else {
        cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1] +
            sum(piecewiseLambda[2:(len - 1)] *
                (piecewiseSurvivalTime[2:(len - 1)] - piecewiseSurvivalTime[1:(len - 2)]))
        cdfPart <- cdfPart + piecewiseLambda[len] / (piecewiseLambda[len] + phi) * exp(-cdfFactor) * (
            exp(-phi * piecewiseSurvivalTime[len - 1]) -
                exp(-piecewiseLambda[len] * (piecewiseSurvivalTime[len] - piecewiseSurvivalTime[len - 1]) -
                    phi * piecewiseSurvivalTime[len]))
    }
    if (len == 1) {
        cdfFactor <- piecewiseLambda[1] * piecewiseSurvivalTime[1]
    } else {
        cdfFactor <- cdfFactor + piecewiseLambda[len] *
            (piecewiseSurvivalTime[len] - piecewiseSurvivalTime[len - 1])
    }
    cdf <- cdfPart + piecewiseLambda[len + 1] / (piecewiseLambda[len + 1] + phi) * exp(-cdfFactor) * (
        exp(-phi * piecewiseSurvivalTime[len]) -
            exp(-piecewiseLambda[len + 1] * (time - piecewiseSurvivalTime[len]) - phi * time))
    return(cdf)
}

# Vectorized wrapper: applies .getEventProbabilityFunction to each time point.
.getEventProbabilityFunctionVec <- function(..., timeVector, piecewiseLambda,
        piecewiseSurvivalTime, phi, kappa) {
    result <- c()
    for (time in timeVector) {
        result <- c(result, .getEventProbabilityFunction(
            time = time, piecewiseLambda = piecewiseLambda,
            piecewiseSurvivalTime = piecewiseSurvivalTime, phi = phi, kappa = kappa
        ))
    }
    return(result)
}

#' @title
#' Get Event Probabilities
#'
#' @description
#' Returns the event probabilities for specified parameters at given time vector.
#'
#' @param time A numeric vector with time values.
#' @inheritParams param_lambda1
#' @inheritParams param_lambda2
#' @inheritParams param_piecewiseSurvivalTime
#' @inheritParams param_hazardRatio
#' @inheritParams param_kappa
#' @inheritParams param_allocationRatioPlanned_sampleSize
#' @inheritParams param_accrualTime
#' @inheritParams param_accrualIntensity
#' @inheritParams param_accrualIntensityType
#' @inheritParams param_dropoutRate1
#' @inheritParams param_dropoutRate2
#' @inheritParams param_dropoutTime
#' @param maxNumberOfSubjects If \code{maxNumberOfSubjects > 0} is specified,
#' the end of accrual at specified \code{accrualIntensity} for the specified
#' number of subjects is determined or \code{accrualIntensity} is calculated
#' at fixed end of accrual.
#' @inheritParams param_three_dots
#'
#' @details
#' The function computes the overall event probabilities in a two treatment groups design.
#' For details of the parameters see \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}}.
#'
#' @return Returns a \code{\link{EventProbabilities}} object.
#' The following generics (R generic functions) are available for this result object:
#' \itemize{
#'   \item \code{\link[=names.FieldSet]{names()}} to obtain the field names,
#'   \item \code{\link[=print.FieldSet]{print()}} to print the object,
#'   \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object,
#'   \item \code{\link[=plot.EventProbabilities]{plot()}} to plot the object,
#'   \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}},
#'   \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}.
#' }
#' @template how_to_get_help_for_generics
#'
#' @template examples_get_event_probabilities
#'
#' @export
#'
getEventProbabilities <- function(time, ...,
        accrualTime = c(0, 12), # C_ACCRUAL_TIME_DEFAULT
        accrualIntensity = 0.1, # C_ACCRUAL_INTENSITY_DEFAULT
        accrualIntensityType = c("auto", "absolute", "relative"),
        kappa = 1,
        piecewiseSurvivalTime = NA_real_,
        lambda2 = NA_real_,
        lambda1 = NA_real_,
        allocationRatioPlanned = 1,
        hazardRatio = NA_real_,
        dropoutRate1 = 0, # C_DROP_OUT_RATE_1_DEFAULT
        dropoutRate2 = 0, # C_DROP_OUT_RATE_2_DEFAULT
        dropoutTime = 12, # C_DROP_OUT_TIME_DEFAULT
        maxNumberOfSubjects = NA_real_) {
    # --- argument validation ---
    .warnInCaseOfUnknownArguments(functionName = "getEventProbabilities", ...)
    .assertIsNumericVector(time, "time")
    .assertIsValidMaxNumberOfSubjects(maxNumberOfSubjects, naAllowed = TRUE)
    .assertIsValidAllocationRatioPlannedSampleSize(allocationRatioPlanned, maxNumberOfSubjects)
    .assertIsValidKappa(kappa)
    .assertIsSingleNumber(hazardRatio, "hazardRatio", naAllowed = TRUE)
    if (!is.na(dropoutTime) && dropoutTime <= 0) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dropoutTime' (", dropoutTime, ") must be > 0",
            call. = FALSE)
    }
    if (dropoutRate1 < 0 || dropoutRate1 >= 1) {
        stop(
            C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
            "'dropoutRate1' (", dropoutRate1, ") is out of bounds [0; 1)"
        )
    }
    if (dropoutRate2 < 0 || dropoutRate2 >= 1) {
        stop(
            C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS,
            "'dropoutRate2' (", dropoutRate2, ") is out of bounds [0; 1)"
        )
    }
    # resolve the accrual specification (time/intensity/type) into a validated setup
    accrualSetup <- getAccrualTime(
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        accrualIntensityType = accrualIntensityType,
        maxNumberOfSubjects = maxNumberOfSubjects
    )
    # note: accrualTime now has the leading zero removed
    accrualTime <- accrualSetup$.getAccrualTimeWithoutLeadingZero()
    accrualIntensity <- accrualSetup$accrualIntensity
    maxNumberOfSubjects <- accrualSetup$maxNumberOfSubjects
    # resolve the survival-time specification (piecewise times, lambdas, hazard ratio)
    setting <- getPiecewiseSurvivalTime(
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        lambda2 = lambda2,
        lambda1 = lambda1,
        hazardRatio = hazardRatio,
        kappa = kappa,
        delayedResponseAllowed = TRUE,
        .lambdaBased = TRUE
    )
    # a user-defined lambda1 vector without delayed response: only the first
    # value can be used; warn and re-resolve with lambda1[1]
    if (!setting$delayedResponseEnabled && length(setting$lambda1) > 1 &&
            setting$.getParameterType("lambda1") == C_PARAM_USER_DEFINED) {
        warning("Only the first 'lambda1' (", lambda1[1],
            ") was used to calculate event probabilities",
            call. = FALSE)
        setting <- getPiecewiseSurvivalTime(
            piecewiseSurvivalTime = piecewiseSurvivalTime,
            lambda2 = lambda2,
            lambda1 = lambda1[1],
            hazardRatio = hazardRatio,
            kappa = kappa,
            delayedResponseAllowed = TRUE,
            .lambdaBased = TRUE
        )
    }
    piecewiseSurvivalTime <- setting$piecewiseSurvivalTime
    lambda2 <- setting$lambda2
    lambda1 <- setting$lambda1
    hazardRatio <- setting$hazardRatio
    # convert drop-out rates (per 'dropoutTime') into exponential dropout hazards,
    # phi[1] for treatment, phi[2] for control
    phi <- -log(1 - c(dropoutRate1, dropoutRate2)) / dropoutTime
    if (length(accrualTime) != length(accrualIntensity)) {
        # + 1 reports the user's original length incl. the removed leading zero
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "length of 'accrualTime' (", (length(accrualTime) + 1),
            ") must be equal to length of 'accrualIntensity' (", length(accrualIntensity), ")"
        )
    }
    if (any(accrualIntensity <= 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all values of 'accrualIntensity' must be > 0")
    }
    if (any(accrualTime <= 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all values of 'accrualTime' must be > 0")
    }
    if (kappa != 1 && any(phi > 0)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "for Weibull distribution (kappa != 1) drop-out rates (phi) cannot be specified"
        )
    }
    if (any(phi < 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all drop-out rates (phi) must be >= 0")
    }
    .assertIsNumericVector(lambda2, "lambda2")
    if (any(lambda2 <= 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all rates (lambda2) must be > 0")
    }
    # --- build the result object and record parameter provenance ---
    eventProbabilities <- EventProbabilities(
        .piecewiseSurvivalTime = setting,
        .accrualTime = accrualSetup,
        time = time,
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        kappa = kappa,
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        lambda1 = lambda1,
        lambda2 = lambda2,
        allocationRatioPlanned = allocationRatioPlanned,
        hazardRatio = hazardRatio,
        dropoutRate1 = dropoutRate1,
        dropoutRate2 = dropoutRate2,
        dropoutTime = dropoutTime,
        maxNumberOfSubjects = maxNumberOfSubjects
    )
    eventProbabilities$.setParameterType("time", C_PARAM_USER_DEFINED)
    eventProbabilities$.setParameterType(
        "accrualTime",
        accrualSetup$.getParameterType("accrualTime")
    )
    eventProbabilities$.setParameterType(
        "accrualIntensity",
        accrualSetup$.getParameterType("accrualIntensity")
    )
    eventProbabilities$.setParameterType("kappa", setting$.getParameterType("kappa"))
    eventProbabilities$.setParameterType(
        "piecewiseSurvivalTime",
        setting$.getParameterType("piecewiseSurvivalTime")
    )
    eventProbabilities$.setParameterType("lambda1", setting$.getParameterType("lambda1"))
    eventProbabilities$.setParameterType("lambda2", setting$.getParameterType("lambda2"))
    .setValueAndParameterType(eventProbabilities, "allocationRatioPlanned", allocationRatioPlanned, 1)
    eventProbabilities$.setParameterType("hazardRatio", setting$.getParameterType("hazardRatio"))
    .setValueAndParameterType(eventProbabilities, "dropoutRate1", dropoutRate1, C_DROP_OUT_RATE_1_DEFAULT)
    .setValueAndParameterType(eventProbabilities, "dropoutRate2", dropoutRate2, C_DROP_OUT_RATE_2_DEFAULT)
    .setValueAndParameterType(eventProbabilities, "dropoutTime", dropoutTime, C_DROP_OUT_TIME_DEFAULT)
    eventProbabilities$.setParameterType(
        "maxNumberOfSubjects",
        accrualSetup$.getParameterType("maxNumberOfSubjects")
    )
    # --- compute per-group and overall event probabilities for each time value ---
    eventProbabilities$cumulativeEventProbabilities <- numeric(0)
    eventProbabilities$overallEventProbabilities <- numeric(0) # deprecated
    eventProbabilities$eventProbabilities1 <- numeric(0)
    eventProbabilities$eventProbabilities2 <- numeric(0)
    for (timeValue in time) {
        eventProbs <- .getEventProbabilitiesGroupwise(
            time = timeValue,
            accrualTimeVector = accrualSetup$.getAccrualTimeWithoutLeadingZero(),
            accrualIntensity = accrualSetup$accrualIntensity,
            lambda2 = lambda2,
            lambda1 = lambda1,
            piecewiseSurvivalTime = piecewiseSurvivalTime,
            phi = phi,
            kappa = kappa,
            allocationRatioPlanned = allocationRatioPlanned,
            hazardRatio = hazardRatio
        )
        eventProbabilities$cumulativeEventProbabilities <- c(
            eventProbabilities$cumulativeEventProbabilities,
            .getEventProbabilitiesOverall(eventProbs, allocationRatioPlanned)
        )
        eventProbabilities$overallEventProbabilities <-
            eventProbabilities$cumulativeEventProbabilities # deprecated
        eventProbabilities$eventProbabilities1 <- c(
            eventProbabilities$eventProbabilities1,
            eventProbs[1]
        )
        eventProbabilities$eventProbabilities2 <- c(
            eventProbabilities$eventProbabilities2,
            eventProbs[2]
        )
    }
    eventProbabilities$.setParameterType("cumulativeEventProbabilities", C_PARAM_GENERATED)
    eventProbabilities$.setParameterType("eventProbabilities1", C_PARAM_GENERATED)
    eventProbabilities$.setParameterType("eventProbabilities2", C_PARAM_GENERATED)
    return(eventProbabilities)
}

#' @title
#' Get Number Of Subjects
#'
#' @description
#' Returns the number of recruited subjects at given time vector.
#'
#' @param time A numeric vector with time values.
#' @inheritParams param_accrualTime
#' @inheritParams param_accrualIntensity
#' @inheritParams param_accrualIntensityType
#' @param maxNumberOfSubjects If \code{maxNumberOfSubjects > 0} is specified,
#' the end of accrual at specified \code{accrualIntensity} for the specified number of
#' subjects is determined or \code{accrualIntensity} is calculated at fixed end of accrual.
#' @inheritParams param_three_dots
#'
#' @details
#' Calculate number of subjects over time range at given accrual time vector
#' and accrual intensity. Intensity can either be defined in absolute or
#' relative terms (for the latter, \code{maxNumberOfSubjects} needs to be defined)\cr
#' The function is used by \code{\link[=getSampleSizeSurvival]{getSampleSizeSurvival()}}.
#'
#' @return Returns a \code{\link{NumberOfSubjects}} object.
#' The following generics (R generic functions) are available for this result object:
#' \itemize{
#'   \item \code{\link[=names.FieldSet]{names()}} to obtain the field names,
#'   \item \code{\link[=print.FieldSet]{print()}} to print the object,
#'   \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object,
#'   \item \code{\link[=plot.NumberOfSubjects]{plot()}} to plot the object,
#'   \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}},
#'   \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}.
#' }
#' @template how_to_get_help_for_generics
#'
#' @seealso \code{\link{AccrualTime}} for defining the accrual time.
#'
#' @template examples_get_number_of_subjects
#'
#' @export
#'
getNumberOfSubjects <- function(time, ...,
        accrualTime = c(0, 12), # C_ACCRUAL_TIME_DEFAULT
        accrualIntensity = 0.1, # C_ACCRUAL_INTENSITY_DEFAULT
        accrualIntensityType = c("auto", "absolute", "relative"),
        maxNumberOfSubjects = NA_real_) {
    .warnInCaseOfUnknownArguments(functionName = "getNumberOfSubjects", ...)
    .assertIsNumericVector(time, "time")
    # resolve the accrual specification into a validated setup object
    accrualSetup <- getAccrualTime(
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        accrualIntensityType = accrualIntensityType,
        maxNumberOfSubjects = maxNumberOfSubjects
    )
    # note: accrualTime now has the leading zero removed
    accrualTime <- accrualSetup$.getAccrualTimeWithoutLeadingZero()
    accrualIntensity <- accrualSetup$accrualIntensity
    maxNumberOfSubjects <- accrualSetup$maxNumberOfSubjects
    if (length(accrualTime) != length(accrualIntensity)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "length of 'accrualTime' (", length(accrualTime),
            ") must be equal to length of 'accrualIntensity' (", length(accrualIntensity), ")"
        )
    }
    if (any(accrualIntensity < 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all values of 'accrualIntensity' must be >= 0")
    }
    # absolute intensities are expected here; all values < 1 would mean a purely
    # relative definition without a resolvable subject count
    if (all(accrualIntensity < 1)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "at least one value of 'accrualIntensity' must be >= 1")
    }
    if (any(accrualTime <= 0)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "all values of 'accrualTime' must be > 0")
    }
    numberOfSubjects <- .getNumberOfSubjects(
        time = time,
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        maxNumberOfSubjects = maxNumberOfSubjects
    )
    result <- NumberOfSubjects(
        .accrualTime = accrualSetup,
        time = time,
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        maxNumberOfSubjects = maxNumberOfSubjects,
        numberOfSubjects = numberOfSubjects
    )
    result$.setParameterType("time", C_PARAM_USER_DEFINED)
    result$.setParameterType("accrualTime", accrualSetup$.getParameterType("accrualTime"))
    result$.setParameterType("accrualIntensity", accrualSetup$.getParameterType("accrualIntensity"))
    result$.setParameterType("maxNumberOfSubjects", accrualSetup$.getParameterType("maxNumberOfSubjects"))
    result$.setParameterType("numberOfSubjects", C_PARAM_GENERATED)
    return(result)
}

# Returns the event rate (lambda) for the requested group:
# group 1 (treatment) uses lambda1 when fully specified, otherwise it is
# derived from lambda2 via hazardRatio^(1 / kappa); group 2 uses lambda2 as-is.
.getLambda <- function(..., groupNumber, lambda2, lambda1, hazardRatio, kappa) {
    if (groupNumber == 1) {
        if (!any(is.na(lambda1))) {
            return(lambda1)
        }
        lambda2 <- lambda2 * hazardRatio^(1 / kappa)
    }
    return(lambda2)
}

# Computes the event probability at calendar 'time' separately for the two
# groups by integrating the subject-level event probability over the accrual
# distribution (densityVector = normalized accrual intensity per interval).
# Returns c(treatment, control). The first loop handles 'time' falling within
# the accrual period; the code after it handles 'time' after accrual end.
.getEventProbabilitiesGroupwise <- function(..., time, accrualTimeVector, accrualIntensity,
        lambda2, lambda1, piecewiseSurvivalTime, phi, kappa, allocationRatioPlanned, hazardRatio) {
    .assertIsSingleNumber(time, "time")
    if (length(accrualTimeVector) > 1 && accrualTimeVector[1] == 0) {
        accrualTimeVector <- accrualTimeVector[2:length(accrualTimeVector)]
    }
    accrualTimeVectorLength <- length(accrualTimeVector)
    # interval widths of the accrual periods
    densityIntervals <- accrualTimeVector
    if (accrualTimeVectorLength > 1) {
        densityIntervals[2:accrualTimeVectorLength] <- accrualTimeVector[2:accrualTimeVectorLength] -
            accrualTimeVector[1:(accrualTimeVectorLength - 1)]
    }
    if (length(densityIntervals) > 1 && length(accrualIntensity) > 1 &&
            length(densityIntervals) != length(accrualIntensity)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'densityIntervals' (", .arrayToString(densityIntervals),
            ") and 'accrualIntensity' (", .arrayToString(accrualIntensity),
            ") must have same length"
        )
    }
    # accrual density (integrates to 1 over the accrual period)
    densityVector <- accrualIntensity / sum(densityIntervals * accrualIntensity)
    eventProbs <- rep(NA_real_, 2)
    for (k in 1:accrualTimeVectorLength) {
        if (time <= accrualTimeVector[k]) {
            for (groupNumber in c(1, 2)) { # two groups: 1 = treatment, 2 = control
                lambdaTemp <- .getLambda(
                    groupNumber = groupNumber, lambda2 = lambda2, lambda1 = lambda1,
                    hazardRatio = hazardRatio, kappa = kappa
                )
                # integrand: event probability as a function of follow-up duration
                inner <- function(x) {
                    .getEventProbabilityFunctionVec(
                        timeVector = x,
                        piecewiseLambda = lambdaTemp,
                        piecewiseSurvivalTime = piecewiseSurvivalTime,
                        phi = phi[groupNumber],
                        kappa = kappa
                    )
                }
                timeValue1 <- 0
                if (k > 1) {
                    timeValue1 <- time - accrualTimeVector[1]
                }
                eventProbs[groupNumber] <- densityVector[1] * integrate(inner, timeValue1, time)$value
                if (k > 2) {
                    for (j in 2:(k - 1)) {
                        eventProbs[groupNumber] <- eventProbs[groupNumber] +
                            densityVector[j] * integrate(
                                inner,
                                time - accrualTimeVector[j],
                                time - accrualTimeVector[j - 1]
                            )$value
                    }
                }
                if (k > 1) {
                    eventProbs[groupNumber] <- eventProbs[groupNumber] + densityVector[k] *
                        integrate(inner, 0, time - accrualTimeVector[k - 1])$value
                }
            }
            return(eventProbs)
        }
    }
    # 'time' is after the end of accrual: integrate over all accrual intervals
    for (groupNumber in c(1, 2)) {
        lambdaTemp <- .getLambda(
            groupNumber = groupNumber, lambda2 = lambda2, lambda1 = lambda1,
            hazardRatio = hazardRatio, kappa = kappa
        )
        inner <- function(x) {
            .getEventProbabilityFunctionVec(
                timeVector = x,
                piecewiseLambda = lambdaTemp,
                piecewiseSurvivalTime = piecewiseSurvivalTime,
                phi = phi[groupNumber],
                kappa = kappa
            )
        }
        eventProbs[groupNumber] <- densityVector[1] *
            integrate(inner, time - accrualTimeVector[1], time)$value
        if (accrualTimeVectorLength > 1) {
            for (j in (2:accrualTimeVectorLength)) {
                eventProbs[groupNumber] <- eventProbs[groupNumber] +
                    densityVector[j] * integrate(
                        inner,
                        time - accrualTimeVector[j],
                        time - accrualTimeVector[j - 1]
                    )$value
            }
        }
    }
    return(eventProbs)
}

# Allocation-weighted average of the two group-wise event probabilities
# (eventProbs[1] = treatment, eventProbs[2] = control).
.getEventProbabilitiesOverall <- function(eventProbs, allocationRatioPlanned) {
    return((allocationRatioPlanned * eventProbs[1] + eventProbs[2]) / (1 + allocationRatioPlanned))
}

# Convenience wrapper: overall event probability at 'time', i.e. the
# allocation-weighted combination of the group-wise probabilities.
.getEventProbabilities <- function(..., time, accrualTimeVector, accrualIntensity,
        lambda2, lambda1, piecewiseSurvivalTime, phi, kappa, allocationRatioPlanned, hazardRatio) {
    eventProbs <- .getEventProbabilitiesGroupwise(
        time = time,
        accrualTimeVector = accrualTimeVector,
        accrualIntensity = accrualIntensity,
        lambda2 = lambda2,
        lambda1 = lambda1,
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        phi = phi,
        kappa = kappa,
        allocationRatioPlanned = allocationRatioPlanned,
        hazardRatio = hazardRatio
    )
    return(.getEventProbabilitiesOverall(eventProbs, allocationRatioPlanned))
}

# Fixed-design number of events for a log-rank based survival comparison.
# Supports the Schoenfeld, Freedman, and Hsieh-Freedman approximations; for a
# two-sided design with twoSidedPower the closed-form value is refined by a
# one-dimensional root search on the exact power equation.
.getEventsFixed <- function(...,
        typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"),
        twoSidedPower, alpha, beta, sided, hazardRatio, thetaH0, allocationRatioPlanned) {
    typeOfComputation <- match.arg(typeOfComputation)
    if (typeOfComputation == "Schoenfeld") {
        eventsFixed <- (.getOneMinusQNorm(alpha / sided) + .getOneMinusQNorm(beta))^2 /
            (log(hazardRatio) - log(thetaH0))^2 * (1 + allocationRatioPlanned)^2 /
allocationRatioPlanned # (completes the Schoenfeld formula started on the previous line)
        if (twoSidedPower && (sided == 2)) {
            # refine via the exact two-sided power equation
            up <- 2 * eventsFixed
            eventsFixed <- .getOneDimensionalRoot(
                function(n) {
                    return(stats::pnorm(.getOneMinusQNorm(alpha / 2) - sqrt(n) *
                        (log(hazardRatio) - log(thetaH0)) * sqrt(allocationRatioPlanned) /
                        (1 + allocationRatioPlanned)) -
                        stats::pnorm(-.getOneMinusQNorm(alpha / 2) - sqrt(n) *
                            (log(hazardRatio) - log(thetaH0)) * sqrt(allocationRatioPlanned) /
                            (1 + allocationRatioPlanned)) - beta)
                },
                lower = 0.001, upper = up, tolerance = 1e-04,
                callingFunctionInformation = ".getEventsFixed"
            )
        }
        return(eventsFixed)
    }
    if (typeOfComputation == "Freedman") {
        eventsFixed <- (.getOneMinusQNorm(alpha / sided) + .getOneMinusQNorm(beta))^2 *
            (1 + hazardRatio * allocationRatioPlanned)^2 /
            (1 - hazardRatio)^2 / allocationRatioPlanned
        if (twoSidedPower && (sided == 2)) {
            up <- 2 * eventsFixed
            eventsFixed <- .getOneDimensionalRoot(
                function(n) {
                    return(stats::pnorm(.getOneMinusQNorm(alpha / 2) - sqrt(n) *
                        sqrt(allocationRatioPlanned) * (1 - hazardRatio) /
                        (1 + allocationRatioPlanned * hazardRatio)) -
                        stats::pnorm(-.getOneMinusQNorm(alpha / 2) - sqrt(n) *
                            sqrt(allocationRatioPlanned) * (1 - hazardRatio) /
                            (1 + allocationRatioPlanned * hazardRatio)) - beta)
                },
                lower = 0.001, upper = up, tolerance = 1e-04,
                callingFunctionInformation = ".getEventsFixed"
            )
        }
        return(eventsFixed)
    }
    if (typeOfComputation == "HsiehFreedman") {
        eventsFixed <- (.getOneMinusQNorm(alpha / sided) + .getOneMinusQNorm(beta))^2 *
            (1 + hazardRatio)^2 / (1 - hazardRatio)^2 *
            (1 + allocationRatioPlanned)^2 / (4 * allocationRatioPlanned)
        if (twoSidedPower && sided == 2) {
            up <- 2 * eventsFixed
            eventsFixed <- .getOneDimensionalRoot(
                function(n) {
                    return(stats::pnorm(.getOneMinusQNorm(alpha / 2) - sqrt(n) * 2 *
                        sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) *
                        (1 - hazardRatio) / (1 + hazardRatio)) -
                        stats::pnorm(-.getOneMinusQNorm(alpha / 2) - sqrt(n) * 2 *
                            sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) *
                            (1 - hazardRatio) / (1 + hazardRatio)) - beta)
                },
                lower = 0.001, upper = up, tolerance = 1e-04,
                callingFunctionInformation = ".getEventsFixed"
            )
        }
        return(eventsFixed)
    }
}

# Fills the fixed-design (single-stage) sample size fields of 'designPlan':
# eventsFixed (required events), chi (event probability), nFixed / nFixed1 /
# nFixed2 (subjects overall / per group). Supports optimum allocation search
# (allocationRatioPlanned == 0) and a user-defined maxNumberOfSubjects, in
# which case the follow-up time is solved for instead. Mutates and returns
# 'designPlan' (a reference object).
.getSampleSizeFixedSurvival <- function(designPlan) {
    alpha <- designPlan$getAlpha()
    beta <- designPlan$getBeta()
    sided <- designPlan$getSided()
    twoSidedPower <- designPlan$getTwoSidedPower()
    typeOfComputation <- designPlan$typeOfComputation
    thetaH0 <- designPlan$thetaH0
    pi1 <- designPlan$pi1
    pi2 <- designPlan$pi2
    allocationRatioPlanned <- designPlan$allocationRatioPlanned
    accountForObservationTimes <- designPlan$accountForObservationTimes
    accrualTime <- designPlan$accrualTime
    kappa <- designPlan$kappa
    piecewiseSurvivalTime <- designPlan$piecewiseSurvivalTime
    maxNumberOfSubjects <- designPlan$maxNumberOfSubjects
    hazardRatio <- designPlan$hazardRatio
    .assertIsValidHazardRatio(hazardRatio, thetaH0)
    # one result per hazard ratio (lambda-based) or per pi1 (rate-based)
    if (designPlan$.piecewiseSurvivalTime$.isLambdaBased()) {
        numberOfResults <- length(hazardRatio)
    } else {
        numberOfResults <- length(pi1)
    }
    designPlan$eventsFixed <- rep(NA_real_, numberOfResults) # number of events
    designPlan$nFixed <- rep(NA_real_, numberOfResults) # number of subjects
    designPlan$chi <- rep(NA_real_, numberOfResults) # probability of an event
    calculateAllocationRatioPlanned <- FALSE
    if (allocationRatioPlanned == 0) {
        # 0 is the sentinel requesting an optimum allocation ratio search
        allocationRatioPlannedVec <- rep(NA_real_, numberOfResults)
        calculateAllocationRatioPlanned <- TRUE
        designPlan$optimumAllocationRatio <- TRUE
        designPlan$.setParameterType("optimumAllocationRatio", C_PARAM_USER_DEFINED)
    }
    userDefinedMaxNumberOfSubjects <- .isUserDefinedMaxNumberOfSubjects(designPlan)
    if (userDefinedMaxNumberOfSubjects && allocationRatioPlanned == 0) {
        stop(
            C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
            "determination of optimum allocation ('allocationRatioPlanned' = 0) not possible ",
            "for given 'maxNumberOfSubjects' (", designPlan$maxNumberOfSubjects, ")"
        )
    }
    if (userDefinedMaxNumberOfSubjects) {
        # per-result total study time found by root search below
        timeVector <- rep(NA_real_, numberOfResults)
    }
    designPlan$.calculateFollowUpTime <- FALSE
    lambda1 <- designPlan$lambda1
    if (designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) {
        lambda1 <- rep(NA_real_, numberOfResults)
    }
    for (i in 1:numberOfResults) {
        # exponential dropout hazards: phi[1] treatment, phi[2] control
        phi <- -c(
            log(1 - designPlan$dropoutRate1),
            log(1 - designPlan$dropoutRate2)
        ) / designPlan$dropoutTime
        if (!userDefinedMaxNumberOfSubjects) {
            if (calculateAllocationRatioPlanned) {
                # allocationRatioPlanned = 0 provides optimum sample size
                allocationRatioPlanned <- stats::optimize(function(x) {
                    numberEvents <- .getEventsFixed(
                        typeOfComputation = typeOfComputation,
                        twoSidedPower = twoSidedPower,
                        alpha = alpha, beta = beta, sided = sided,
                        hazardRatio = hazardRatio[i], thetaH0 = thetaH0,
                        allocationRatioPlanned = x
                    )
                    if (!accountForObservationTimes) {
                        probEvent <- (x * pi1[i] + pi2) / (1 + x)
                    } else {
                        probEvent <- .getEventProbabilities(
                            time = accrualTime[length(accrualTime)] + designPlan$followUpTime,
                            accrualTimeVector = accrualTime,
                            accrualIntensity = designPlan$accrualIntensity,
                            lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                            piecewiseSurvivalTime = piecewiseSurvivalTime,
                            phi = phi, kappa = kappa,
                            allocationRatioPlanned = x, hazardRatio = hazardRatio[i]
                        )
                    }
                    # minimize required subjects = events / event probability
                    return(numberEvents / probEvent)
                }, interval = c(0, 5), tol = 0.0001)$minimum
                allocationRatioPlannedVec[i] <- allocationRatioPlanned
            }
            designPlan$eventsFixed[i] <- .getEventsFixed(
                typeOfComputation = typeOfComputation,
                twoSidedPower = twoSidedPower,
                alpha = alpha, beta = beta, sided = sided,
                hazardRatio = hazardRatio[i], thetaH0 = thetaH0,
                allocationRatioPlanned = allocationRatioPlanned
            )
            if (!accountForObservationTimes) {
                designPlan$chi[i] <- (allocationRatioPlanned * pi1[i] + pi2) / (1 + allocationRatioPlanned)
            } else {
                designPlan$chi[i] <- .getEventProbabilities(
                    time = accrualTime[length(accrualTime)] + designPlan$followUpTime,
                    accrualTimeVector = accrualTime,
                    accrualIntensity = designPlan$accrualIntensity,
                    lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                    piecewiseSurvivalTime = piecewiseSurvivalTime,
                    phi = phi, kappa = kappa,
                    allocationRatioPlanned = allocationRatioPlanned, hazardRatio = hazardRatio[i]
                )
            }
            designPlan$.setParameterType("chi", C_PARAM_GENERATED)
            designPlan$nFixed[i] <- designPlan$eventsFixed[i] / designPlan$chi[i]
        } else {
            # maxNumberOfSubjects given by the user: solve for the study time
            # at which the required events are reached
            if (length(maxNumberOfSubjects) > 1) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "length of user defined 'maxNumberOfSubjects' (",
                    .arrayToString(maxNumberOfSubjects), ") must be 1"
                )
            }
            designPlan$.calculateFollowUpTime <- TRUE
            designPlan$eventsFixed[i] <- .getEventsFixed(
                typeOfComputation = typeOfComputation,
                twoSidedPower = twoSidedPower,
                alpha = alpha, beta = beta, sided = sided,
                hazardRatio = hazardRatio[i], thetaH0 = thetaH0,
                allocationRatioPlanned = allocationRatioPlanned
            )
            designPlan$nFixed[i] <- maxNumberOfSubjects
            if (designPlan$eventsFixed[i] > maxNumberOfSubjects) {
                if (length(hazardRatio) > 1) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        sprintf(
                            paste0(
                                "'maxNumberOfSubjects' (%s) is smaller than the number ",
                                "of events (%.3f) at index %s (hazard ratio = %.3f)"
                            ),
                            maxNumberOfSubjects, designPlan$eventsFixed[i], i, hazardRatio[i]
                        )
                    )
                } else {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        sprintf(
                            paste0(
                                "'maxNumberOfSubjects' (%s) is smaller than the number ",
                                "of events (%.3f)"
                            ),
                            maxNumberOfSubjects, designPlan$eventsFixed[i]
                        )
                    )
                }
            }
            # bracket the root: double 'up' until required subjects <= maxNumberOfSubjects
            up <- 2
            iterate <- 1
            while (designPlan$eventsFixed[i] / .getEventProbabilities(
                time = up,
                accrualTimeVector = accrualTime,
                accrualIntensity = designPlan$accrualIntensity,
                lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                piecewiseSurvivalTime = piecewiseSurvivalTime,
                phi = phi, kappa = kappa,
                allocationRatioPlanned = allocationRatioPlanned,
                hazardRatio = hazardRatio[i]
            ) > maxNumberOfSubjects) {
                up <- 2 * up
                iterate <- iterate + 1
                if (iterate > 50) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "the number of subjects is too small to reach maximum number of events ",
                        "(presumably due to drop-out rates), search algorithm failed"
                    )
                }
            }
            timeVector[i] <- .getOneDimensionalRoot(
                function(x) {
                    designPlan$eventsFixed[i] /
                        .getEventProbabilities(
                            time = x,
                            accrualTimeVector = accrualTime,
                            accrualIntensity = designPlan$accrualIntensity,
                            lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                            piecewiseSurvivalTime = piecewiseSurvivalTime,
                            phi = phi, kappa = kappa,
                            allocationRatioPlanned = allocationRatioPlanned,
                            hazardRatio = hazardRatio[i]
                        ) - maxNumberOfSubjects
                },
                lower = 0, upper = up, tolerance = 1e-06,
                acceptResultsOutOfTolerance = TRUE,
                callingFunctionInformation = ".getSampleSizeSequentialSurvival"
            )
            if (!is.na(timeVector[i])) {
                designPlan$chi[i] <- .getEventProbabilities(
                    time = timeVector[i],
                    accrualTimeVector = accrualTime,
                    accrualIntensity = designPlan$accrualIntensity,
                    lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                    piecewiseSurvivalTime = piecewiseSurvivalTime,
                    phi = phi, kappa = kappa,
                    allocationRatioPlanned = allocationRatioPlanned,
                    hazardRatio = hazardRatio[i]
                )
                designPlan$.setParameterType("chi", C_PARAM_GENERATED)
            }
        }
    }
    if (calculateAllocationRatioPlanned) {
        allocationRatioPlanned <- allocationRatioPlannedVec
        designPlan$allocationRatioPlanned <- allocationRatioPlanned
        designPlan$.setParameterType("allocationRatioPlanned", C_PARAM_GENERATED)
    }
    if (userDefinedMaxNumberOfSubjects) {
        # follow-up time = total study time minus accrual duration
        designPlan$followUpTime <- timeVector - accrualTime[length(accrualTime)]
        designPlan$.setParameterType("followUpTime", C_PARAM_GENERATED)
    }
    # split total sample size by allocation ratio (group 1 : group 2 = ratio : 1)
    designPlan$nFixed2 <- designPlan$nFixed / (1 + allocationRatioPlanned)
    designPlan$nFixed1 <- designPlan$nFixed2 * allocationRatioPlanned
    if (designPlan$.design$kMax == 1 &&
            designPlan$.accrualTime$.isRelativeAccrualIntensity(designPlan$accrualIntensity)) {
        designPlan$accrualIntensity <- designPlan$nFixed / designPlan$accrualTime
        designPlan$.setParameterType("accrualIntensity", C_PARAM_GENERATED)
    }
    designPlan$numberOfSubjects1 <- matrix(designPlan$nFixed1, nrow = 1)
    designPlan$numberOfSubjects2 <- matrix(designPlan$nFixed2, nrow = 1)
    if (!designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) {
        eventRatio <- allocationRatioPlanned * pi1 / pi2
    } else {
eventRatio <- NA_real_
    }

    # Fixed
    designPlan$hazardRatio <- hazardRatio
    designPlan$expectedEventsH1 <- designPlan$eventsFixed
    designPlan$maxNumberOfSubjects <- designPlan$nFixed
    designPlan$numberOfSubjects <- matrix(designPlan$nFixed, nrow = 1)
    designPlan$.setParameterType("eventsFixed", C_PARAM_GENERATED)
    designPlan$.setParameterType("nFixed1", C_PARAM_GENERATED)
    designPlan$.setParameterType("nFixed2", C_PARAM_GENERATED)
    designPlan$.setParameterType("nFixed", C_PARAM_GENERATED)
    if (designPlan$accountForObservationTimes) {
        # single analysis at end of accrual plus follow-up
        designPlan$analysisTime <- matrix(accrualTime[length(accrualTime)] + designPlan$followUpTime, nrow = 1)
        designPlan$.setParameterType("analysisTime", C_PARAM_GENERATED)
    }
    return(designPlan)
}

# note that fixed sample size must be calculated before on 'designPlan'
# Fills the group-sequential sample size fields: events per stage (fixed-design
# events scaled by the information rates and inflation factor), stage-wise
# analysis times (root search on the event probability) and subject counts.
# Mutates and returns 'designPlan'. (Definition continues beyond this chunk.)
.getSampleSizeSequentialSurvival <- function(designPlan, designCharacteristics) {
    if (designPlan$.piecewiseSurvivalTime$.isLambdaBased()) {
        numberOfResults <- length(designPlan$hazardRatio)
    } else {
        numberOfResults <- length(designPlan$pi1)
    }
    kMax <- designCharacteristics$.design$kMax
    # one column per result scenario, one row per stage
    designPlan$eventsPerStage <- matrix(NA_real_, kMax, numberOfResults)
    analysisTime <- matrix(NA_real_, kMax, numberOfResults)
    numberOfSubjects <- matrix(NA_real_, kMax, numberOfResults)
    designPlan$expectedEventsH0 <- rep(NA_real_, numberOfResults)
    designPlan$expectedEventsH01 <- rep(NA_real_, numberOfResults)
    designPlan$expectedEventsH1 <- rep(NA_real_, numberOfResults)
    expectedNumberOfSubjectsH1 <- rep(NA_real_, numberOfResults)
    studyDuration <- rep(NA_real_, numberOfResults)
    designPlan$chi <- rep(NA_real_, numberOfResults)
    informationRates <- designCharacteristics$information / designCharacteristics$shift
    lambda1 <- designPlan$lambda1
    if (designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) {
        lambda1 <- rep(NA_real_, numberOfResults)
    }
    if (designPlan$accountForObservationTimes && designPlan$.calculateFollowUpTime) {
        designPlan$followUpTime <- rep(NA_real_, numberOfResults)
    }
    for (i in 1:numberOfResults) {
        # maximum events scaled by information rates and the design's inflation factor
        designPlan$eventsPerStage[, i] <- designPlan$eventsFixed[i] *
            informationRates * designCharacteristics$inflationFactor
        if (!designPlan$accountForObservationTimes) {
            if (length(designPlan$allocationRatioPlanned) > 1) {
                allocationRatioPlanned <- designPlan$allocationRatioPlanned[i]
            } else {
                allocationRatioPlanned <- designPlan$allocationRatioPlanned
            }
            designPlan$chi[i] <- (allocationRatioPlanned * designPlan$pi1[i] + designPlan$pi2) /
                (1 + allocationRatioPlanned)
            designPlan$.setParameterType("chi", C_PARAM_GENERATED)
            numberOfSubjects[kMax, i] <- designPlan$eventsPerStage[kMax, i] / designPlan$chi[i]
        } else {
            # exponential dropout hazards: phi[1] treatment, phi[2] control
            phi <- -c(log(1 - designPlan$dropoutRate1), log(1 - designPlan$dropoutRate2)) /
                designPlan$dropoutTime
            if (designPlan$.calculateFollowUpTime) {
                # follow-up time unknown: solve total study time from maxNumberOfSubjects
                if (designPlan$eventsPerStage[kMax, i] > designPlan$maxNumberOfSubjects[i]) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        sprintf(
                            paste0(
                                "the number of subjects (%s) is smaller than the number ",
                                "of events (%s) at stage %s"
                            ),
                            designPlan$maxNumberOfSubjects[i], designPlan$eventsPerStage[kMax, i], i
                        )
                    )
                }
                # bracket the root by doubling 'up'
                up <- 2
                iterate <- 1
                while (designPlan$eventsPerStage[kMax, i] / .getEventProbabilities(
                    time = up,
                    accrualTimeVector = designPlan$accrualTime,
                    accrualIntensity = designPlan$accrualIntensity,
                    lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                    piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime,
                    phi = phi, kappa = designPlan$kappa,
                    allocationRatioPlanned = designPlan$allocationRatioPlanned,
                    hazardRatio = designPlan$hazardRatio[i]
                ) > designPlan$maxNumberOfSubjects[i]) {
                    up <- 2 * up
                    iterate <- iterate + 1
                    if (iterate > 50) {
                        stop(
                            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                            "the number of subjects is too small to reach maximum number of events ",
                            "(presumably due to drop-out rates)"
                        )
                    }
                }
                totalTime <- .getOneDimensionalRoot(
                    function(x) {
                        designPlan$eventsPerStage[kMax, i] / designPlan$maxNumberOfSubjects[i] -
                            .getEventProbabilities(
                                time = x,
                                accrualTimeVector = designPlan$accrualTime,
                                accrualIntensity = designPlan$accrualIntensity,
                                lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                                piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime,
                                phi = phi, kappa = designPlan$kappa,
                                allocationRatioPlanned = designPlan$allocationRatioPlanned,
                                hazardRatio = designPlan$hazardRatio[i]
                            )
                    },
                    lower = 0, upper = up, tolerance = 1e-06,
                    callingFunctionInformation = ".getSampleSizeSequentialSurvival"
                )
                # analysis times
                for (j in 1:kMax) {
                    analysisTime[j, i] <- .getOneDimensionalRoot(
                        function(x) {
                            designPlan$eventsPerStage[j, i] / designPlan$maxNumberOfSubjects[i] -
                                .getEventProbabilities(
                                    time = x,
                                    accrualTimeVector = designPlan$accrualTime,
                                    accrualIntensity = designPlan$accrualIntensity,
                                    lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                                    piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime,
                                    phi = phi, kappa = designPlan$kappa,
                                    allocationRatioPlanned = designPlan$allocationRatioPlanned,
                                    hazardRatio = designPlan$hazardRatio[i]
                                )
                        },
                        lower = 0, upper = totalTime, tolerance = 1e-06,
                        acceptResultsOutOfTolerance = TRUE,
                        callingFunctionInformation = ".getSampleSizeSequentialSurvival"
                    )
                }
                analysisTime[kMax, i] <- totalTime
                designPlan$followUpTime[i] <- totalTime - designPlan$accrualTime[length(designPlan$accrualTime)]
                numberOfSubjects[, i] <- .getNumberOfSubjects(
                    time = analysisTime[, i],
                    accrualTime = designPlan$accrualTime,
                    accrualIntensity = designPlan$accrualIntensity,
                    maxNumberOfSubjects = designPlan$maxNumberOfSubjects[i]
                )
            } else {
                # follow-up time given: derive subjects from the event probability
                if (length(designPlan$allocationRatioPlanned) > 1) {
                    allocationRatioPlanned <- designPlan$allocationRatioPlanned[i]
                } else {
                    allocationRatioPlanned <- designPlan$allocationRatioPlanned
                }
                if (is.na(designPlan$followUpTime)) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "'followUpTime' must be defined because 'designPlan$.calculateFollowUpTime' = FALSE"
                    )
                }
                designPlan$chi[i] <- .getEventProbabilities(
                    time = designPlan$accrualTime[length(designPlan$accrualTime)] + designPlan$followUpTime,
                    accrualTimeVector = designPlan$accrualTime,
                    accrualIntensity = designPlan$accrualIntensity,
                    lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                    piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime,
                    phi = phi, kappa = designPlan$kappa,
                    allocationRatioPlanned = allocationRatioPlanned,
                    hazardRatio = designPlan$hazardRatio[i]
                )
                designPlan$.setParameterType("chi", C_PARAM_GENERATED)
                numberOfSubjects[kMax, i] <- designPlan$eventsPerStage[kMax, i] / designPlan$chi[i]
                # Analysis times
                for (j in 1:(kMax - 1)) {
                    analysisTime[j, i] <- .getOneDimensionalRoot(
                        function(x) {
                            designPlan$eventsPerStage[j, i] / numberOfSubjects[kMax, i] -
                                .getEventProbabilities(
                                    time = x,
                                    accrualTimeVector = designPlan$accrualTime,
                                    accrualIntensity = designPlan$accrualIntensity,
                                    lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                                    piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime,
                                    phi = phi, kappa = designPlan$kappa,
                                    allocationRatioPlanned = allocationRatioPlanned,
                                    hazardRatio = designPlan$hazardRatio[i]
                                )
                        },
                        lower = 0,
                        upper = designPlan$accrualTime[length(designPlan$accrualTime)] + designPlan$followUpTime,
                        tolerance = 1e-06,
                        callingFunctionInformation = ".getSampleSizeSequentialSurvival"
                    )
                }
                analysisTime[kMax, i] <- designPlan$accrualTime[length(designPlan$accrualTime)] + designPlan$followUpTime
                numberOfSubjects[, i] <- .getNumberOfSubjects(
                    time = analysisTime[, i],
                    accrualTime = designPlan$accrualTime,
                    accrualIntensity = designPlan$accrualIntensity,
                    maxNumberOfSubjects = numberOfSubjects[kMax, i]
                )
            }
            # per-stage stopping probabilities: rejection plus futility (no futility at kMax)
            stoppingProbs <- designCharacteristics$rejectionProbabilities +
                c(designCharacteristics$futilityProbabilities, 0)
            if (all(is.na(designCharacteristics$futilityProbabilities))) {
                warning("Expected number of subjects H1 and study duration H1 ",
                    "cannot be calculated because the futility probabilities ",
                    "are not applicable for the specified design", call.
= FALSE ) } stoppingProbs[kMax] <- 1 - sum(stoppingProbs[1:(kMax - 1)]) studyDuration[i] <- analysisTime[, i] %*% stoppingProbs expectedNumberOfSubjectsH1[i] <- numberOfSubjects[, i] %*% stoppingProbs } designPlan$expectedEventsH0[i] <- designCharacteristics$averageSampleNumber0 * designPlan$eventsFixed[i] designPlan$expectedEventsH01[i] <- designCharacteristics$averageSampleNumber01 * designPlan$eventsFixed[i] designPlan$expectedEventsH1[i] <- designCharacteristics$averageSampleNumber1 * designPlan$eventsFixed[i] designPlan$.setParameterType("expectedEventsH0", C_PARAM_GENERATED) designPlan$.setParameterType("expectedEventsH01", C_PARAM_GENERATED) designPlan$.setParameterType("expectedEventsH1", C_PARAM_GENERATED) designPlan$numberOfSubjects2 <- numberOfSubjects / (1 + designPlan$allocationRatioPlanned) designPlan$numberOfSubjects1 <- designPlan$numberOfSubjects2 * designPlan$allocationRatioPlanned designPlan$.setParameterType("numberOfSubjects1", C_PARAM_GENERATED) designPlan$.setParameterType("numberOfSubjects2", C_PARAM_GENERATED) } if (!is.null(designCharacteristics$rejectionProbabilities)) { designPlan$rejectPerStage <- matrix(designCharacteristics$rejectionProbabilities, nrow = designPlan$.design$kMax ) designPlan$.setParameterType("rejectPerStage", C_PARAM_GENERATED) designPlan$earlyStop <- sum(designPlan$rejectPerStage[1:(designPlan$.design$kMax - 1), ]) designPlan$.setParameterType("earlyStop", C_PARAM_GENERATED) } if (!is.null(designCharacteristics$futilityProbabilities) && any(designPlan$.design$futilityBounds != C_FUTILITY_BOUNDS_DEFAULT)) { designPlan$futilityPerStage <- matrix(designCharacteristics$futilityProbabilities, nrow = designPlan$.design$kMax - 1 ) designPlan$.setParameterType("futilityPerStage", C_PARAM_GENERATED) designPlan$futilityStop <- sum(designPlan$futilityPerStage) designPlan$.setParameterType("futilityStop", C_PARAM_GENERATED) designPlan$earlyStop <- designPlan$earlyStop + sum(designPlan$futilityPerStage) } 
designPlan$informationRates <- matrix(informationRates, ncol = 1) if (!is.matrix(numberOfSubjects)) { designPlan$numberOfSubjects <- matrix(numberOfSubjects[kMax, ], nrow = 1) } else { designPlan$numberOfSubjects <- numberOfSubjects } designPlan$maxNumberOfSubjects <- designPlan$numberOfSubjects[kMax, ] if (designPlan$.getParameterType("maxNumberOfSubjects") == C_PARAM_NOT_APPLICABLE || length(designPlan$maxNumberOfSubjects) > 1) { designPlan$.setParameterType("maxNumberOfSubjects", C_PARAM_GENERATED) } designPlan$maxNumberOfSubjects1 <- .getNumberOfSubjects1( designPlan$maxNumberOfSubjects, designPlan$allocationRatioPlanned ) designPlan$maxNumberOfSubjects2 <- .getNumberOfSubjects2( designPlan$maxNumberOfSubjects, designPlan$allocationRatioPlanned ) designPlan$.setParameterType("maxNumberOfSubjects1", C_PARAM_GENERATED) designPlan$.setParameterType("maxNumberOfSubjects2", C_PARAM_GENERATED) if (ncol(designPlan$informationRates) == 1 && identical(designPlan$informationRates[, 1], designPlan$.design$informationRates)) { designPlan$.setParameterType("informationRates", C_PARAM_NOT_APPLICABLE) } else { designPlan$.setParameterType("informationRates", C_PARAM_GENERATED) } designPlan$.setParameterType("numberOfSubjects", C_PARAM_GENERATED) designPlan$.setParameterType("eventsPerStage", C_PARAM_GENERATED) if (designPlan$accountForObservationTimes) { designPlan$analysisTime <- analysisTime designPlan$expectedNumberOfSubjectsH1 <- expectedNumberOfSubjectsH1 designPlan$studyDuration <- studyDuration designPlan$studyDurationH1 <- studyDuration # deprecated designPlan$.setParameterType("analysisTime", C_PARAM_GENERATED) designPlan$.setParameterType("expectedNumberOfSubjectsH1", C_PARAM_GENERATED) designPlan$.setParameterType("studyDuration", C_PARAM_GENERATED) } designPlan$.setParameterType("eventsFixed", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("nFixed1", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("nFixed2", C_PARAM_NOT_APPLICABLE) 
designPlan$.setParameterType("nFixed", C_PARAM_NOT_APPLICABLE) if (designPlan$allocationRatioPlanned[1] == 1) { designPlan$.setParameterType("numberOfSubjects1", C_PARAM_NOT_APPLICABLE) designPlan$.setParameterType("numberOfSubjects2", C_PARAM_NOT_APPLICABLE) } designPlan$.calculateFollowUpTime <- NA return(designPlan) } # Note that 'directionUpper' and 'maxNumberOfSubjects' are only applicable # for 'objectType' = "power" .createDesignPlanMeans <- function(..., objectType = c("sampleSize", "power"), design, normalApproximation = FALSE, meanRatio = FALSE, thetaH0 = ifelse(meanRatio, 1, 0), alternative = NA_real_, stDev = C_STDEV_DEFAULT, directionUpper = NA, maxNumberOfSubjects = NA_real_, groups = 2, allocationRatioPlanned = NA_real_) { objectType <- match.arg(objectType) .assertIsTrialDesignInverseNormalOrGroupSequential(design) .assertIsValidAlphaAndBeta(design$alpha, design$beta) .assertIsValidSidedParameter(design$sided) .assertIsValidStandardDeviation(stDev) .assertIsValidGroupsParameter(groups) .assertIsSingleNumber(thetaH0, "thetaH0") .assertIsSingleLogical(meanRatio, "meanRatio") .assertIsValidThetaH0(thetaH0, endpoint = "means", groups = groups, ratioEnabled = meanRatio) .assertIsSingleLogical(normalApproximation, "normalApproximation") if (meanRatio) { if (identical(alternative, C_ALTERNATIVE_POWER_SIMULATION_DEFAULT)) { alternative <- C_ALTERNATIVE_POWER_SIMULATION_MEAN_RATIO_DEFAULT } .assertIsInOpenInterval(alternative, "alternative", 0, NULL, naAllowed = TRUE) } .assertIsValidDirectionUpper(directionUpper, design$sided, objectType, userFunctionCallEnabled = TRUE) if (objectType == "sampleSize" && !any(is.na(alternative))) { if (design$sided == 1 && any(alternative - thetaH0 <= 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "any 'alternative' (", .arrayToString(alternative), ") must be > 'thetaH0' (", thetaH0, ")" ) } if (any(alternative - thetaH0 == 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "any 'alternative' (", .arrayToString(alternative), ") 
must be != 'thetaH0' (", thetaH0, ")" ) } } designPlan <- TrialDesignPlanMeans(design = design, meanRatio = meanRatio) designPlan$.setSampleSizeObject(objectType) designPlan$criticalValuesPValueScale <- matrix(design$stageLevels, ncol = 1) if (design$sided == 2) { designPlan$criticalValuesPValueScale <- designPlan$criticalValuesPValueScale * 2 designPlan$.setParameterType("criticalValuesPValueScale", C_PARAM_GENERATED) } if (any(design$futilityBounds > C_FUTILITY_BOUNDS_DEFAULT)) { designPlan$futilityBoundsPValueScale <- matrix(1 - stats::pnorm(design$futilityBounds), ncol = 1) designPlan$.setParameterType("futilityBoundsPValueScale", C_PARAM_GENERATED) } if (groups == 2) { if (design$sided == 2 && ((thetaH0 != 0 && !meanRatio) || (thetaH0 != 1 && meanRatio))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "two-sided case is implemented only for superiority testing (i.e., thetaH0 = ", ifelse(meanRatio, 1, 0), ")" ) } if (is.na(allocationRatioPlanned)) { allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT } if (allocationRatioPlanned < 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'allocationRatioPlanned' (", allocationRatioPlanned, ") must be >= 0" ) } .setValueAndParameterType(designPlan, "allocationRatioPlanned", allocationRatioPlanned, 1) if (meanRatio && thetaH0 <= 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "null hypothesis mean ratio is not allowed be negative or zero, ", "i.e., 'thetaH0' must be > 0 if 'meanRatio' = TRUE" ) } } .setValueAndParameterType(designPlan, "normalApproximation", normalApproximation, FALSE) .setValueAndParameterType(designPlan, "meanRatio", meanRatio, FALSE) .setValueAndParameterType(designPlan, "thetaH0", thetaH0, 0) if (objectType == "power") { .setValueAndParameterType( designPlan, "alternative", alternative, C_ALTERNATIVE_POWER_SIMULATION_DEFAULT ) } else { .setValueAndParameterType(designPlan, "alternative", alternative, C_ALTERNATIVE_DEFAULT) } .setValueAndParameterType(designPlan, "stDev", stDev, C_STDEV_DEFAULT) if 
(objectType == "power") { .assertIsValidMaxNumberOfSubjects(maxNumberOfSubjects) .setValueAndParameterType(designPlan, "maxNumberOfSubjects", maxNumberOfSubjects, NA_real_) .setValueAndParameterType(designPlan, "directionUpper", directionUpper, TRUE) designPlan$.setParameterType("effect", C_PARAM_GENERATED) } .setValueAndParameterType(designPlan, "groups", groups, 2) if (groups == 1) { if (isTRUE(meanRatio)) { warning("'meanRatio' (", meanRatio, ") will be ignored ", "because it is not applicable for 'groups' = 1", call. = FALSE ) } designPlan$.setParameterType("meanRatio", C_PARAM_NOT_APPLICABLE) if (length(allocationRatioPlanned) == 1 && !is.na(allocationRatioPlanned)) { warning("'allocationRatioPlanned' (", allocationRatioPlanned, ") will be ignored because it is not applicable for 'groups' = 1", call. = FALSE ) } designPlan$.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE) } return(designPlan) } # # note that 'directionUpper' and 'maxNumberOfSubjects' are # only applicable for 'objectType' = "power" # .createDesignPlanRates <- function(..., objectType = c("sampleSize", "power"), design, normalApproximation = TRUE, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = C_PI_1_SAMPLE_SIZE_DEFAULT, pi2 = C_PI_2_DEFAULT, directionUpper = NA, maxNumberOfSubjects = NA_real_, groups = 2, allocationRatioPlanned = NA_real_) { objectType <- match.arg(objectType) .assertIsTrialDesignInverseNormalOrGroupSequential(design) .assertIsValidAlphaAndBeta(design$alpha, design$beta) .assertIsValidSidedParameter(design$sided) .assertIsValidGroupsParameter(groups) .assertIsSingleLogical(normalApproximation, "normalApproximation") .assertIsSingleLogical(riskRatio, "riskRatio") .assertIsValidDirectionUpper(directionUpper, design$sided, objectType, userFunctionCallEnabled = TRUE) if (groups == 1) { if (!any(is.na(pi1)) && any(pi1 == thetaH0) && (objectType == "sampleSize")) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "any 'pi1' (", .arrayToString(pi1), ") must be 
!= 'thetaH0' (", thetaH0, ")" ) } if (any(is.na(pi1)) || any(pi1 <= 0) || any(pi1 >= 1)) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "probability 'pi1' (", .arrayToString(pi1), ") is out of bounds (0; 1)" ) } if (thetaH0 >= 1 || thetaH0 <= 0) { stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'thetaH0' (", thetaH0, ") is out of bounds (0; 1)") } if (!normalApproximation && design$sided == 2 && (objectType == "sampleSize")) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "exact sample size calculation not available for two-sided testing" ) } } else if (groups == 2) { if (!any(is.na(c(pi1, pi2))) && any(abs(pi1 - pi2 - thetaH0) < 1E-12) && (objectType == "sampleSize") && !riskRatio) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "any 'pi1 - pi2' (", .arrayToString(pi1 - pi2), ") must be != 'thetaH0' (", thetaH0, ")" ) } if (!any(is.na(c(pi1, pi2))) && any(abs(pi1 / pi2 - thetaH0) < 1E-12) && (objectType == "sampleSize") && riskRatio) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "any 'pi1 / pi2' (", .arrayToString(pi1 / pi2), ") must be != 'thetaH0' (", thetaH0, ")" ) } if (any(is.na(pi1)) || any(pi1 <= 0) || any(pi1 >= 1)) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "probability 'pi1' (", .arrayToString(pi1), ") is out of bounds (0; 1)" ) } if (any(is.na(pi2)) || any(pi2 <= 0) || any(pi2 >= 1)) { stop( C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "probability 'pi2' (", .arrayToString(pi2), ") is out of bounds (0; 1)" ) } if (design$sided == 2 && ((thetaH0 != 0 && !riskRatio) || (thetaH0 != 1 && riskRatio))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "two-sided case is implemented only for superiority testing") } if (!normalApproximation) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "only normal approximation case is implemented for two groups" ) } if (is.na(allocationRatioPlanned)) { allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT } if (allocationRatioPlanned < 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'allocationRatioPlanned' (", allocationRatioPlanned, ") must be 
>= 0" ) } if (riskRatio && thetaH0 <= 0) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "null hypothesis risk ratio is not allowed be negative or zero, ", "i.e., 'thetaH0' must be > 0 if 'riskRatio' = TRUE" ) } } designPlan <- TrialDesignPlanRates(design = design) designPlan$.setSampleSizeObject(objectType) designPlan$criticalValuesPValueScale <- matrix(design$stageLevels, ncol = 1) if (design$sided == 2) { designPlan$criticalValuesPValueScale <- designPlan$criticalValuesPValueScale * 2 designPlan$.setParameterType("criticalValuesPValueScale", C_PARAM_GENERATED) } if (any(design$futilityBounds > C_FUTILITY_BOUNDS_DEFAULT)) { designPlan$futilityBoundsPValueScale <- matrix(1 - stats::pnorm(design$futilityBounds), ncol = 1) designPlan$.setParameterType("futilityBoundsPValueScale", C_PARAM_GENERATED) } if (objectType == "power") { .assertIsValidMaxNumberOfSubjects(maxNumberOfSubjects) .setValueAndParameterType(designPlan, "maxNumberOfSubjects", maxNumberOfSubjects, NA_real_) .setValueAndParameterType(designPlan, "directionUpper", directionUpper, TRUE) designPlan$.setParameterType("effect", C_PARAM_GENERATED) } .setValueAndParameterType(designPlan, "normalApproximation", normalApproximation, TRUE) .setValueAndParameterType(designPlan, "thetaH0", thetaH0, ifelse(riskRatio, 1, 0)) .assertIsValidThetaH0(thetaH0, endpoint = "rates", groups = groups, ratioEnabled = riskRatio) if (objectType == "power") { .setValueAndParameterType(designPlan, "pi1", pi1, C_PI_1_DEFAULT) } else { .setValueAndParameterType(designPlan, "pi1", pi1, C_PI_1_SAMPLE_SIZE_DEFAULT) } .setValueAndParameterType(designPlan, "pi2", pi2, 0.2) if (groups == 1) { if (designPlan$.getParameterType("pi2") == C_PARAM_USER_DEFINED) { warning("'pi2' (", pi2, ") will be ignored ", "because it is not applicable for 'groups' = 1", call. 
= FALSE ) } designPlan$.setParameterType("pi2", C_PARAM_NOT_APPLICABLE) if (isTRUE(riskRatio)) { warning("'riskRatio' (", riskRatio, ") will be ignored ", "because it is not applicable for 'groups' = 1", call. = FALSE ) } designPlan$.setParameterType("riskRatio", C_PARAM_NOT_APPLICABLE) if (length(allocationRatioPlanned) == 1 && !is.na(allocationRatioPlanned)) { warning("'allocationRatioPlanned' (", allocationRatioPlanned, ") will be ignored because it is not applicable for 'groups' = 1", call. = FALSE ) } designPlan$.setParameterType("allocationRatioPlanned", C_PARAM_NOT_APPLICABLE) } else { .setValueAndParameterType(designPlan, "riskRatio", riskRatio, FALSE) .setValueAndParameterType( designPlan, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT ) } .setValueAndParameterType(designPlan, "groups", groups, 2) return(designPlan) } #' @title #' Get Power Means #' #' @description #' Returns the power, stopping probabilities, and expected sample size for #' testing means in one or two samples at given sample size. #' #' @inheritParams param_design_with_default #' @inheritParams param_groups #' @param normalApproximation The type of computation of the p-values. If \code{TRUE}, the variance is #' assumed to be known, default is \code{FALSE}, i.e., the calculations are performed #' with the t distribution. #' @param meanRatio If \code{TRUE}, the sample size for #' one-sided testing of H0: \code{mu1 / mu2 = thetaH0} is calculated, default is \code{FALSE}. #' @inheritParams param_thetaH0 #' @inheritParams param_alternative #' @inheritParams param_stDev #' @inheritParams param_allocationRatioPlanned #' @inheritParams param_directionUpper #' @inheritParams param_maxNumberOfSubjects #' @inheritParams param_three_dots #' #' @details #' At given design the function calculates the power, stopping probabilities, #' and expected sample size, for testing means at given sample size. 
#' In a two treatment groups design, additionally, an
#' allocation ratio = \code{n1 / n2} can be specified.
#' A null hypothesis value thetaH0 != 0 for testing the difference of two means
#' or \code{thetaH0 != 1} for testing the ratio of two means can be specified.
#' For the specified sample size, critical bounds and stopping for futility
#' bounds are provided at the effect scale (mean, mean difference, or
#' mean ratio, respectively).
#'
#' @template return_object_trial_design_plan
#' @template how_to_get_help_for_generics
#'
#' @family power functions
#'
#' @template examples_get_power_means
#'
#' @export
#'
getPowerMeans <- function(design = NULL, ...,
        groups = 2L,
        normalApproximation = FALSE,
        meanRatio = FALSE,
        thetaH0 = ifelse(meanRatio, 1, 0),
        alternative = seq(0, 1, 0.2), # C_ALTERNATIVE_POWER_SIMULATION_DEFAULT
        stDev = 1, # C_STDEV_DEFAULT
        directionUpper = NA,
        maxNumberOfSubjects = NA_real_,
        allocationRatioPlanned = NA_real_ # C_ALLOCATION_RATIO_DEFAULT
        ) {
    .assertIsValidMaxNumberOfSubjects(maxNumberOfSubjects)
    if (is.null(design)) {
        design <- .getDefaultDesign(..., type = "power")
        .warnInCaseOfUnknownArguments(
            functionName = "getPowerMeans",
            ignore = .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), ...
        )
    } else {
        .warnInCaseOfUnknownArguments(functionName = "getPowerMeans", ...)
        .assertIsTrialDesign(design)
        .warnInCaseOfTwoSidedPowerArgument(...)
        .warnInCaseOfTwoSidedPowerIsDisabled(design)
    }

    # Construct the plan object; this performs the full argument validation
    # and records the user-defined parameters.
    designPlan <- .createDesignPlanMeans(
        objectType = "power", design = design,
        normalApproximation = normalApproximation, meanRatio = meanRatio,
        thetaH0 = thetaH0, alternative = alternative, stDev = stDev,
        directionUpper = directionUpper, maxNumberOfSubjects = maxNumberOfSubjects,
        groups = groups, allocationRatioPlanned = allocationRatioPlanned, ...
    )

    if (designPlan$groups == 1) {
        # One-sample case: standardized effect (alternative - thetaH0) / stDev.
        theta <- (designPlan$alternative - designPlan$thetaH0) / designPlan$stDev
        if (!is.na(designPlan$directionUpper) && !designPlan$directionUpper) {
            # Testing in the downward direction: mirror the effect.
            theta <- -theta
        }
        if (designPlan$normalApproximation) {
            powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber(
                design, theta, maxNumberOfSubjects
            )
        } else {
            # t-distribution case: replace theta by an adjusted effect such that
            # the normal-based group sequential computation matches the power of
            # the t-test (noncentral t with maxNumberOfSubjects - 1 degrees of
            # freedom, noncentrality theta * sqrt(maxNumberOfSubjects)).
            thetaAdj <- (sign(theta) * .getOneMinusQNorm(design$alpha / design$sided) -
                .getQNorm(stats::pt(
                    sign(theta) * stats::qt(1 - design$alpha / design$sided, maxNumberOfSubjects - 1),
                    maxNumberOfSubjects - 1, theta * sqrt(maxNumberOfSubjects)
                ))) / sqrt(maxNumberOfSubjects)
            powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber(
                design, thetaAdj, maxNumberOfSubjects
            )
        }
    } else {
        # Two-sample case: standardized effect including the allocation ratio.
        if (!designPlan$meanRatio) {
            # Difference of means.
            theta <- sqrt(designPlan$allocationRatioPlanned) / (1 + designPlan$allocationRatioPlanned) *
                (designPlan$alternative - designPlan$thetaH0) / designPlan$stDev
        } else {
            # Ratio of means: variance term additionally involves thetaH0^2.
            theta <- sqrt(designPlan$allocationRatioPlanned) /
                sqrt((1 + designPlan$allocationRatioPlanned * designPlan$thetaH0^2) *
                    (1 + designPlan$allocationRatioPlanned)) *
                (designPlan$alternative - designPlan$thetaH0) / designPlan$stDev
        }
        if (!is.na(designPlan$directionUpper) && !designPlan$directionUpper) {
            theta <- -theta
        }
        if (designPlan$normalApproximation) {
            powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber(
                design, theta, maxNumberOfSubjects
            )
        } else {
            # Two-sample t-test adjustment: maxNumberOfSubjects - 2 degrees of freedom.
            thetaAdj <- (sign(theta) * .getOneMinusQNorm(design$alpha / design$sided) -
                .getQNorm(stats::pt(
                    sign(theta) * stats::qt(1 - design$alpha / design$sided, maxNumberOfSubjects - 2),
                    maxNumberOfSubjects - 2, theta * sqrt(maxNumberOfSubjects)
                ))) / sqrt(maxNumberOfSubjects)
            powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber(
                design, thetaAdj, maxNumberOfSubjects
            )
        }
    }

    # Copy the generic power/ASN results into the plan object.
    designPlan$effect <- designPlan$alternative - designPlan$thetaH0
    designPlan$expectedNumberOfSubjects <- powerAndAverageSampleNumber$averageSampleNumber
    designPlan$overallReject <- powerAndAverageSampleNumber$overallReject
    designPlan$rejectPerStage <- powerAndAverageSampleNumber$rejectPerStage
    designPlan$futilityStop <- powerAndAverageSampleNumber$overallFutility
    designPlan$futilityPerStage <- powerAndAverageSampleNumber$futilityPerStage
    designPlan$earlyStop <- powerAndAverageSampleNumber$overallEarlyStop

    # Stage-wise quantities are only meaningful for multi-stage designs.
    parameterNames <- c("overallReject")
    if (design$kMax > 1) {
        parameterNames <- c(
            parameterNames,
            "expectedNumberOfSubjects",
            "rejectPerStage",
            "futilityStop",
            "futilityPerStage",
            "earlyStop"
        )
    }
    for (parameterName in parameterNames) {
        designPlan$.setParameterType(parameterName, C_PARAM_GENERATED)
    }

    .addNumberOfSubjectsToPowerResult(designPlan)
    .addEffectScaleBoundaryDataToDesignPlan(designPlan)
    .hideFutilityStopsIfNotApplicable(designPlan)

    return(designPlan)
}

#' @title
#' Get Power Rates
#'
#' @description
#' Returns the power, stopping probabilities, and expected sample size for testing rates
#' in one or two samples at given sample sizes.
#'
#' @inheritParams param_design_with_default
#' @inheritParams param_groups
#' @param riskRatio If \code{TRUE}, the power for one-sided
#' testing of H0: \code{pi1 / pi2 = thetaH0} is calculated, default is \code{FALSE}.
#' @inheritParams param_thetaH0
#' @inheritParams param_pi1_rates
#' @inheritParams param_pi2_rates
#' @inheritParams param_directionUpper
#' @inheritParams param_maxNumberOfSubjects
#' @inheritParams param_allocationRatioPlanned
#' @inheritParams param_three_dots
#'
#' @details
#' At given design the function calculates the power, stopping probabilities, and expected sample size,
#' for testing rates for given maximum sample size.
#' The sample sizes over the stages are calculated according to the specified information rate in the design.
#' In a two treatment groups design, additionally, an allocation ratio = n1/n2 can be specified.
#' If a null hypothesis value thetaH0 != 0 for testing the difference of two rates #' or \code{thetaH0 != 1} for testing the risk ratio is specified, the #' formulas according to Farrington & Manning (Statistics in Medicine, 1990) are used (only one-sided testing). #' Critical bounds and stopping for futility bounds are provided at the effect scale #' (rate, rate difference, or rate ratio, respectively). #' For the two-sample case, the calculation here is performed at fixed pi2 as given as argument in the function. #' Note that the power calculation for rates is always based on the normal approximation. #' #' @template return_object_trial_design_plan #' @template how_to_get_help_for_generics #' #' @family power functions #' #' @template examples_get_power_rates #' #' @export #' getPowerRates <- function(design = NULL, ..., groups = 2L, riskRatio = FALSE, thetaH0 = ifelse(riskRatio, 1, 0), pi1 = seq(0.2, 0.5, 0.1), # C_PI_1_DEFAULT pi2 = 0.2, # C_PI_2_DEFAULT directionUpper = NA, maxNumberOfSubjects = NA_real_, allocationRatioPlanned = NA_real_ # C_ALLOCATION_RATIO_DEFAULT ) { if (is.null(design)) { design <- .getDefaultDesign(..., type = "power") .warnInCaseOfUnknownArguments( functionName = "getPowerRates", ignore = .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), ... ) } else { .warnInCaseOfUnknownArguments(functionName = "getPowerRates", ...) .assertIsTrialDesign(design) .warnInCaseOfTwoSidedPowerArgument(...) .warnInCaseOfTwoSidedPowerIsDisabled(design) } designPlan <- .createDesignPlanRates( objectType = "power", design = design, riskRatio = riskRatio, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, directionUpper = directionUpper, maxNumberOfSubjects = maxNumberOfSubjects, groups = groups, allocationRatioPlanned = allocationRatioPlanned, ... 
) if (!is.na(allocationRatioPlanned) && allocationRatioPlanned <= 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "allocation ratio must be > 0") } allocationRatioPlanned <- designPlan$allocationRatioPlanned theta <- rep(NA_real_, length(pi1)) if (groups == 1) { designPlan$effect <- pi1 - thetaH0 theta <- (pi1 - thetaH0) / sqrt(pi1 * (1 - pi1)) + sign(pi1 - thetaH0) * .getOneMinusQNorm(design$alpha / design$sided) * (1 - sqrt(thetaH0 * (1 - thetaH0) / (pi1 * (1 - pi1)))) / sqrt(maxNumberOfSubjects) } else { if (!riskRatio) { designPlan$effect <- pi1 - pi2 - thetaH0 for (i in (1:length(pi1))) { fm <- .getFarringtonManningValues( rate1 = pi1[i], rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned, method = "diff" ) theta[i] <- sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) * (pi1[i] - pi2 - thetaH0) * sqrt(1 + allocationRatioPlanned) / sqrt(pi1[i] * (1 - pi1[i]) + allocationRatioPlanned * pi2 * (1 - pi2)) + sign(pi1[i] - pi2 - thetaH0) * .getOneMinusQNorm(design$alpha / design$sided) * (1 - sqrt(fm$ml1 * (1 - fm$ml1) + allocationRatioPlanned * fm$ml2 * (1 - fm$ml2)) / sqrt(pi1[i] * (1 - pi1[i]) + allocationRatioPlanned * pi2 * (1 - pi2))) / sqrt(maxNumberOfSubjects) } } else { designPlan$effect <- pi1 / pi2 - thetaH0 for (i in (1:length(pi1))) { fm <- .getFarringtonManningValues( rate1 = pi1[i], rate2 = pi2, theta = thetaH0, allocation = allocationRatioPlanned, method = "ratio" ) theta[i] <- sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) * (pi1[i] - thetaH0 * pi2) * sqrt(1 + allocationRatioPlanned) / sqrt(pi1[i] * (1 - pi1[i]) + allocationRatioPlanned * thetaH0^2 * pi2 * (1 - pi2)) + sign(pi1[i] - thetaH0 * pi2) * .getOneMinusQNorm(design$alpha / design$sided) * (1 - sqrt(fm$ml1 * (1 - fm$ml1) + allocationRatioPlanned * thetaH0^2 * fm$ml2 * (1 - fm$ml2)) / sqrt(pi1[i] * (1 - pi1[i]) + allocationRatioPlanned * thetaH0^2 * pi2 * (1 - pi2))) / sqrt(maxNumberOfSubjects) } } } if (!is.na(designPlan$directionUpper) && 
!designPlan$directionUpper) { theta <- -theta } powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber(design, theta, maxNumberOfSubjects) designPlan$expectedNumberOfSubjects <- powerAndAverageSampleNumber$averageSampleNumber designPlan$overallReject <- powerAndAverageSampleNumber$overallReject designPlan$rejectPerStage <- powerAndAverageSampleNumber$rejectPerStage designPlan$futilityStop <- powerAndAverageSampleNumber$overallFutility designPlan$futilityPerStage <- powerAndAverageSampleNumber$futilityPerStage designPlan$earlyStop <- powerAndAverageSampleNumber$overallEarlyStop parameterNames <- c("overallReject") if (design$kMax > 1) { parameterNames <- c( parameterNames, "expectedNumberOfSubjects", "rejectPerStage", "futilityStop", "futilityPerStage", "earlyStop" ) } for (parameterName in parameterNames) { designPlan$.setParameterType(parameterName, C_PARAM_GENERATED) } .addNumberOfSubjectsToPowerResult(designPlan) .addEffectScaleBoundaryDataToDesignPlan(designPlan) .hideFutilityStopsIfNotApplicable(designPlan) return(designPlan) } .getNumberOfSubjectsInner <- function(..., timeValue, accrualTime, accrualIntensity, maxNumberOfSubjects) { .assertIsSingleNumber(timeValue, "timeValue") if (length(accrualTime) != length(accrualIntensity)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'accrualTime' (", length(accrualIntensity), ") ", "must be equel to length of 'accrualIntensity' (", length(accrualIntensity), ")" ) } densityIntervals <- accrualTime if (length(accrualTime) > 1) { densityIntervals[2:length(accrualTime)] <- accrualTime[2:length(accrualTime)] - accrualTime[1:(length(accrualTime) - 1)] } densityVector <- accrualIntensity / sum(densityIntervals * accrualIntensity) for (l in 1:length(densityVector)) { if (timeValue <= accrualTime[l]) { if (l == 1) { return(timeValue * densityVector[l] * maxNumberOfSubjects) } else { return((sum(densityVector[1:(l - 1)] * densityIntervals[1:(l - 1)]) + (timeValue - accrualTime[l - 1]) * densityVector[l]) * 
maxNumberOfSubjects) } } } return(maxNumberOfSubjects) } .getNumberOfSubjects <- function(..., time, accrualTime, accrualIntensity, maxNumberOfSubjects) { subjectNumbers <- c() for (timeValue in time) { if (is.na(timeValue)) { return(NA_real_) } subjectNumbers <- c( subjectNumbers, .getNumberOfSubjectsInner( timeValue = timeValue, accrualTime = accrualTime, accrualIntensity = accrualIntensity, maxNumberOfSubjects = maxNumberOfSubjects ) ) } return(subjectNumbers) } #' @title #' Get Power Survival #' #' @description #' Returns the power, stopping probabilities, and expected sample size for testing #' the hazard ratio in a two treatment groups survival design. #' #' @inheritParams param_design_with_default #' @inheritParams param_typeOfComputation #' @inheritParams param_allocationRatioPlanned #' @inheritParams param_thetaH0 #' @inheritParams param_lambda1 #' @inheritParams param_lambda2 #' @inheritParams param_pi1_survival #' @inheritParams param_pi2_survival #' @inheritParams param_median1 #' @inheritParams param_median2 #' @inheritParams param_piecewiseSurvivalTime #' @inheritParams param_directionUpper #' @inheritParams param_accrualTime #' @inheritParams param_accrualIntensity #' @inheritParams param_accrualIntensityType #' @inheritParams param_eventTime #' @inheritParams param_hazardRatio #' @inheritParams param_kappa #' @inheritParams param_dropoutRate1 #' @inheritParams param_dropoutRate2 #' @inheritParams param_dropoutTime #' @param maxNumberOfEvents \code{maxNumberOfEvents > 0} is the maximum number of events, it determines #' the power of the test and needs to be specified. #' @inheritParams param_maxNumberOfSubjects_survival #' @inheritParams param_three_dots #' #' @details #' At given design the function calculates the power, stopping probabilities, and expected #' sample size at given number of events and number of subjects. 
#' It also calculates the time when the required events are expected under the given
#' assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times
#' and constant or non-constant piecewise accrual).
#' Additionally, an allocation ratio = n1/n2 can be specified where n1 and n2 are the number
#' of subjects in the two treatment groups.
#'
#' The formula of Kim & Tsiatis (Biometrics, 1990)
#' is used to calculate the expected number of events under the alternative
#' (see also Lakatos & Lan, Statistics in Medicine, 1992). These formulas are generalized to piecewise survival times and
#' non-constant piecewise accrual over time.\cr
#'
#' @template details_piecewise_survival
#'
#' @template details_piecewise_accrual
#'
#' @template return_object_trial_design_plan
#' @template how_to_get_help_for_generics
#'
#' @family power functions
#'
#' @template examples_get_power_survival
#'
#' @export
#'
getPowerSurvival <- function(design = NULL, ...,
        typeOfComputation = c("Schoenfeld", "Freedman", "HsiehFreedman"),
        thetaH0 = 1, # C_THETA_H0_SURVIVAL_DEFAULT
        directionUpper = NA,
        pi1 = NA_real_,
        pi2 = NA_real_,
        lambda1 = NA_real_,
        lambda2 = NA_real_,
        median1 = NA_real_,
        median2 = NA_real_,
        kappa = 1,
        hazardRatio = NA_real_,
        piecewiseSurvivalTime = NA_real_,
        allocationRatioPlanned = 1, # C_ALLOCATION_RATIO_DEFAULT
        eventTime = 12, # C_EVENT_TIME_DEFAULT
        accrualTime = c(0, 12), # C_ACCRUAL_TIME_DEFAULT
        accrualIntensity = 0.1, # C_ACCRUAL_INTENSITY_DEFAULT
        accrualIntensityType = c("auto", "absolute", "relative"),
        maxNumberOfSubjects = NA_real_,
        maxNumberOfEvents = NA_real_,
        dropoutRate1 = 0, # C_DROP_OUT_RATE_1_DEFAULT
        dropoutRate2 = 0, # C_DROP_OUT_RATE_2_DEFAULT
        dropoutTime = 12 # C_DROP_OUT_TIME_DEFAULT
        ) {
    # Use the default power design when none is given; otherwise validate
    # the user-supplied design and warn about unsupported arguments.
    if (is.null(design)) {
        design <- .getDefaultDesign(..., type = "power")
        .warnInCaseOfUnknownArguments(
            functionName = "getPowerSurvival",
            ignore = .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE),
            ...
        )
    } else {
        .assertIsTrialDesign(design)
        .warnInCaseOfUnknownArguments(functionName = "getPowerSurvival", ...)
        .warnInCaseOfTwoSidedPowerArgument(...)
        .warnInCaseOfTwoSidedPowerIsDisabled(design)
    }

    # Build the survival design plan container; it normalizes/derives the
    # effect specification (pi/lambda/median/hazardRatio) and accrual setup.
    designPlan <- .createDesignPlanSurvival(
        objectType = "power",
        design = design,
        typeOfComputation = typeOfComputation,
        thetaH0 = thetaH0,
        pi2 = pi2,
        pi1 = pi1,
        allocationRatioPlanned = allocationRatioPlanned,
        accountForObservationTimes = TRUE,
        eventTime = eventTime,
        accrualTime = accrualTime,
        accrualIntensity = accrualIntensity,
        accrualIntensityType = accrualIntensityType,
        kappa = kappa,
        piecewiseSurvivalTime = piecewiseSurvivalTime,
        lambda2 = lambda2,
        lambda1 = lambda1,
        median1 = median1,
        median2 = median2,
        directionUpper = directionUpper,
        maxNumberOfEvents = maxNumberOfEvents,
        maxNumberOfSubjects = maxNumberOfSubjects,
        dropoutRate1 = dropoutRate1,
        dropoutRate2 = dropoutRate2,
        dropoutTime = dropoutTime,
        hazardRatio = hazardRatio
    )

    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)

    # Effect size on the standardized test-statistic scale, depending on the
    # chosen approximation (Schoenfeld / Freedman / Hsieh-Freedman).
    if (designPlan$typeOfComputation == "Schoenfeld") {
        theta <- sqrt(allocationRatioPlanned) / (1 + allocationRatioPlanned) * (log(designPlan$hazardRatio / thetaH0))
    } else if (designPlan$typeOfComputation == "Freedman") {
        theta <- sqrt(allocationRatioPlanned) * (designPlan$hazardRatio - 1) /
            (allocationRatioPlanned * designPlan$hazardRatio + 1)
    } else if (designPlan$typeOfComputation == "HsiehFreedman") {
        theta <- sqrt(4 * allocationRatioPlanned) / (1 + allocationRatioPlanned) *
            (designPlan$hazardRatio - 1) / (designPlan$hazardRatio + 1)
    }

    # Flip the sign for "lower direction" alternatives.
    if (!is.na(designPlan$directionUpper) && !designPlan$directionUpper) {
        theta <- -theta
    }

    # For survival designs the "sample size" of the group sequential
    # computation is the number of events, hence nMax = maxNumberOfEvents.
    powerAndAverageSampleNumber <- getPowerAndAverageSampleNumber(
        design = design,
        theta = theta,
        nMax = maxNumberOfEvents
    )

    kMax <- design$kMax
    sided <- design$sided
    if (designPlan$.piecewiseSurvivalTime$.isLambdaBased()) {
        numberOfResults <- length(designPlan$hazardRatio)
    } else {
        numberOfResults <- length(designPlan$pi1)
    }

    # One column per effect scenario; one row per stage.
    stoppingProbs <- matrix(NA_real_, kMax, numberOfResults)
    designPlan$analysisTime <- matrix(NA_real_, kMax, numberOfResults)
    designPlan$numberOfSubjects <- matrix(NA_real_, kMax, numberOfResults)
    designPlan$studyDuration <- rep(NA_real_, numberOfResults)
    designPlan$expectedNumberOfSubjects <- rep(NA_real_, numberOfResults)
    eventsPerStage <- maxNumberOfEvents * design$informationRates
    parameterNames <- c(
        "analysisTime",
        "numberOfSubjects",
        "studyDuration",
        "expectedNumberOfSubjects",
        "eventsPerStage"
    )

    # Drop-out converted to exponential drop-out rates per group.
    phi <- -c(log(1 - dropoutRate1), log(1 - dropoutRate2)) / dropoutTime
    lambda1 <- designPlan$lambda1
    if (designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) {
        lambda1 <- rep(NA_real_, numberOfResults)
    }
    for (i in 1:numberOfResults) {
        # Analysis times
        # First find an upper bracket 'up' such that the expected event
        # fraction at time 'up' exceeds the required fraction at stage kMax;
        # doubling at most 50 times guards against unreachable event targets.
        up <- 2
        iterate <- 1
        while (eventsPerStage[kMax] / designPlan$maxNumberOfSubjects > .getEventProbabilities(
            time = up, accrualTimeVector = designPlan$accrualTime,
            accrualIntensity = designPlan$accrualIntensity,
            lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
            phi = phi, piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime,
            kappa = kappa, allocationRatioPlanned = allocationRatioPlanned,
            hazardRatio = designPlan$hazardRatio[i]
        )) {
            up <- 2 * up
            iterate <- iterate + 1
            if (iterate > 50) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "'maxNumberOfSubjects' (", designPlan$maxNumberOfSubjects, ") ",
                    "is too small to reach maximum number of events ",
                    "(presumably due to drop-out rates)"
                )
            }
        }
        # Then solve for each stage's analysis time by root finding on
        # "required event fraction - expected event fraction at time x".
        for (j in 1:kMax) {
            designPlan$analysisTime[j, i] <- .getOneDimensionalRoot(
                function(x) {
                    eventsPerStage[j] / designPlan$maxNumberOfSubjects - .getEventProbabilities(
                        time = x, accrualTimeVector = designPlan$accrualTime,
                        accrualIntensity = designPlan$accrualIntensity,
                        lambda2 = designPlan$lambda2, lambda1 = lambda1[i],
                        phi = phi, piecewiseSurvivalTime = designPlan$piecewiseSurvivalTime,
                        kappa = kappa,
                        allocationRatioPlanned = allocationRatioPlanned,
                        hazardRatio = designPlan$hazardRatio[i]
                    )
                },
                lower = 0, upper = up, tolerance = 1e-06,
                callingFunctionInformation = "getPowerSurvival"
            )
            if (is.na(designPlan$analysisTime[j, i])) {
                warning("Cannot calculate analysis time at stage ", j, ": ",
                    "'maxNumberOfSubjects' (", designPlan$maxNumberOfSubjects, ") is too ",
                    "small to reach maximum number of events",
                    call. = FALSE
                )
            }
        }
        if (kMax > 1) {
            designPlan$numberOfSubjects[, i] <- .getNumberOfSubjects(
                time = designPlan$analysisTime[, i],
                accrualTime = designPlan$accrualTime,
                accrualIntensity = designPlan$accrualIntensity,
                maxNumberOfSubjects = designPlan$maxNumberOfSubjects
            )
            # Treat NA futility probabilities as zero before combining.
            powerAndAverageSampleNumber$futilityPerStage[is.na(
                powerAndAverageSampleNumber$futilityPerStage[, i]
            ), i] <- 0
            # Stage-wise stopping probabilities = rejection + futility;
            # the last stage absorbs the remaining probability mass.
            stoppingProbs[, i] <- powerAndAverageSampleNumber$rejectPerStage[, i] +
                c(powerAndAverageSampleNumber$futilityPerStage[, i], 0)
            stoppingProbs[kMax, i] <- 1 - sum(stoppingProbs[1:(kMax - 1), i])
            # Expected study duration / sample size as probability-weighted sums.
            designPlan$studyDuration[i] <- designPlan$analysisTime[, i] %*% stoppingProbs[, i]
            designPlan$.setParameterType("studyDuration", C_PARAM_GENERATED)
            designPlan$expectedNumberOfSubjects[i] <-
                designPlan$numberOfSubjects[, i] %*% stoppingProbs[, i]
        }
    }
    if (kMax == 1) {
        designPlan$expectedNumberOfSubjects <- .getNumberOfSubjects(
            time = designPlan$analysisTime[1, ],
            accrualTime = designPlan$accrualTime,
            accrualIntensity = designPlan$accrualIntensity,
            maxNumberOfSubjects = designPlan$maxNumberOfSubjects
        )
        designPlan$numberOfSubjects <- matrix(designPlan$expectedNumberOfSubjects, nrow = 1)
    }

    designPlan$eventsPerStage <- matrix(eventsPerStage, ncol = 1)
    designPlan$.setParameterType("eventsPerStage", C_PARAM_GENERATED)
    designPlan$expectedNumberOfEvents <- powerAndAverageSampleNumber$averageSampleNumber
    designPlan$overallReject <- powerAndAverageSampleNumber$overallReject
    designPlan$rejectPerStage <- powerAndAverageSampleNumber$rejectPerStage
    designPlan$futilityStop <- powerAndAverageSampleNumber$overallFutility
    designPlan$futilityPerStage <- powerAndAverageSampleNumber$futilityPerStage
    designPlan$earlyStop <- powerAndAverageSampleNumber$overallEarlyStop
    parameterNames <- c(
        parameterNames,
        "expectedNumberOfEvents",
        "overallReject",
        "rejectPerStage",
        "futilityStop",
        "futilityPerStage",
        "earlyStop"
    )
    for (parameterName in parameterNames) {
        designPlan$.setParameterType(parameterName, C_PARAM_GENERATED)
    }
    # Stage-wise parameters are meaningless for fixed (single stage) designs.
    if (kMax == 1L) {
        designPlan$.setParameterType("numberOfSubjects", C_PARAM_NOT_APPLICABLE)
        designPlan$.setParameterType("eventsPerStage", C_PARAM_NOT_APPLICABLE)
        designPlan$.setParameterType("futilityStop", C_PARAM_NOT_APPLICABLE)
        designPlan$.setParameterType("earlyStop", C_PARAM_NOT_APPLICABLE)
        designPlan$.setParameterType("rejectPerStage", C_PARAM_NOT_APPLICABLE)
    }
    if (!any(is.na(designPlan$analysisTime)) && !any(is.na(designPlan$accrualTime))) {
        # Follow-up time = final analysis time minus end of accrual.
        designPlan$followUpTime <- designPlan$analysisTime[kMax, ] -
            designPlan$accrualTime[length(designPlan$accrualTime)]
        designPlan$.setParameterType("followUpTime", C_PARAM_GENERATED)
    }
    .addEffectScaleBoundaryDataToDesignPlan(designPlan)
    .addStudyDurationToDesignPlan(designPlan)
    .hideFutilityStopsIfNotApplicable(designPlan)
    return(designPlan)
}

# Marks futility-related output parameters as not applicable when the design
# has only default (i.e., no effective) futility bounds.
.hideFutilityStopsIfNotApplicable <- function(designPlan) {
    if (all(designPlan$.design$futilityBounds == C_FUTILITY_BOUNDS_DEFAULT)) {
        designPlan$.setParameterType("futilityStop", C_PARAM_NOT_APPLICABLE)
        designPlan$.setParameterType("futilityPerStage", C_PARAM_NOT_APPLICABLE)
    }
}

# Derives (maximum) study duration from the analysis times of the design plan;
# no-op when observation times are not accounted for.
.addStudyDurationToDesignPlan <- function(designPlan) {
    if (!designPlan$accountForObservationTimes) {
        return(invisible())
    }

    kMax <- designPlan$.design$kMax
    if (kMax == 1) {
        designPlan$studyDuration <- designPlan$analysisTime[1, ]
        designPlan$.setParameterType("studyDuration", C_PARAM_GENERATED)
        designPlan$maxStudyDuration <- designPlan$studyDuration
    } else {
        designPlan$maxStudyDuration <- designPlan$analysisTime[kMax, ]
        designPlan$.setParameterType("maxStudyDuration", C_PARAM_GENERATED)
    }
}
# Fills the stage-wise (cumulative) numbers of subjects of a power result
# from the design's information rates and 'maxNumberOfSubjects', splits them
# per treatment group via the planned allocation ratio, and sets the matching
# parameter types (fixed design: 'nFixed*'; group sequential: 'numberOfSubjects*').
.addNumberOfSubjectsToPowerResult <- function(designPlan) {
    design <- designPlan$.design
    designPlan$numberOfSubjects <- matrix(rep(NA_real_, design$kMax), ncol = 1)
    # Stage-wise increments proportional to the information rate increments.
    designPlan$numberOfSubjects[1, 1] <- design$informationRates[1] * designPlan$maxNumberOfSubjects
    if (design$kMax > 1) {
        designPlan$numberOfSubjects[2:design$kMax, 1] <- (design$informationRates[2:design$kMax] -
            design$informationRates[1:(design$kMax - 1)]) * designPlan$maxNumberOfSubjects
    }
    # Convert increments to cumulative numbers per stage.
    designPlan$numberOfSubjects <- .getColumnCumSum(designPlan$numberOfSubjects)
    designPlan$numberOfSubjects1 <- .getNumberOfSubjects1(
        designPlan$numberOfSubjects, designPlan$allocationRatioPlanned
    )
    designPlan$numberOfSubjects2 <- .getNumberOfSubjects2(
        designPlan$numberOfSubjects, designPlan$allocationRatioPlanned
    )
    if (designPlan$.design$kMax == 1) {
        designPlan$nFixed <- as.numeric(designPlan$numberOfSubjects)
        designPlan$.setParameterType("nFixed", C_PARAM_GENERATED)
        if (designPlan$groups == 2) {
            designPlan$nFixed1 <- as.numeric(designPlan$numberOfSubjects1)
            designPlan$nFixed2 <- as.numeric(designPlan$numberOfSubjects2)
            designPlan$.setParameterType("nFixed1", C_PARAM_GENERATED)
            designPlan$.setParameterType("nFixed2", C_PARAM_GENERATED)
        }
    } else {
        designPlan$.setParameterType("numberOfSubjects", C_PARAM_GENERATED)
        # With one group or 1:1 allocation the per-group numbers add no information.
        if ((designPlan$groups == 1) || designPlan$allocationRatioPlanned == 1) {
            designPlan$.setParameterType("numberOfSubjects1", C_PARAM_NOT_APPLICABLE)
            designPlan$.setParameterType("numberOfSubjects2", C_PARAM_NOT_APPLICABLE)
        } else {
            designPlan$.setParameterType("numberOfSubjects1", C_PARAM_GENERATED)
            designPlan$.setParameterType("numberOfSubjects2", C_PARAM_GENERATED)
        }
    }
}
rpact/R/class_event_probabilities.R0000644000176200001440000004326414445307575017144 0ustar liggesusers## |
## | *Event probabilities classes*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#'
#' @name EventProbabilities
#'
#' @title
#' Event Probabilities
#'
#' @template field_time
#' @template field_accrualTime
#' @template field_accrualIntensity
#' @template field_kappa
#' @template field_piecewiseSurvivalTime
#' @template field_lambda1
#' @template field_lambda2
#' @template field_allocationRatioPlanned
#' @template field_hazardRatio
#' @template field_dropoutRate1
#' @template field_dropoutRate2
#' @template field_dropoutTime
#' @template field_maxNumberOfSubjects
#' @template field_overallEventProbabilities
#' @template field_cumulativeEventProbabilities
#' @template field_eventProbabilities1
#' @template field_eventProbabilities2
#'
#' @description
#' Class for the definition of event probabilities.
#'
#' @details
#' \code{EventProbabilities} is a class for the definition of event probabilities.
#'
#' @importFrom methods new
#'
#' @include f_core_constants.R
#' @include class_core_parameter_set.R
#' @include class_time.R
#'
#' @keywords internal
#'
EventProbabilities <- setRefClass("EventProbabilities",
    contains = "ParameterSet",
    fields = list(
        # internal helper objects (dot-prefixed fields are not user parameters)
        .piecewiseSurvivalTime = "PiecewiseSurvivalTime",
        .accrualTime = "AccrualTime",
        .plotSettings = "PlotSettings",
        time = "numeric",
        accrualTime = "numeric",
        accrualIntensity = "numeric",
        kappa = "numeric",
        piecewiseSurvivalTime = "numeric",
        lambda1 = "numeric",
        lambda2 = "numeric",
        allocationRatioPlanned = "numeric",
        hazardRatio = "numeric",
        dropoutRate1 = "numeric",
        dropoutRate2 = "numeric",
        dropoutTime = "numeric",
        maxNumberOfSubjects = "numeric",
        overallEventProbabilities = "numeric", # deprecated
        cumulativeEventProbabilities = "numeric",
        eventProbabilities1 = "numeric",
        eventProbabilities2 = "numeric"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            .plotSettings <<- PlotSettings()
            .parameterNames <<- C_PARAMETER_NAMES
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS
            # deprecated field is hidden from the standard output
            .setParameterType("overallEventProbabilities", C_PARAM_NOT_APPLICABLE) # deprecated
        },
        getPlotSettings = function() {
            return(.plotSettings)
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing event probabilities objects"
            .resetCat()
            if (showType == 2) {
                # technical view: delegate to ParameterSet
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                .cat("Event probabilities at given time:\n\n",
                    heading = 1,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                # grouped output: user defined, defaults, then generated values
                .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getGeneratedParameters(), "Time and output",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
                .cat("Legend:\n", heading = 2, consoleOutputEnabled = consoleOutputEnabled)
                .cat(" (i): values of treatment arm i\n", consoleOutputEnabled = consoleOutputEnabled)
                .cat("\n", consoleOutputEnabled = consoleOutputEnabled)
            }
        }
    )
)

#'
#' @name NumberOfSubjects
#'
#' @title
#' Number Of Subjects
#'
#' @description
#' Class for the definition of number of subjects results.
#'
#' @template field_time
#' @template field_accrualTime
#' @template field_accrualIntensity
#' @template field_maxNumberOfSubjects
#' @template field_numberOfSubjects
#'
#' @details
#' \code{NumberOfSubjects} is a class for the definition of number of subjects results.
#'
#' @importFrom methods new
#'
#' @include f_core_constants.R
#' @include class_core_parameter_set.R
#' @include class_time.R
#'
#' @keywords internal
#'
NumberOfSubjects <- setRefClass("NumberOfSubjects",
    contains = "ParameterSet",
    fields = list(
        # internal helper objects (dot-prefixed fields are not user parameters)
        .accrualTime = "AccrualTime",
        .plotSettings = "PlotSettings",
        time = "numeric",
        accrualTime = "numeric",
        accrualIntensity = "numeric",
        maxNumberOfSubjects = "numeric",
        numberOfSubjects = "numeric"
    ),
    methods = list(
        initialize = function(...) {
            callSuper(...)
            .plotSettings <<- PlotSettings()
            .parameterNames <<- C_PARAMETER_NAMES
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS
        },
        getPlotSettings = function() {
            return(.plotSettings)
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing number of subjects objects"
            .resetCat()
            if (showType == 2) {
                # technical view: delegate to ParameterSet
                callSuper(showType = showType, digits = digits, consoleOutputEnabled = consoleOutputEnabled)
            } else {
                .cat("Number of recruited subjects at given time:\n\n",
                    heading = 1,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getUserDefinedParameters(), "User defined parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getDefaultParameters(), "Default parameters",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showParametersOfOneGroup(.getGeneratedParameters(), "Time and output",
                    orderByParameterName = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled
                )
                .showUnknownParameters(consoleOutputEnabled = consoleOutputEnabled)
                .cat("\n", consoleOutputEnabled = consoleOutputEnabled)
            }
        }
    )
)

#'
#' @title
#' Event Probabilities Plotting
#'
#' @description
#' Plots an object that inherits from class \code{\link{EventProbabilities}}.
#'
#' @details
#' Generic function to plot an event probabilities object.
#'
#' @param x The object that inherits from \code{\link{EventProbabilities}}.
#' @param y An optional object that inherits from \code{\link{NumberOfSubjects}}.
#' @inheritParams param_allocationRatioPlanned
#' @param main The main title.
#' @param xlab The x-axis label.
#' @param ylab The y-axis label.
#' @param type The plot type (default = 1). Note that at the moment only one type is available.
#' @param legendTitle The legend title, default is \code{""}.
#' @inheritParams param_palette
#' @inheritParams param_plotPointsEnabled
#' @inheritParams param_showSource
#' @inheritParams param_plotSettings
#' @inheritParams param_legendPosition
#' @inheritParams param_three_dots_plot
#'
#' @details
#' Generic function to plot a parameter set.
#'
#' @template return_object_ggplot
#'
#' @export
#'
plot.EventProbabilities <- function(x, y, ...,
        allocationRatioPlanned = x$allocationRatioPlanned,
        main = NA_character_, xlab = NA_character_, ylab = NA_character_,
        type = 1L,
        legendTitle = NA_character_,
        palette = "Set1",
        plotPointsEnabled = NA,
        legendPosition = NA_integer_,
        showSource = FALSE,
        plotSettings = NULL) {
    # Capture the call to recover the object names for "show source" output.
    fCall <- match.call(expand.dots = FALSE)
    xObjectName <- deparse(fCall$x)
    yObjectName <- NA_character_
    .assertGgplotIsInstalled()
    .assertIsValidLegendPosition(legendPosition)
    .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, 2L)
    # .assertIsSingleInteger(type, "type", naAllowed = FALSE, validateType = FALSE)

    # Optional companion object: number of subjects over time.
    numberOfSubjectsObject <- NULL
    if (!missing(y) && inherits(y, "NumberOfSubjects")) {
        numberOfSubjectsObject <- y
        yObjectName <- deparse(fCall$y)
    }

    # Scaling factors: without a known maxNumberOfSubjects the probabilities
    # are plotted as-is (factor 1); otherwise they are scaled to expected counts.
    maxNumberOfSubjects <- 1
    maxNumberOfSubjects1 <- 1
    maxNumberOfSubjects2 <- 1
    maxNumberOfSubjectsToUse <- NA_integer_
    if (!is.null(numberOfSubjectsObject)) {
        maxNumberOfSubjectsToUse <- numberOfSubjectsObject$maxNumberOfSubjects
    }
    if (is.na(maxNumberOfSubjectsToUse)) {
        maxNumberOfSubjectsToUse <- x$maxNumberOfSubjects
    } else if (!is.na(x$maxNumberOfSubjects) && x$maxNumberOfSubjects != maxNumberOfSubjectsToUse) {
        stop("'x' (EventProbabilities) and 'y' (NumberOfSubjects) must have the same 'maxNumberOfSubjects' defined")
    }
    if (!is.na(maxNumberOfSubjectsToUse)) {
        maxNumberOfSubjects <- maxNumberOfSubjectsToUse
        maxNumberOfSubjects1 <- .getNumberOfSubjects1(maxNumberOfSubjects, allocationRatioPlanned)
        maxNumberOfSubjects2 <- .getNumberOfSubjects2(maxNumberOfSubjects, allocationRatioPlanned)
    }

    # Choose default title and y-axis label depending on what is plotted.
    if (is.na(maxNumberOfSubjectsToUse)) {
        mainDefault <- "Event Probabilities"
    } else {
        mainDefault <- ifelse(!is.null(numberOfSubjectsObject),
            "Number of subjects and expected number of events",
            "Expected number of events"
        )
    }
    main <- ifelse(is.na(main), mainDefault, main)
    if (!is.null(numberOfSubjectsObject)) {
        ylabDefault <- "Number of subjects/events"
    } else {
        ylabDefault <- ifelse(is.na(maxNumberOfSubjectsToUse),
            "Event probabilities", "Expected number of events"
        )
    }
    ylab <- ifelse(is.na(ylab), ylabDefault, ylab)

    # Long-format data frame: overall, treatment, and control series.
    data <- data.frame(
        xValues = c(x$time, x$time, x$time),
        yValues = c(
            x$cumulativeEventProbabilities * maxNumberOfSubjects, # cumulative
            x$eventProbabilities1 * maxNumberOfSubjects1, # treatment
            x$eventProbabilities2 * maxNumberOfSubjects2 # control
        ),
        categories = c(
            rep("Overall", length(x$time)),
            rep("Treatment", length(x$time)),
            rep("Control", length(x$time))
        )
    )
    data$categories <- factor(data$categories, levels = c("Overall", "Treatment", "Control"))
    if (!is.null(numberOfSubjectsObject)) {
        data <- rbind(
            data,
            data.frame(
                xValues = numberOfSubjectsObject$time,
                yValues = numberOfSubjectsObject$numberOfSubjects,
                categories = "Number of subjects"
            )
        )
    }
    if (is.na(legendPosition)) {
        legendPosition <- C_POSITION_LEFT_TOP
    }
    if (is.na(legendTitle)) {
        legendTitle <- ""
    }

    # "show source" support: return the plot commands instead of plotting.
    srcCmd <- .showPlotSourceInformation(
        objectName = xObjectName,
        xParameterName = "time",
        yParameterNames = c("cumulativeEventProbabilities", "eventProbabilities1", "eventProbabilities2"),
        type = type,
        showSource = showSource
    )
    if (!is.na(yObjectName)) {
        srcCmd2 <- .showPlotSourceInformation(
            objectName = yObjectName,
            xParameterName = "time",
            yParameterNames = "numberOfSubjects",
            type = type,
            showSource = showSource
        )
        if (is.list(srcCmd)) {
            if (!is.null(srcCmd2[["y"]])) {
                # Merge the y-series when both objects share the same time grid,
                # otherwise keep a separate x2/y2 pair.
                if (identical(x[["time"]], y[["time"]])) {
                    srcCmd$y <- c(srcCmd$y, srcCmd2$y)
                } else {
                    srcCmd$x2 <- srcCmd2[["x"]]
                    srcCmd$y2 <- srcCmd2$y
                }
            }
        } else {
            srcCmd <- c(srcCmd, srcCmd2)
        }
    }
    if (!is.null(srcCmd)) {
        if (.isSpecialPlotShowSourceArgument(showSource)) {
            return(invisible(srcCmd))
        }
        return(srcCmd)
    }
    if (is.null(plotSettings)) {
        plotSettings <- x$.plotSettings
    }
    return(.plotDataFrame(data,
        mainTitle = main,
        xlab = xlab, ylab = ylab,
        xAxisLabel = "Time",
        yAxisLabel1 = NA_character_, yAxisLabel2 = NA_character_,
        palette = palette,
        plotPointsEnabled = plotPointsEnabled,
        legendTitle = legendTitle,
        legendPosition = legendPosition,
        scalingFactor1 = 1, scalingFactor2 = 1,
        addPowerAndAverageSampleNumber = FALSE,
        mirrorModeEnabled = FALSE,
        ratioEnabled = FALSE,
        plotSettings = plotSettings,
        sided = 1, ...
    ))
}

#'
#' @title
#' Number Of Subjects Plotting
#'
#' @description
#' Plots an object that inherits from class \code{\link{NumberOfSubjects}}.
#'
#' @details
#' Generic function to plot a "number of subjects" object.
#'
#' @param x The object that inherits from \code{\link{NumberOfSubjects}}.
#' @param y An optional object that inherits from \code{\link{EventProbabilities}}.
#' @param allocationRatioPlanned The planned allocation ratio \code{n1 / n2} for a two treatment groups
#' design, default is \code{1}. Will be ignored if \code{y} is undefined.
#' @param main The main title.
#' @param xlab The x-axis label.
#' @param ylab The y-axis label.
#' @param type The plot type (default = 1). Note that at the moment only one type is available.
#' @param legendTitle The legend title, default is \code{""}.
#' @inheritParams param_palette
#' @inheritParams param_plotPointsEnabled
#' @inheritParams param_showSource
#' @inheritParams param_plotSettings
#' @inheritParams param_legendPosition
#' @inheritParams param_three_dots_plot
#'
#' @details
#' Generic function to plot a parameter set.
#'
#' @template return_object_ggplot
#'
#' @export
#'
plot.NumberOfSubjects <- function(x, y, ...,
        allocationRatioPlanned = NA_real_,
        main = NA_character_, xlab = NA_character_, ylab = NA_character_,
        type = 1L,
        legendTitle = NA_character_,
        palette = "Set1",
        plotPointsEnabled = NA,
        legendPosition = NA_integer_,
        showSource = FALSE,
        plotSettings = NULL) {
    # Capture the call to recover the object name for "show source" output.
    fCall <- match.call(expand.dots = FALSE)
    objectName <- deparse(fCall$x)
    # .assertIsSingleInteger(type, "type", naAllowed = FALSE, validateType = FALSE)

    # If an EventProbabilities object is supplied as 'y', delegate to
    # plot.EventProbabilities with swapped arguments (combined plot).
    if (!missing(y) && inherits(y, "EventProbabilities")) {
        return(plot.EventProbabilities(
            x = y, y = x,
            allocationRatioPlanned = ifelse(is.na(allocationRatioPlanned),
                y$allocationRatioPlanned, allocationRatioPlanned
            ),
            main = main, xlab = xlab, ylab = ylab,
            type = type,
            legendTitle = legendTitle,
            palette = palette,
            plotPointsEnabled = plotPointsEnabled,
            legendPosition = legendPosition,
            showSource = showSource,
            plotSettings = plotSettings, ...
        ))
    }

    # Without 'y' the allocation ratio has no effect; warn if it was set.
    if (!is.na(allocationRatioPlanned)) {
        warning("'allocationRatioPlanned' (", allocationRatioPlanned,
            ") will be ignored because 'y' is undefined (for more information see ?plot.NumberOfSubjects)",
            call. = FALSE
        )
    }
    .assertGgplotIsInstalled()
    .assertIsValidLegendPosition(legendPosition)
    main <- ifelse(is.na(main), "Number of Subjects", main)
    ylab <- ifelse(is.na(ylab), "Number of subjects", ylab)
    data <- data.frame(
        xValues = x$time,
        yValues = x$numberOfSubjects,
        categories = "Number of subjects"
    )
    if (is.na(legendPosition)) {
        legendPosition <- -1
    }
    if (is.na(legendTitle)) {
        legendTitle <- ""
    }
    # "show source" support: return the plot commands instead of plotting.
    srcCmd <- .showPlotSourceInformation(
        objectName = objectName,
        xParameterName = "time",
        yParameterNames = "numberOfSubjects",
        type = type,
        showSource = showSource
    )
    if (!is.null(srcCmd)) {
        if (.isSpecialPlotShowSourceArgument(showSource)) {
            return(invisible(srcCmd))
        }
        return(srcCmd)
    }
    if (is.null(plotSettings)) {
        plotSettings <- x$.plotSettings
    }
    return(.plotDataFrame(data,
        mainTitle = main,
        xlab = xlab, ylab = ylab,
        xAxisLabel = "Time",
        yAxisLabel1 = NA_character_, yAxisLabel2 = NA_character_,
        palette = palette,
        plotPointsEnabled = plotPointsEnabled,
        legendTitle = legendTitle,
        legendPosition = legendPosition,
        scalingFactor1 = 1, scalingFactor2 = 1,
        addPowerAndAverageSampleNumber = FALSE,
        mirrorModeEnabled = FALSE,
        ratioEnabled = FALSE,
        plotSettings = plotSettings,
        sided = 1, ...
    ))
}
rpact/R/f_simulation_enrichment_rates.R0000644000176200001440000016456514445307576020042 0ustar liggesusers## |
## | *Simulation of enrichment design with binary data*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_simulation_enrichment.R
NULL

# Calculates the number of newly recruited subjects for the next stage of an
# enrichment rates simulation. If 'conditionalPower' is NA the fixed planned
# increment is used; otherwise the sample size is recalculated from the
# conditional power formula for two rates (normal approximation), clipped to
# [minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage]. If no population
# is selected anymore, 0 is returned.
.getSimulationRatesEnrichmentStageSubjects <- function(..., stage,
        directionUpper, conditionalPower, conditionalCriticalValue,
        plannedSubjects, allocationRatioPlanned,
        selectedPopulations, piTreatmentH1, piControlH1,
        overallRatesTreatment, overallRatesControl,
        minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage) {
    stage <- stage - 1 # to be consistent with non-enrichment situation
    gMax <- nrow(overallRatesTreatment)
    if (!is.na(conditionalPower)) {
        if (any(selectedPopulations[1:gMax, stage + 1], na.rm = TRUE)) {
            # H1 assumptions: use user-specified rates if given, otherwise the
            # observed overall rates of the selected populations.
            if (is.na(piControlH1)) {
                pi2H1 <- overallRatesControl[selectedPopulations[1:gMax, stage + 1], stage]
            } else {
                pi2H1 <- piControlH1
            }
            if (is.na(piTreatmentH1)) {
                pi1H1 <- overallRatesTreatment[selectedPopulations[1:gMax, stage + 1], stage]
            } else {
                pi1H1 <- piTreatmentH1
            }
            # Pooled rate under the allocation ratio.
            pim <- (allocationRatioPlanned[stage] * pi1H1 + pi2H1) / (1 + allocationRatioPlanned[stage])
            if (conditionalCriticalValue[stage] > 8) {
                # Practically unreachable critical value: use the maximum.
                newSubjects <- maxNumberOfSubjectsPerStage[stage + 1]
            } else {
                newSubjects <- (1 + 1 / allocationRatioPlanned[stage]) * (max(0, conditionalCriticalValue[stage] *
                    sqrt(pim * (1 - pim) * (1 +
gMax, ncol = kMax) if (.isTrialDesignFisher(design)) { weights <- .getWeightsFisher(design) } else if (.isTrialDesignInverseNormal(design)) { weights <- .getWeightsInverseNormal(design) } for (k in 1:kMax) { const <- allocationRatioPlanned[k] selectedSubsets[, k] <- .createSelectedSubsets(k, selectedPopulations) if (k == 1) { # subjectsPerStage[, k] <- stats::rmultinom(1, plannedSubjects[k], prevalences) subjectsPerStage[, k] <- plannedSubjects[k] * prevalences } else { prevSelected <- prevalences / sum(prevalences[selectedSubsets[, k]]) prevSelected[!selectedSubsets[, k]] <- 0 if (sum(prevSelected, na.rm = TRUE) > 0) { # subjectsPerStage[, k] <- stats::rmultinom(1, plannedSubjects[k] - plannedSubjects[k - 1], prevSelected) subjectsPerStage[, k] <- (plannedSubjects[k] - plannedSubjects[k - 1]) * prevSelected } else { break } } selsubs <- !is.na(subjectsPerStage[, k]) & subjectsPerStage[, k] > 0 if (any(round(subjectsPerStage[selsubs, k] * const / (1 + const)) < 1) || any(round(subjectsPerStage[selsubs, k] / (1 + const)) < 1)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "at least one sample size specification too small to create simulation results, ", "e.g., due to small prevalences of subsets" ) } simEventsTreatment[selsubs, k] <- stats::rbinom( rep(1, sum(selsubs)), round(subjectsPerStage[selsubs, k] * const / (1 + const)), piTreatments[selsubs] ) simEventsControl[selsubs, k] <- stats::rbinom( rep(1, sum(selsubs)), round(subjectsPerStage[selsubs, k] / (1 + const)), piControls[selsubs] ) if (gMax == 1) { rm <- (simEventsControl[1, k] + simEventsTreatment[1, k]) / subjectsPerStage[1, k] if (rm <= 0 || rm >= 1) { testStatistics[1, k] <- 0 } else { testStatistics[1, k] <- (2 * directionUpper - 1) * (simEventsTreatment[1, k] * (1 + const) / const - simEventsControl[1, k] * (1 + const)) / subjectsPerStage[1, k] / sqrt(rm * (1 - rm)) * sqrt(subjectsPerStage[1, k] * const / (1 + const)^2) } populationSubjectsPerStage[1, k] <- subjectsPerStage[1, k] overallRatesTreatment[1, 
k] <- sum(simEventsTreatment[1, 1:k]) / round(const / (1 + const) * sum(subjectsPerStage[1, 1:k])) overallRatesControl[1, k] <- sum(simEventsControl[1, 1:k]) / round(1 / (1 + const) * sum(subjectsPerStage[1, 1:k])) overallEffectSizes[1, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[1, k] - overallRatesControl[1, k]) rm <- sum(simEventsControl[1, 1:k] + simEventsTreatment[1, 1:k]) / sum(subjectsPerStage[1, 1:k]) if (rm == 0 || rm == 1) { overallTestStatistics[1, k] <- 0 } else { overallTestStatistics[1, k] <- overallEffectSizes[1, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[1, 1:k]) * const / (1 + const)^2) } } else if (gMax == 2) { # Population S1 rm <- (simEventsControl[1, k] + simEventsTreatment[1, k]) / subjectsPerStage[1, k] if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { testStatistics[1, k] <- 0 } else { testStatistics[1, k] <- (2 * directionUpper - 1) * (simEventsTreatment[1, k] * (1 + const) / const - simEventsControl[1, k] * (1 + const)) / subjectsPerStage[1, k] / sqrt(rm * (1 - rm)) * sqrt(subjectsPerStage[1, k] * const / (1 + const)^2) } } populationSubjectsPerStage[1, k] <- subjectsPerStage[1, k] overallRatesTreatment[1, k] <- sum(simEventsTreatment[1, 1:k]) / round(const / (1 + const) * sum(subjectsPerStage[1, 1:k])) overallRatesControl[1, k] <- sum(simEventsControl[1, 1:k]) / round(1 / (1 + const) * sum(subjectsPerStage[1, 1:k])) overallEffectSizes[1, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[1, k] - overallRatesControl[1, k]) rm <- sum(simEventsControl[1, 1:k] + simEventsTreatment[1, 1:k]) / sum(subjectsPerStage[1, 1:k]) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { overallTestStatistics[1, k] <- 0 } else { overallTestStatistics[1, k] <- overallEffectSizes[1, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[1, 1:k]) * const / (1 + const)^2) } } # Full population if (stratifiedAnalysis) { rm <- (simEventsControl[1:2, k] + simEventsTreatment[1:2, k]) / subjectsPerStage[1:2, k] rm[!is.na(rm) & (rm <= 0 | rm >= 1)] <- 0 if 
(!all(is.na(rm))) { if (all(na.omit(rm) == 0) || all(na.omit(rm) == 1)) { testStatistics[2, k] <- 0 } else { testStatistics[2, k] <- sqrt(const) / (1 + const) * (2 * directionUpper - 1) * sum(subjectsPerStage[1:2, k] * (simEventsTreatment[1:2, k] * (1 + const) / const - simEventsControl[1:2, k] * (1 + const)) / subjectsPerStage[1:2, k], na.rm = TRUE) / sqrt(sum(rm * (1 - rm) * subjectsPerStage[1:2, k], na.rm = TRUE)) } } } else { rm <- sum(simEventsControl[1:2, k] + simEventsTreatment[1:2, k], na.rm = TRUE) / sum(subjectsPerStage[1:2, k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { testStatistics[2, k] <- 0 } else { testStatistics[2, k] <- (2 * directionUpper - 1) * sum(simEventsTreatment[1:2, k] * (1 + const) / const - simEventsControl[1:2, k] * (1 + const), na.rm = TRUE) / sum(subjectsPerStage[1:2, k], na.rm = TRUE) / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[1:2, k], na.rm = TRUE) * const / (1 + const)^2) } } } populationSubjectsPerStage[2, k] <- sum(subjectsPerStage[1:2, k], na.rm = TRUE) overallRatesTreatment[2, k] <- sum(simEventsTreatment[1:2, 1:k], na.rm = TRUE) / round(const / (1 + const) * sum(subjectsPerStage[1:2, 1:k], na.rm = TRUE)) overallRatesControl[2, k] <- sum(simEventsControl[1:2, 1:k], na.rm = TRUE) / round(1 / (1 + const) * sum(subjectsPerStage[1:2, 1:k], na.rm = TRUE)) overallEffectSizes[2, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[2, k] - overallRatesControl[2, k]) rm <- sum(simEventsControl[1:2, 1:k] + simEventsTreatment[1:2, 1:k], na.rm = TRUE) / sum(subjectsPerStage[1:2, 1:k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { overallTestStatistics[2, k] <- 0 } else { overallTestStatistics[2, k] <- overallEffectSizes[2, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[1:2, 1:k], na.rm = TRUE) * const / (1 + const)^2) } } } else if (gMax == 3) { # Population S1 if (stratifiedAnalysis) { rm <- (simEventsControl[c(1, 3), k] + simEventsTreatment[c(1, 3), k]) / subjectsPerStage[c(1, 3), k] 
rm[!is.na(rm) & (rm <= 0 | rm >= 1)] <- 0 if (!all(is.na(rm))) { if (all(na.omit(rm) == 0) || all(na.omit(rm) == 1)) { testStatistics[1, k] <- 0 } else { testStatistics[1, k] <- sqrt(const) / (1 + const) * (2 * directionUpper - 1) * sum(subjectsPerStage[c(1, 3), k] * (simEventsTreatment[c(1, 3), k] * (1 + const) / const - simEventsControl[c(1, 3), k] * (1 + const)) / subjectsPerStage[c(1, 3), k], na.rm = TRUE) / sqrt(sum(rm * (1 - rm) * subjectsPerStage[c(1, 3), k], na.rm = TRUE)) } } } else { rm <- sum(simEventsControl[c(1, 3), k] + simEventsTreatment[c(1, 3), k], na.rm = TRUE) / sum(subjectsPerStage[c(1, 3), k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { testStatistics[1, k] <- 0 } else { testStatistics[1, k] <- (2 * directionUpper - 1) * sum(simEventsTreatment[c(1, 3), k] * (1 + const) / const - simEventsControl[c(1, 3), k] * (1 + const), na.rm = TRUE) / sum(subjectsPerStage[c(1, 3), k], na.rm = TRUE) / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(1, 3), k], na.rm = TRUE) * const / (1 + const)^2) } } } populationSubjectsPerStage[1, k] <- sum(subjectsPerStage[c(1, 3), k], na.rm = TRUE) overallRatesTreatment[1, k] <- sum(simEventsTreatment[c(1, 3), 1:k], na.rm = TRUE) / round(const / (1 + const) * sum(subjectsPerStage[c(1, 3), 1:k], na.rm = TRUE)) overallRatesControl[1, k] <- sum(simEventsControl[c(1, 3), 1:k], na.rm = TRUE) / round(1 / (1 + const) * sum(subjectsPerStage[c(1, 3), 1:k], na.rm = TRUE)) overallEffectSizes[1, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[1, k] - overallRatesControl[1, k]) rm <- sum(simEventsControl[c(1, 3), 1:k] + simEventsTreatment[c(1, 3), 1:k], na.rm = TRUE) / sum(subjectsPerStage[c(1, 3), 1:k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { overallTestStatistics[1, k] <- 0 } else { overallTestStatistics[1, k] <- overallEffectSizes[1, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(1, 3), 1:k], na.rm = TRUE) * const / (1 + const)^2) } } # Population S2 if (stratifiedAnalysis) { rm <- 
(simEventsControl[c(2, 3), k] + simEventsTreatment[c(2, 3), k]) / subjectsPerStage[c(2, 3), k] rm[!is.na(rm) & (rm <= 0 | rm >= 1)] <- 0 if (!all(is.na(rm))) { if (all(na.omit(rm) == 0) || all(na.omit(rm) == 1)) { testStatistics[2, k] <- 0 } else { testStatistics[2, k] <- sqrt(const) / (1 + const) * (2 * directionUpper - 1) * sum(subjectsPerStage[c(2, 3), k] * (simEventsTreatment[c(2, 3), k] * (1 + const) / const - simEventsControl[c(2, 3), k] * (1 + const)) / subjectsPerStage[c(2, 3), k], na.rm = TRUE) / sqrt(sum(rm * (1 - rm) * subjectsPerStage[c(2, 3), k], na.rm = TRUE)) } } } else { rm <- sum(simEventsControl[c(2, 3), k] + simEventsTreatment[c(2, 3), k], na.rm = TRUE) / sum(subjectsPerStage[c(2, 3), k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { testStatistics[2, k] <- 0 } else { testStatistics[2, k] <- (2 * directionUpper - 1) * sum(simEventsTreatment[c(2, 3), k] * (1 + const) / const - simEventsControl[c(2, 3), k] * (1 + const), na.rm = TRUE) / sum(subjectsPerStage[c(2, 3), k], na.rm = TRUE) / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(2, 3), k], na.rm = TRUE) * const / (1 + const)^2) } } } populationSubjectsPerStage[2, k] <- sum(subjectsPerStage[c(2, 3), k], na.rm = TRUE) overallRatesTreatment[2, k] <- sum(simEventsTreatment[c(2, 3), 1:k], na.rm = TRUE) / round(const / (1 + const) * sum(subjectsPerStage[c(2, 3), 1:k], na.rm = TRUE)) overallRatesControl[2, k] <- sum(simEventsControl[c(2, 3), 1:k], na.rm = TRUE) / round(1 / (1 + const) * sum(subjectsPerStage[c(2, 3), 1:k], na.rm = TRUE)) overallEffectSizes[2, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[2, k] - overallRatesControl[2, k]) rm <- sum(simEventsControl[c(2, 3), 1:k] + simEventsTreatment[c(2, 3), 1:k], na.rm = TRUE) / sum(subjectsPerStage[c(2, 3), 1:k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { overallTestStatistics[2, k] <- 0 } else { overallTestStatistics[2, k] <- overallEffectSizes[2, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(2, 3), 
1:k], na.rm = TRUE) * const / (1 + const)^2) } } # Full population if (stratifiedAnalysis) { rm <- (simEventsControl[1:4, k] + simEventsTreatment[1:4, k]) / subjectsPerStage[1:4, k] rm[!is.na(rm) & (rm <= 0 | rm >= 1)] <- 0 if (!all(is.na(rm))) { if (all(na.omit(rm) == 0) || all(na.omit(rm) == 1)) { testStatistics[3, k] <- 0 } else { testStatistics[3, k] <- sqrt(const) / (1 + const) * (2 * directionUpper - 1) * sum(subjectsPerStage[1:4, k] * (simEventsTreatment[1:4, k] * (1 + const) / const - simEventsControl[1:4, k] * (1 + const)) / subjectsPerStage[1:4, k], na.rm = TRUE) / sqrt(sum(rm * (1 - rm) * subjectsPerStage[1:4, k], na.rm = TRUE)) } } } else { rm <- sum(simEventsControl[1:4, k] + simEventsTreatment[1:4, k], na.rm = TRUE) / sum(subjectsPerStage[1:4, k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { testStatistics[3, k] <- 0 } else { testStatistics[3, k] <- (2 * directionUpper - 1) * sum(simEventsTreatment[1:4, k] * (1 + const) / const - simEventsControl[1:4, k] * (1 + const), na.rm = TRUE) / sum(subjectsPerStage[1:4, k], na.rm = TRUE) / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[1:4, k], na.rm = TRUE) * const / (1 + const)^2) } } } populationSubjectsPerStage[3, k] <- sum(subjectsPerStage[1:4, k], na.rm = TRUE) overallRatesTreatment[3, k] <- sum(simEventsTreatment[1:4, 1:k], na.rm = TRUE) / round(const / (1 + const) * sum(subjectsPerStage[1:4, 1:k], na.rm = TRUE)) overallRatesControl[3, k] <- sum(simEventsControl[1:4, 1:k], na.rm = TRUE) / round(1 / (1 + const) * sum(subjectsPerStage[1:4, 1:k], na.rm = TRUE)) overallEffectSizes[3, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[3, k] - overallRatesControl[3, k]) rm <- sum(simEventsControl[1:4, 1:k] + simEventsTreatment[1:4, 1:k], na.rm = TRUE) / sum(subjectsPerStage[1:4, 1:k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { overallTestStatistics[3, k] <- 0 } else { overallTestStatistics[3, k] <- overallEffectSizes[3, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[1:4, 
1:k], na.rm = TRUE) * const / (1 + const)^2) } } } else if (gMax == 4) { # Population S1 if (stratifiedAnalysis) { rm <- (simEventsControl[c(1, 4, 5, 7), k] + simEventsTreatment[c(1, 4, 5, 7), k]) / subjectsPerStage[c(1, 4, 5, 7), k] rm[!is.na(rm) & (rm <= 0 | rm >= 1)] <- 0 if (!all(is.na(rm))) { if (all(na.omit(rm) == 0) || all(na.omit(rm) == 1)) { testStatistics[1, k] <- 0 } else { testStatistics[1, k] <- sqrt(const) / (1 + const) * (2 * directionUpper - 1) * sum(subjectsPerStage[c(1, 4, 5, 7), k] * (simEventsTreatment[c(1, 4, 5, 7), k] * (1 + const) / const - simEventsControl[c(1, 4, 5, 7), k] * (1 + const)) / subjectsPerStage[c(1, 4, 5, 7), k], na.rm = TRUE) / sqrt(sum(rm * (1 - rm) * subjectsPerStage[c(1, 4, 5, 7), k], na.rm = TRUE)) } } } else { rm <- sum(simEventsControl[c(1, 4, 5, 7), k] + simEventsTreatment[c(1, 4, 5, 7), k], na.rm = TRUE) / sum(subjectsPerStage[c(1, 4, 5, 7), k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { testStatistics[1, k] <- 0 } else { testStatistics[1, k] <- (2 * directionUpper - 1) * sum(simEventsTreatment[c(1, 4, 5, 7), k] * (1 + const) / const - simEventsControl[c(1, 4, 5, 7), k] * (1 + const), na.rm = TRUE) / sum(subjectsPerStage[c(1, 4, 5, 7), k], na.rm = TRUE) / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(1, 4, 5, 7), k], na.rm = TRUE) * const / (1 + const)^2) } } } populationSubjectsPerStage[1, k] <- sum(subjectsPerStage[c(1, 4, 5, 7), k], na.rm = TRUE) overallRatesTreatment[1, k] <- sum(simEventsTreatment[c(1, 4, 5, 7), 1:k], na.rm = TRUE) / round(const / (1 + const) * sum(subjectsPerStage[c(1, 4, 5, 7), 1:k], na.rm = TRUE)) overallRatesControl[1, k] <- sum(simEventsControl[c(1, 4, 5, 7), 1:k], na.rm = TRUE) / round(1 / (1 + const) * sum(subjectsPerStage[c(1, 4, 5, 7), 1:k], na.rm = TRUE)) overallEffectSizes[1, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[1, k] - overallRatesControl[1, k]) rm <- sum(simEventsControl[c(1, 4, 5, 7), 1:k] + simEventsTreatment[c(1, 4, 5, 7), 1:k], na.rm = TRUE) / 
sum(subjectsPerStage[c(1, 4, 5, 7), 1:k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { overallTestStatistics[1, k] <- 0 } else { overallTestStatistics[1, k] <- overallEffectSizes[1, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(1, 4, 5, 7), 1:k], na.rm = TRUE) * const / (1 + const)^2) } } # Population S2 if (stratifiedAnalysis) { rm <- (simEventsControl[c(2, 4, 6, 7), k] + simEventsTreatment[c(2, 4, 6, 7), k]) / subjectsPerStage[c(2, 4, 6, 7), k] rm[!is.na(rm) & (rm <= 0 | rm >= 1)] <- 0 if (!all(is.na(rm))) { if (all(na.omit(rm) == 0) || all(na.omit(rm) == 1)) { testStatistics[2, k] <- 0 } else { testStatistics[2, k] <- sqrt(const) / (1 + const) * (2 * directionUpper - 1) * sum(subjectsPerStage[c(2, 4, 6, 7), k] * (simEventsTreatment[c(2, 4, 6, 7), k] * (1 + const) / const - simEventsControl[c(2, 4, 6, 7), k] * (1 + const)) / subjectsPerStage[c(2, 4, 6, 7), k], na.rm = TRUE) / sqrt(sum(rm * (1 - rm) * subjectsPerStage[c(2, 4, 6, 7), k], na.rm = TRUE)) } } } else { rm <- sum(simEventsControl[c(2, 4, 6, 7), k] + simEventsTreatment[c(2, 4, 6, 7), k], na.rm = TRUE) / sum(subjectsPerStage[c(2, 4, 6, 7), k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { testStatistics[2, k] <- 0 } else { testStatistics[2, k] <- (2 * directionUpper - 1) * sum(simEventsTreatment[c(2, 4, 6, 7), k] * (1 + const) / const - simEventsControl[c(2, 4, 6, 7), k] * (1 + const), na.rm = TRUE) / sum(subjectsPerStage[c(2, 4, 6, 7), k], na.rm = TRUE) / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(2, 4, 6, 7), k], na.rm = TRUE) * const / (1 + const)^2) } } } populationSubjectsPerStage[2, k] <- sum(subjectsPerStage[c(2, 4, 6, 7), k], na.rm = TRUE) overallRatesTreatment[2, k] <- sum(simEventsTreatment[c(2, 4, 6, 7), 1:k], na.rm = TRUE) / round(const / (1 + const) * sum(subjectsPerStage[c(2, 4, 6, 7), 1:k], na.rm = TRUE)) overallRatesControl[2, k] <- sum(simEventsControl[c(2, 4, 6, 7), 1:k], na.rm = TRUE) / round(1 / (1 + const) * sum(subjectsPerStage[c(2, 4, 6, 7), 
1:k], na.rm = TRUE)) overallEffectSizes[2, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[2, k] - overallRatesControl[2, k]) rm <- sum(simEventsControl[c(2, 4, 6, 7), 1:k] + simEventsTreatment[c(2, 4, 6, 7), 1:k], na.rm = TRUE) / sum(subjectsPerStage[c(2, 4, 6, 7), 1:k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { overallTestStatistics[2, k] <- 0 } else { overallTestStatistics[2, k] <- overallEffectSizes[2, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(2, 4, 6, 7), 1:k], na.rm = TRUE) * const / (1 + const)^2) } } # Population S3 if (stratifiedAnalysis) { rm <- (simEventsControl[c(3, 5, 6, 7), k] + simEventsTreatment[c(3, 5, 6, 7), k]) / subjectsPerStage[c(3, 5, 6, 7), k] rm[!is.na(rm) & (rm <= 0 | rm >= 1)] <- 0 if (!all(is.na(rm))) { if (all(na.omit(rm) == 0) || all(na.omit(rm) == 1)) { testStatistics[3, k] <- 0 } else { testStatistics[3, k] <- sqrt(const) / (1 + const) * (2 * directionUpper - 1) * sum(subjectsPerStage[c(3, 5, 6, 7), k] * (simEventsTreatment[c(3, 5, 6, 7), k] * (1 + const) / const - simEventsControl[c(3, 5, 6, 7), k] * (1 + const)) / subjectsPerStage[c(3, 5, 6, 7), k], na.rm = TRUE) / sqrt(sum(rm * (1 - rm) * subjectsPerStage[c(3, 5, 6, 7), k], na.rm = TRUE)) } } } else { rm <- sum(simEventsControl[c(3, 5, 6, 7), k] + simEventsTreatment[c(3, 5, 6, 7), k], na.rm = TRUE) / sum(subjectsPerStage[c(3, 5, 6, 7), k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { testStatistics[3, k] <- 0 } else { testStatistics[3, k] <- (2 * directionUpper - 1) * sum(simEventsTreatment[c(3, 5, 6, 7), k] * (1 + const) / const - simEventsControl[c(3, 5, 6, 7), k] * (1 + const), na.rm = TRUE) / sum(subjectsPerStage[c(3, 5, 6, 7), k], na.rm = TRUE) / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(3, 5, 6, 7), k], na.rm = TRUE) * const / (1 + const)^2) } } } populationSubjectsPerStage[3, k] <- sum(subjectsPerStage[c(3, 5, 6, 7), k], na.rm = TRUE) overallRatesTreatment[3, k] <- sum(simEventsTreatment[c(3, 5, 6, 7), 1:k], na.rm = 
TRUE) / round(const / (1 + const) * sum(subjectsPerStage[c(3, 5, 6, 7), 1:k], na.rm = TRUE)) overallRatesControl[3, k] <- sum(simEventsControl[c(3, 5, 6, 7), 1:k], na.rm = TRUE) / round(1 / (1 + const) * sum(subjectsPerStage[c(3, 5, 6, 7), 1:k], na.rm = TRUE)) overallEffectSizes[3, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[3, k] - overallRatesControl[3, k]) rm <- sum(simEventsControl[c(3, 5, 6, 7), 1:k] + simEventsTreatment[c(3, 5, 6, 7), 1:k], na.rm = TRUE) / sum(subjectsPerStage[c(3, 5, 6, 7), 1:k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { overallTestStatistics[3, k] <- 0 } else { overallTestStatistics[3, k] <- overallEffectSizes[3, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[c(3, 5, 6, 7), 1:k], na.rm = TRUE) * const / (1 + const)^2) } } # Full population if (stratifiedAnalysis) { rm <- (simEventsControl[1:8, k] + simEventsTreatment[1:8, k]) / subjectsPerStage[1:8, k] rm[!is.na(rm) & (rm <= 0 | rm >= 1)] <- 0 if (!all(is.na(rm))) { if (all(na.omit(rm) == 0) || all(na.omit(rm) == 1)) { testStatistics[4, k] <- 0 } else { testStatistics[4, k] <- sqrt(const) / (1 + const) * (2 * directionUpper - 1) * sum(subjectsPerStage[1:8, k] * (simEventsTreatment[1:8, k] * (1 + const) / const - simEventsControl[1:8, k] * (1 + const)) / subjectsPerStage[1:8, k], na.rm = TRUE) / sqrt(sum(rm * (1 - rm) * subjectsPerStage[1:8, k], na.rm = TRUE)) } } } else { rm <- sum(simEventsControl[1:8, k] + simEventsTreatment[1:8, k], na.rm = TRUE) / sum(subjectsPerStage[1:8, k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { testStatistics[4, k] <- 0 } else { testStatistics[4, k] <- (2 * directionUpper - 1) * sum(simEventsTreatment[1:8, k] * (1 + const) / const - simEventsControl[1:8, k] * (1 + const), na.rm = TRUE) / sum(subjectsPerStage[1:8, k], na.rm = TRUE) / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[1:8, k], na.rm = TRUE) * const / (1 + const)^2) } } } populationSubjectsPerStage[4, k] <- sum(subjectsPerStage[1:8, k], na.rm = TRUE) 
overallRatesTreatment[4, k] <- sum(simEventsTreatment[1:8, 1:k], na.rm = TRUE) / round(const / (1 + const) * sum(subjectsPerStage[1:8, 1:k], na.rm = TRUE)) overallRatesControl[4, k] <- sum(simEventsControl[1:8, 1:k], na.rm = TRUE) / round(1 / (1 + const) * sum(subjectsPerStage[1:8, 1:k], na.rm = TRUE)) overallEffectSizes[4, k] <- (2 * directionUpper - 1) * (overallRatesTreatment[4, k] - overallRatesControl[4, k]) rm <- sum(simEventsControl[1:8, 1:k] + simEventsTreatment[1:8, 1:k], na.rm = TRUE) / sum(subjectsPerStage[1:8, 1:k], na.rm = TRUE) if (!is.na(rm)) { if (rm <= 0 || rm >= 1) { overallTestStatistics[4, k] <- 0 } else { overallTestStatistics[4, k] <- overallEffectSizes[4, k] / sqrt(rm * (1 - rm)) * sqrt(sum(subjectsPerStage[1:8, 1:k], na.rm = TRUE) * const / (1 + const)^2) } } } testStatistics[!selectedPopulations[, k], k] <- NA_real_ overallEffectSizes[!selectedPopulations[, k], k] <- NA_real_ overallTestStatistics[!selectedPopulations[, k], k] <- NA_real_ separatePValues[, k] <- 1 - stats::pnorm(testStatistics[, k]) if (k < kMax) { if (colSums(selectedPopulations)[k] == 0) { break } # Bonferroni adjustment adjustedPValues[k] <- min(min(separatePValues[, k], na.rm = TRUE) * colSums(selectedPopulations)[k], 1 - 1e-12) # conditional critical value to reject the null hypotheses at the next stage of the trial if (.isTrialDesignFisher(design)) { conditionalCriticalValue[k] <- .getOneMinusQNorm(min((design$criticalValues[k + 1] / prod(adjustedPValues[1:k]^weights[1:k]))^(1 / weights[k + 1]), 1 - 1e-7)) } else { if (design$criticalValues[k + 1] >= 6) { conditionalCriticalValue[k] <- Inf } else { conditionalCriticalValue[k] <- (design$criticalValues[k + 1] * sqrt(design$informationRates[k + 1]) - .getOneMinusQNorm(adjustedPValues[1:k]) %*% weights[1:k]) / sqrt(design$informationRates[k + 1] - design$informationRates[k]) } } if (adaptations[k]) { if (effectMeasure == "testStatistic") { selectedPopulations[, k + 1] <- (selectedPopulations[, k] & .selectPopulations( k, 
overallTestStatistics[, k] + runif(gMax, -1e-05, 1e-05), typeOfSelection, epsilonValue, rValue, threshold, selectPopulationsFunction )) } else if (effectMeasure == "effectEstimate") { selectedPopulations[, k + 1] <- (selectedPopulations[, k] & .selectPopulations( k, overallEffectSizes[, k] + runif(gMax, -1e-05, 1e-05), typeOfSelection, epsilonValue, rValue, threshold, selectPopulationsFunction )) } newSubjects <- calcSubjectsFunction( stage = k + 1, # to be consistent with non-enrichment situation, cf. line 40 directionUpper = directionUpper, conditionalPower = conditionalPower, conditionalCriticalValue = conditionalCriticalValue, plannedSubjects = plannedSubjects, allocationRatioPlanned = allocationRatioPlanned, selectedPopulations = selectedPopulations, piTreatmentH1 = piTreatmentH1, piControlH1 = piControlH1, overallRatesTreatment = overallRatesTreatment, overallRatesControl = overallRatesControl, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage ) if (is.null(newSubjects) || length(newSubjects) != 1 || !is.numeric(newSubjects) || is.na(newSubjects)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'calcSubjectsFunction' returned an illegal or undefined result (", newSubjects, "); ", "the output must be a single numeric value" ) } if (!is.na(conditionalPower) || calcSubjectsFunctionIsUserDefined) { plannedSubjects[(k + 1):kMax] <- plannedSubjects[k] + cumsum(rep(newSubjects, kMax - k)) } } else { selectedPopulations[, k + 1] <- selectedPopulations[, k] } if (is.na(piControlH1)) { pi2H1 <- overallRatesControl[, k] } else { pi2H1 <- piControlH1 } if (is.na(piTreatmentH1)) { pi1H1 <- overallRatesTreatment[, k] } else { pi1H1 <- piTreatmentH1 } pim <- (allocationRatioPlanned[k] * pi1H1 + pi2H1) / (1 + allocationRatioPlanned[k]) if (any(pi1H1 * (1 - pi1H1) + pi2H1 * (1 - pi2H1) == 0)) { thetaStandardized <- 0 } else { thetaStandardized <- sqrt(allocationRatioPlanned[k]) / (1 + allocationRatioPlanned[k]) * 
( (pi1H1 - pi2H1) * sqrt(1 + allocationRatioPlanned[k]) / sqrt(pi1H1 * (1 - pi1H1) + allocationRatioPlanned[k] * pi2H1 * (1 - pi2H1)) + sign(pi1H1 - pi2H1) * conditionalCriticalValue[k] * (1 - sqrt(pim * (1 - pim) + allocationRatioPlanned[k] * pim * (1 - pim)) / sqrt(pi1H1 * (1 - pi1H1) + allocationRatioPlanned[k] * pi2H1 * (1 - pi2H1))) * (1 + allocationRatioPlanned[k]) / sqrt(allocationRatioPlanned[k] * (plannedSubjects[k + 1] - plannedSubjects[k])) ) } thetaStandardized <- (2 * directionUpper - 1) * thetaStandardized thetaStandardized <- min(thetaStandardized, na.rm = TRUE) conditionalPowerPerStage[k] <- 1 - stats::pnorm(conditionalCriticalValue[k] - thetaStandardized * sqrt(plannedSubjects[k + 1] - plannedSubjects[k])) } } return(list( subjectsPerStage = subjectsPerStage, populationSubjectsPerStage = populationSubjectsPerStage, allocationRatioPlanned = allocationRatioPlanned, overallEffectSizes = overallEffectSizes, testStatistics = testStatistics, directionUpper = directionUpper, overallTestStatistics = overallTestStatistics, overallRatesControl = overallRatesControl, overallRatesTreatment = overallRatesTreatment, separatePValues = separatePValues, conditionalCriticalValue = conditionalCriticalValue, conditionalPowerPerStage = conditionalPowerPerStage, selectedPopulations = selectedPopulations )) } #' #' @title #' Get Simulation Enrichment Rates #' #' @description #' Returns the simulated power, stopping and selection probabilities, conditional power, #' and expected sample size for testing rates in an enrichment design testing situation. #' #' @param piControlH1 If specified, the assumed probabilities in the control arm #' under which the sample size recalculation was performed #' and the conditional power was calculated. #' @param piTreatmentH1 If specified, the assumed probabilities in the active arm #' under which the sample size recalculation was performed #' and the conditional power was calculated. 
#' @inheritParams param_intersectionTest_Enrichment
#' @inheritParams param_typeOfSelection
#' @inheritParams param_effectMeasure
#' @inheritParams param_adaptations
#' @inheritParams param_threshold
#' @inheritParams param_effectList
#' @inheritParams param_successCriterion
#' @inheritParams param_design_with_default
#' @inheritParams param_directionUpper
#' @inheritParams param_allocationRatioPlanned
#' @inheritParams param_plannedSubjects
#' @inheritParams param_minNumberOfSubjectsPerStage
#' @inheritParams param_maxNumberOfSubjectsPerStage
#' @inheritParams param_conditionalPowerSimulation
#' @inheritParams param_maxNumberOfIterations
#' @inheritParams param_calcSubjectsFunction
#' @inheritParams param_selectPopulationsFunction
#' @inheritParams param_rValue
#' @inheritParams param_epsilonValue
#' @inheritParams param_seed
#' @inheritParams param_three_dots
#' @inheritParams param_showStatistics
#' @inheritParams param_stratifiedAnalysis
#'
#' @details
#' For a given design, the function simulates the power, stopping probabilities,
#' selection probabilities, and expected sample size for a given number of subjects,
#' parameter configuration, and treatment arm selection rule in the enrichment situation.
#' An allocation ratio can be specified referring to the ratio of the number of
#' subjects in the active treatment groups as compared to the control group.
#'
#' The definition of \code{piTreatmentH1} and/or \code{piControlH1} only makes sense if \code{kMax} > 1
#' and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and
#' \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined.
#'
#' \code{calcSubjectsFunction}\cr
#' This function returns the number of subjects at a given conditional power and
#' conditional critical value for the specified testing situation. 
#' The function might depend on the variables #' \code{stage}, #' \code{selectedPopulations}, #' \code{directionUpper}, #' \code{plannedSubjects}, #' \code{allocationRatioPlanned}, #' \code{minNumberOfSubjectsPerStage}, #' \code{maxNumberOfSubjectsPerStage}, #' \code{conditionalPower}, #' \code{conditionalCriticalValue}, #' \code{overallRatesTreatment}, #' \code{overallRatesControl}, #' \code{piTreatmentH1}, and #' \code{piControlH1}. #' The function has to contain the three-dots argument '...' (see examples). #' #' @template return_object_simulation_results #' @template how_to_get_help_for_generics #' #' @template examples_get_simulation_enrichment_rates #' #' @export #' getSimulationEnrichmentRates <- function(design = NULL, ..., effectList = NULL, intersectionTest = c("Simes", "SpiessensDebois", "Bonferroni", "Sidak"), # C_INTERSECTION_TEST_ENRICHMENT_DEFAULT stratifiedAnalysis = TRUE, # C_STRATIFIED_ANALYSIS_DEFAULT, directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT adaptations = NA, typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), # C_TYPE_OF_SELECTION_DEFAULT effectMeasure = c("effectEstimate", "testStatistic"), # C_EFFECT_MEASURE_DEFAULT successCriterion = c("all", "atLeastOne"), # C_SUCCESS_CRITERION_DEFAULT epsilonValue = NA_real_, rValue = NA_real_, threshold = -Inf, plannedSubjects = NA_real_, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, piTreatmentH1 = NA_real_, piControlH1 = NA_real_, maxNumberOfIterations = 1000L, # C_MAX_SIMULATION_ITERATIONS_DEFAULT seed = NA_real_, calcSubjectsFunction = NULL, selectPopulationsFunction = NULL, showStatistics = FALSE) { if (is.null(design)) { design <- .getDefaultDesign(..., type = "simulation") .warnInCaseOfUnknownArguments( functionName = "getSimulationEnrichmentRates", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "showStatistics"), ... 
) } else { .assertIsTrialDesignInverseNormalOrFisher(design) .warnInCaseOfUnknownArguments(functionName = "getSimulationEnrichmentRates", ignore = "showStatistics", ...) .warnInCaseOfTwoSidedPowerArgument(...) } .assertIsOneSidedDesign(design, designType = "enrichment", engineType = "simulation") calcSubjectsFunctionIsUserDefined <- !is.null(calcSubjectsFunction) simulationResults <- .createSimulationResultsEnrichmentObject( design = design, effectList = effectList, intersectionTest = intersectionTest, stratifiedAnalysis = stratifiedAnalysis, directionUpper = directionUpper, # rates + survival only adaptations = adaptations, typeOfSelection = typeOfSelection, effectMeasure = effectMeasure, successCriterion = successCriterion, epsilonValue = epsilonValue, rValue = rValue, threshold = threshold, plannedSubjects = plannedSubjects, # means + rates only allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, # means + rates only maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, # means + rates only conditionalPower = conditionalPower, piTreatmentH1 = piTreatmentH1, # rates only piControlH1 = piControlH1, # rates only maxNumberOfIterations = maxNumberOfIterations, seed = seed, calcSubjectsFunction = calcSubjectsFunction, # means + rates only selectPopulationsFunction = selectPopulationsFunction, showStatistics = showStatistics, endpoint = "rates" ) design <- simulationResults$.design successCriterion <- simulationResults$successCriterion effectMeasure <- simulationResults$effectMeasure adaptations <- simulationResults$adaptations gMax <- simulationResults$populations kMax <- simulationResults$.design$kMax intersectionTest <- simulationResults$intersectionTest typeOfSelection <- simulationResults$typeOfSelection effectList <- simulationResults$effectList piTreatmentH1 <- simulationResults$piTreatmentH1 # rates only piControlH1 <- simulationResults$piControlH1 # rates only conditionalPower <- 
simulationResults$conditionalPower minNumberOfSubjectsPerStage <- simulationResults$minNumberOfSubjectsPerStage maxNumberOfSubjectsPerStage <- simulationResults$maxNumberOfSubjectsPerStage allocationRatioPlanned <- simulationResults$allocationRatioPlanned calcSubjectsFunction <- simulationResults$calcSubjectsFunction if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, kMax) } indices <- .getIndicesOfClosedHypothesesSystemForSimulation(gMax = gMax) cols <- nrow(effectList$piTreatments) simulatedSelections <- array(0, dim = c(kMax, cols, gMax)) simulatedRejections <- array(0, dim = c(kMax, cols, gMax)) simulatedNumberOfPopulations <- matrix(0, nrow = kMax, ncol = cols) simulatedSubjectsPerStage <- array(0, dim = c(kMax, cols, 2^(gMax - 1))) simulatedSuccessStopping <- matrix(0, nrow = kMax, ncol = cols) simulatedFutilityStopping <- matrix(0, nrow = kMax - 1, ncol = cols) simulatedConditionalPower <- matrix(0, nrow = kMax, ncol = cols) simulatedRejectAtLeastOne <- rep(0, cols) expectedNumberOfSubjects <- rep(0, cols) iterations <- matrix(0, nrow = kMax, ncol = cols) len <- maxNumberOfIterations * kMax * gMax * cols dataIterationNumber <- rep(NA_real_, len) dataStageNumber <- rep(NA_real_, len) dataPopulationNumber <- rep(NA_real_, len) dataEffect <- rep(NA_real_, len) dataSubjectsPopulation <- rep(NA_real_, len) dataSubjectsActivePopulation <- rep(NA_real_, len) dataNumberOfSubjects <- rep(NA_real_, len) dataNumberOfCumulatedSubjects <- rep(NA_real_, len) dataRejectPerStage <- rep(NA, len) dataFutilityStop <- rep(NA_real_, len) dataSuccessStop <- rep(NA, len) dataFutilityStop <- rep(NA, len) dataTestStatistics <- rep(NA_real_, len) dataConditionalCriticalValue <- rep(NA_real_, len) dataConditionalPowerAchieved <- rep(NA_real_, len) dataEffectEstimate <- rep(NA_real_, len) dataPValuesSeparate <- rep(NA_real_, len) piControls <- effectList$piControls if (length(piControls) == 1) { piControls <- rep(piControls, 
ncol(effectList$piTreatments)) } index <- 1 for (i in 1:cols) { for (j in 1:maxNumberOfIterations) { stageResults <- .getSimulatedStageRatesEnrichment( design = design, subsets = effectList$subsets, prevalences = effectList$prevalences, piTreatments = effectList$piTreatments[i, ], piControls = piControls, directionUpper = directionUpper, stratifiedAnalysis = stratifiedAnalysis, plannedSubjects = plannedSubjects, typeOfSelection = typeOfSelection, effectMeasure = effectMeasure, adaptations = adaptations, epsilonValue = epsilonValue, rValue = rValue, threshold = threshold, allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, conditionalPower = conditionalPower, piTreatmentH1 = piTreatmentH1, piControlH1 = piControlH1, calcSubjectsFunction = calcSubjectsFunction, calcSubjectsFunctionIsUserDefined = calcSubjectsFunctionIsUserDefined, selectPopulationsFunction = selectPopulationsFunction ) closedTest <- .performClosedCombinationTestForSimulationEnrichment( stageResults = stageResults, design = design, indices = indices, intersectionTest = intersectionTest, successCriterion = successCriterion ) rejectAtSomeStage <- FALSE rejectedPopulationsBefore <- rep(FALSE, gMax) for (k in 1:kMax) { simulatedRejections[k, i, ] <- simulatedRejections[k, i, ] + (closedTest$rejected[, k] & closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore) simulatedSelections[k, i, ] <- simulatedSelections[k, i, ] + closedTest$selectedPopulations[, k] simulatedNumberOfPopulations[k, i] <- simulatedNumberOfPopulations[k, i] + sum(closedTest$selectedPopulations[, k]) if (!any(is.na(closedTest$successStop))) { simulatedSuccessStopping[k, i] <- simulatedSuccessStopping[k, i] + closedTest$successStop[k] } if ((kMax > 1) && (k < kMax)) { if (!any(is.na(closedTest$futilityStop))) { simulatedFutilityStopping[k, i] <- simulatedFutilityStopping[k, i] + (closedTest$futilityStop[k] && 
!closedTest$successStop[k]) } if (!closedTest$successStop[k] && !closedTest$futilityStop[k]) { simulatedConditionalPower[k + 1, i] <- simulatedConditionalPower[k + 1, i] + stageResults$conditionalPowerPerStage[k] } } iterations[k, i] <- iterations[k, i] + 1 for (p in 1:2^(gMax - 1)) { if (!is.na(stageResults$subjectsPerStage[p, k])) { simulatedSubjectsPerStage[k, i, p] <- simulatedSubjectsPerStage[k, i, p] + stageResults$subjectsPerStage[p, k] } } for (g in 1:gMax) { dataIterationNumber[index] <- j dataStageNumber[index] <- k dataPopulationNumber[index] <- g dataEffect[index] <- i dataSubjectsPopulation[index] <- stageResults$populationSubjectsPerStage[g, k] dataNumberOfSubjects[index] <- round(sum(stageResults$subjectsPerStage[, k], na.rm = TRUE), 1) dataNumberOfCumulatedSubjects[index] <- sum(stageResults$subjectsPerStage[, 1:k], na.rm = TRUE) dataRejectPerStage[index] <- closedTest$rejected[g, k] dataTestStatistics[index] <- stageResults$testStatistics[g, k] dataSuccessStop[index] <- closedTest$successStop[k] if (k < kMax) { dataFutilityStop[index] <- closedTest$futilityStop[k] dataConditionalCriticalValue[index] <- stageResults$conditionalCriticalValue[k] dataConditionalPowerAchieved[index + 1] <- stageResults$conditionalPowerPerStage[k] } dataEffectEstimate[index] <- stageResults$overallEffectSizes[g, k] dataPValuesSeparate[index] <- closedTest$separatePValues[g, k] index <- index + 1 } if (!rejectAtSomeStage && any(closedTest$rejected[, k] & closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore)) { simulatedRejectAtLeastOne[i] <- simulatedRejectAtLeastOne[i] + 1 rejectAtSomeStage <- TRUE } if ((k < kMax) && (closedTest$successStop[k] || closedTest$futilityStop[k])) { # rejected hypotheses remain rejected also in case of early stopping simulatedRejections[(k + 1):kMax, i, ] <- simulatedRejections[(k + 1):kMax, i, ] + matrix( (closedTest$rejected[, k] & closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore), kMax - k, gMax, byrow 
= TRUE ) break } rejectedPopulationsBefore <- closedTest$rejected[, k] & closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore } } simulatedSubjectsPerStage[is.na(simulatedSubjectsPerStage)] <- 0 simulatedSubjectsPerStage[, i, ] <- simulatedSubjectsPerStage[, i, ] / iterations[, i] if (kMax > 1) { simulatedRejections[2:kMax, i, ] <- simulatedRejections[2:kMax, i, ] - simulatedRejections[1:(kMax - 1), i, ] stopping <- cumsum(simulatedSuccessStopping[1:(kMax - 1), i] + simulatedFutilityStopping[, i]) / maxNumberOfIterations expectedNumberOfSubjects[i] <- sum(simulatedSubjectsPerStage[1, i, ] + t(1 - stopping) %*% simulatedSubjectsPerStage[2:kMax, i, ]) } else { expectedNumberOfSubjects[i] <- sum(simulatedSubjectsPerStage[1, i, ]) } } simulatedConditionalPower[1, ] <- NA_real_ if (kMax > 1) { simulatedConditionalPower[2:kMax, ] <- as.matrix(simulatedConditionalPower[2:kMax, ] / iterations[2:kMax, ]) } simulationResults$rejectAtLeastOne <- simulatedRejectAtLeastOne / maxNumberOfIterations simulationResults$numberOfPopulations <- simulatedNumberOfPopulations / iterations simulationResults$selectedPopulations <- simulatedSelections / maxNumberOfIterations simulationResults$rejectedPopulationsPerStage <- simulatedRejections / maxNumberOfIterations simulationResults$successPerStage <- simulatedSuccessStopping / maxNumberOfIterations simulationResults$futilityPerStage <- simulatedFutilityStopping / maxNumberOfIterations simulationResults$futilityStop <- base::colSums(simulatedFutilityStopping / maxNumberOfIterations) if (kMax > 1) { simulationResults$earlyStop <- simulationResults$futilityPerStage + simulationResults$successPerStage[1:(kMax - 1), ] simulationResults$conditionalPowerAchieved <- simulatedConditionalPower } simulationResults$sampleSizes <- simulatedSubjectsPerStage simulationResults$expectedNumberOfSubjects <- expectedNumberOfSubjects simulationResults$iterations <- iterations if (!all(is.na(simulationResults$conditionalPowerAchieved))) { 
simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED) } if (any(simulationResults$rejectedPopulationsPerStage < 0)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "internal error, simulation not possible due to numerical overflow") } data <- data.frame( iterationNumber = dataIterationNumber, stageNumber = dataStageNumber, populationNumber = dataPopulationNumber, effect = dataEffect, numberOfSubjects = dataNumberOfSubjects, numberOfCumulatedSubjects = dataNumberOfCumulatedSubjects, subjectsPopulation = dataSubjectsPopulation, effectEstimate = dataEffectEstimate, testStatistics = dataTestStatistics, pValue = dataPValuesSeparate, conditionalCriticalValue = round(dataConditionalCriticalValue, 6), conditionalPowerAchieved = round(dataConditionalPowerAchieved, 6), rejectPerStage = dataRejectPerStage, successStop = dataSuccessStop, futilityPerStage = dataFutilityStop ) data <- data[!is.na(data$effectEstimate), ] simulationResults$.data <- data return(simulationResults) } rpact/R/f_analysis_base_survival.R0000644000176200001440000016273714445307575017022 0ustar liggesusers## | ## | *Analysis of survival data with group sequential and combination test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_logger.R NULL .getAnalysisResultsSurvival <- function(..., design, dataInput) { if (.isTrialDesignGroupSequential(design)) { 
return(.getAnalysisResultsSurvivalGroupSequential( design = design, dataInput = dataInput, ... )) } if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsSurvivalInverseNormal( design = design, dataInput = dataInput, ... )) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsSurvivalFisher( design = design, dataInput = dataInput, ... )) } .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE) } .getAnalysisResultsSurvivalInverseNormal <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, thetaH1 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsSurvivalInverseNormal", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
.getAnalysisResultsSurvivalGroupSequential <- function(..., design, dataInput,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT,
        thetaH1 = NA_real_,
        nPlanned = NA_real_,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    # Analysis of survival data for a classical group sequential design:
    # validate the design, resolve the stage from the optional arguments,
    # then delegate all calculations to the shared worker
    # .getAnalysisResultsSurvivalAll(), which fills the results object in place.
    .assertIsTrialDesignGroupSequential(design)
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)

    # arguments that are legitimate here and must not trigger an
    # "unknown argument" warning
    ignoredArguments <- c(
        .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ),
        "stage"
    )
    .warnInCaseOfUnknownArguments(
        functionName = ".getAnalysisResultsSurvivalGroupSequential",
        ignore = ignoredArguments,
        ...
    )

    # container object; the worker populates it by reference
    analysisResults <- AnalysisResultsGroupSequential(design = design, dataInput = dataInput)
    .getAnalysisResultsSurvivalAll(
        results = analysisResults,
        design = design,
        dataInput = dataInput,
        stage = stage,
        directionUpper = directionUpper,
        thetaH0 = thetaH0,
        thetaH1 = thetaH1,
        nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned,
        tolerance = tolerance
    )
    return(analysisResults)
}
#'
#' Common worker for all survival analysis-result functions (group sequential,
#' inverse normal, Fisher). Computes stage results, conditional power, CRP,
#' repeated confidence intervals, repeated p-values and - for kMax > 1 -
#' final p-value and final confidence interval, and stores everything in the
#' (mutable, reference-semantics) 'results' object.
#'
#' The following parameters will be taken from 'design':
#' stages, informationRate, criticalValues, futilityBounds, alphaSpent, stageLevels
#'
#' NOTE: 'iterations' and 'seed' have no defaults; they are only evaluated in
#' the Fisher-design branches below, so non-Fisher callers may omit them
#' (R's lazy argument evaluation makes this safe).
#'
#' @noRd
#'
.getAnalysisResultsSurvivalAll <- function(..., results, design, dataInput, stage,
        directionUpper, thetaH0, thetaH1, nPlanned, allocationRatioPlanned, tolerance, iterations, seed) {
    startTime <- Sys.time()
    # stage-wise and overall test statistics, p-values, effect sizes
    stageResults <- .getStageResultsSurvival(
        design = design, dataInput = dataInput,
        stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper
    )
    results$.setStageResults(stageResults)
    .logProgress("Stage results calculated", startTime = startTime)

    # thetaH1 may be user-specified or derived from the stage results;
    # the parameter type records which of the two happened
    thetaH1User <- thetaH1
    thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage)
    .assertIsInOpenInterval(thetaH1, "thetaH1", 0, Inf)
    if (identical(thetaH1, thetaH1User)) {
        .setValueAndParameterType(results, "thetaH1", thetaH1, NA_real_)
    } else {
        results$thetaH1 <- thetaH1
        results$.setParameterType("thetaH1", C_PARAM_GENERATED)
    }
    .warnInCaseOfUnusedConditionalPowerArgument(results, nPlanned, "thetaH1", thetaH1)
    .setValueAndParameterType(results, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT)
    .setValueAndParameterType(results, "normalApproximation", TRUE, TRUE)
    .setValueAndParameterType(results, "thetaH0", thetaH0, C_THETA_H0_SURVIVAL_DEFAULT)
    .setConditionalPowerArguments(results, dataInput, nPlanned, allocationRatioPlanned)

    # test actions
    results$testActions <- getTestActions(stageResults = stageResults)
    results$.setParameterType("testActions", C_PARAM_GENERATED)

    if (design$kMax > 1) {
        # conditional power (Fisher designs simulate and therefore need
        # 'iterations' and 'seed'; the other designs compute it analytically)
        startTime <- Sys.time()
        if (.isTrialDesignFisher(design)) {
            results$.conditionalPowerResults <- .getConditionalPowerSurvival(
                stageResults = stageResults,
                nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                thetaH1 = thetaH1, iterations = iterations, seed = seed
            )
            .synchronizeIterationsAndSeed(results)
        } else {
            results$.conditionalPowerResults <- .getConditionalPowerSurvival(
                stageResults = stageResults,
                nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1
            )
            results$conditionalPower <- results$.conditionalPowerResults$conditionalPower
            results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
        }
        .logProgress("Conditional power calculated", startTime = startTime)

        # CRP - conditional rejection probabilities
        # (simulated for Fisher designs when 'simulateCRP' was passed via ...,
        # reusing the seed from the conditional power simulation)
        startTime <- Sys.time()
        if (.isTrialDesignFisher(design) && isTRUE(.getOptionalArgument("simulateCRP", ...))) {
            results$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED))
            seed <- results$.conditionalPowerResults$seed
            crp <- getConditionalRejectionProbabilities(
                stageResults = stageResults,
                iterations = iterations, seed = seed
            )
            results$conditionalRejectionProbabilities <- crp$crpFisherSimulated
            paramTypeSeed <- results$.conditionalPowerResults$.getParameterType("seed")
            if (paramTypeSeed != C_PARAM_TYPE_UNKNOWN) {
                results$.setParameterType("seed", paramTypeSeed)
            }
            results$seed <- seed
        } else {
            results$conditionalRejectionProbabilities <- getConditionalRejectionProbabilities(stageResults = stageResults)
        }
        results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_GENERATED)
        .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime)
    }

    # RCI - repeated confidence interval (row 1 = lower, row 2 = upper bounds)
    startTime <- Sys.time()
    repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsSurvival(
        design = design, dataInput = dataInput, stage = stage, tolerance = tolerance
    )
    results$repeatedConfidenceIntervalLowerBounds <- repeatedConfidenceIntervals[1, ]
    results$repeatedConfidenceIntervalUpperBounds <- repeatedConfidenceIntervals[2, ]
    results$.setParameterType("repeatedConfidenceIntervalLowerBounds", C_PARAM_GENERATED)
    results$.setParameterType("repeatedConfidenceIntervalUpperBounds", C_PARAM_GENERATED)
    .logProgress("Repeated confidence interval calculated", startTime = startTime)

    # repeated p-value
    startTime <- Sys.time()
    results$repeatedPValues <- getRepeatedPValues(
        stageResults = stageResults,
        tolerance = tolerance
    )
    results$.setParameterType("repeatedPValues", C_PARAM_GENERATED)
    .logProgress("Repeated p-values calculated", startTime = startTime)

    if (design$kMax > 1) {
        # final p-value (only defined at the stage where the trial stopped;
        # all other stages are filled with NA)
        startTime <- Sys.time()
        finalPValue <- getFinalPValue(stageResults, showWarnings = FALSE)
        results$finalPValues <- .getVectorWithFinalValueAtFinalStage(
            kMax = design$kMax,
            finalValue = finalPValue$pFinal, finalStage = finalPValue$finalStage
        )
        results$finalStage <- finalPValue$finalStage
        results$.setParameterType("finalPValues", C_PARAM_GENERATED)
        results$.setParameterType("finalStage", C_PARAM_GENERATED)
        .logProgress("Final p-value calculated", startTime = startTime)

        # final confidence interval & median unbiased estimate
        # (may be NULL, in which case the fields are left untouched)
        startTime <- Sys.time()
        finalConfidenceIntervals <- .getFinalConfidenceIntervalSurvival(
            design = design, dataInput = dataInput,
            thetaH0 = thetaH0, stage = stage, directionUpper = directionUpper, tolerance = tolerance
        )
        if (!is.null(finalConfidenceIntervals)) {
            finalStage <- finalConfidenceIntervals$finalStage
            results$finalConfidenceIntervalLowerBounds <- .getVectorWithFinalValueAtFinalStage(
                kMax = design$kMax,
                finalValue = finalConfidenceIntervals$finalConfidenceInterval[1], finalStage = finalStage
            )
            results$finalConfidenceIntervalUpperBounds <- .getVectorWithFinalValueAtFinalStage(
                kMax = design$kMax,
                finalValue = finalConfidenceIntervals$finalConfidenceInterval[2], finalStage = finalStage
            )
            results$medianUnbiasedEstimates <- .getVectorWithFinalValueAtFinalStage(
                kMax = design$kMax,
                finalValue = finalConfidenceIntervals$medianUnbiased, finalStage = finalStage
            )
            results$.setParameterType("finalConfidenceIntervalLowerBounds", C_PARAM_GENERATED)
            results$.setParameterType("finalConfidenceIntervalUpperBounds", C_PARAM_GENERATED)
            results$.setParameterType("medianUnbiasedEstimates", C_PARAM_GENERATED)
            .logProgress("Final confidence interval calculated", startTime = startTime)
        }
    }
    return(results)
}
) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design, stage = stage) overallEvents <- dataInput$getOverallEventsUpTo(stage, group = 1) overallAllocationRatios <- dataInput$getOverallAllocationRatiosUpTo(stage, group = 1) # Calculation of overall log-ranks for specified hypothesis overallLogRankTestStatistics <- dataInput$getOverallLogRanksUpTo(stage, group = 1) - sqrt(overallEvents) * sqrt(overallAllocationRatios) / (1 + overallAllocationRatios) * log(thetaH0) effectSizes <- exp(dataInput$getOverallLogRanksUpTo(stage, group = 1) * (1 + overallAllocationRatios[1:stage]) / sqrt(overallAllocationRatios[1:stage] * overallEvents[1:stage])) events <- dataInput$getEventsUpTo(stage, group = 1) allocationRatios <- dataInput$getAllocationRatiosUpTo(stage, group = 1) # Calculation of log-ranks for specified hypothesis logRankTestStatistics <- dataInput$getLogRanksUpTo(stage, group = 1) - sqrt(events) * sqrt(allocationRatios) / (1 + allocationRatios) * log(thetaH0) # Calculation of stage-wise test statistics and combination tests pValues <- rep(NA_real_, design$kMax) combInverseNormal <- rep(NA_real_, design$kMax) combFisher <- rep(NA_real_, design$kMax) weightsInverseNormal <- .getWeightsInverseNormal(design) weightsFisher <- .getWeightsFisher(design) if (directionUpper) { pValues <- 1 - stats::pnorm(logRankTestStatistics) overallPValues <- 1 - stats::pnorm(overallLogRankTestStatistics) } else { pValues <- stats::pnorm(logRankTestStatistics) overallPValues <- stats::pnorm(overallLogRankTestStatistics) } for (k in 1:stage) { # Inverse normal test combInverseNormal[k] <- (weightsInverseNormal[1:k] %*% .getOneMinusQNorm(pValues[1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) # Fisher combination test combFisher[k] <- prod(pValues[1:k]^weightsFisher[1:k]) } stageResults <- StageResultsSurvival( design = design, dataInput = dataInput, stage = as.integer(stage), overallTestStatistics = .fillWithNAs(overallLogRankTestStatistics, design$kMax), 
overallPValues = .fillWithNAs(overallPValues, design$kMax), overallEvents = .fillWithNAs(overallEvents, design$kMax), overallAllocationRatios = .fillWithNAs(overallAllocationRatios, design$kMax), events = .fillWithNAs(events, design$kMax), allocationRatios = .fillWithNAs(allocationRatios, design$kMax), testStatistics = .fillWithNAs(logRankTestStatistics, design$kMax), pValues = .fillWithNAs(pValues, design$kMax), effectSizes = .fillWithNAs(effectSizes, design$kMax), combInverseNormal = combInverseNormal, combFisher = combFisher, weightsFisher = weightsFisher, weightsInverseNormal = weightsInverseNormal, thetaH0 = thetaH0, direction = ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER) ) if (.isTrialDesignFisher(design)) { stageResults$.setParameterType("combFisher", C_PARAM_GENERATED) stageResults$.setParameterType("weightsFisher", C_PARAM_GENERATED) } else if (.isTrialDesignInverseNormal(design)) { stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED) stageResults$.setParameterType("weightsInverseNormal", C_PARAM_GENERATED) } return(stageResults) } #' #' Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Survival #' #' @noRd #' .getRepeatedConfidenceIntervalsSurvival <- function(..., design) { if (.isTrialDesignGroupSequential(design)) { return(.getRepeatedConfidenceIntervalsSurvivalGroupSequential(design = design, ...)) } if (.isTrialDesignInverseNormal(design)) { return(.getRepeatedConfidenceIntervalsSurvivalInverseNormal(design = design, ...)) } if (.isTrialDesignFisher(design)) { return(.getRepeatedConfidenceIntervalsSurvivalFisher(design = design, ...)) } .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE) } .getRootThetaSurvival <- function(..., design, dataInput, stage, directionUpper, thetaLow, thetaUp, firstParameterName, secondValue, tolerance, callingFunctionInformation) { result <- .getOneDimensionalRoot( function(theta) { stageResults <- .getStageResultsSurvival( design = 
design, dataInput = dataInput, stage = stage, thetaH0 = theta, directionUpper = directionUpper ) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- .getOneMinusQNorm(firstValue) } return(firstValue - secondValue) }, lower = thetaLow, upper = thetaUp, tolerance = tolerance, callingFunctionInformation = callingFunctionInformation ) return(result) } .getUpperLowerThetaSurvival <- function(..., design, dataInput, theta, stage, directionUpper, conditionFunction, firstParameterName, secondValue) { stageResults <- .getStageResultsSurvival( design = design, dataInput = dataInput, stage = stage, thetaH0 = exp(theta), directionUpper = directionUpper ) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- .getOneMinusQNorm(firstValue) } maxSearchIterations <- 30 while (conditionFunction(secondValue, firstValue)) { theta <- 2 * theta stageResults <- .getStageResultsSurvival( design = design, dataInput = dataInput, stage = stage, thetaH0 = exp(theta), directionUpper = directionUpper ) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- .getOneMinusQNorm(firstValue) } maxSearchIterations <- maxSearchIterations - 1 if (maxSearchIterations < 0) { stop(sprintf( paste0( "Failed to find theta (k = %s, firstValue = %s, ", "secondValue = %s, levels(firstValue) = %s, theta = %s)" ), stage, stageResults[[firstParameterName]][stage], secondValue, firstValue, theta )) } } return(theta) } .getRepeatedConfidenceIntervalsSurvivalAll <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) { stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) # necessary for adjustment for binding futility boundaries futilityCorr <- rep(NA_real_, design$kMax) criticalValues <- design$criticalValues if 
(.isTrialDesignFisher(design)) { bounds <- design$alpha0Vec border <- C_ALPHA_0_VEC_DEFAULT conditionFunction <- .isFirstValueSmallerThanSecondValue } else { bounds <- design$futilityBounds criticalValues[is.infinite(criticalValues) & criticalValues > 0] <- C_QNORM_MAXIMUM criticalValues[is.infinite(criticalValues) & criticalValues < 0] <- C_QNORM_MINIMUM border <- C_FUTILITY_BOUNDS_DEFAULT conditionFunction <- .isFirstValueGreaterThanSecondValue } repeatedConfidenceIntervals <- matrix(NA_real_, 2, design$kMax) for (k in (1:stage)) { startTime <- Sys.time() if (criticalValues[k] < C_QNORM_MAXIMUM) { # Finding maximum upper and minimum lower bounds for RCIs thetaLow <- exp(.getUpperLowerThetaSurvival( design = design, dataInput = dataInput, theta = -1, stage = k, directionUpper = TRUE, conditionFunction = conditionFunction, firstParameterName = firstParameterName, secondValue = criticalValues[k] )) thetaUp <- exp(.getUpperLowerThetaSurvival( design = design, dataInput = dataInput, theta = 1, stage = k, directionUpper = FALSE, conditionFunction = conditionFunction, firstParameterName = firstParameterName, secondValue = criticalValues[k] )) # Finding upper and lower RCI limits through root function repeatedConfidenceIntervals[1, k] <- .getRootThetaSurvival( design = design, dataInput = dataInput, stage = k, directionUpper = C_DIRECTION_UPPER_DEFAULT, thetaLow = thetaLow, thetaUp = thetaUp, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance, callingFunctionInformation = paste0("Repeated confidence interval [1, ", k, "]") ) repeatedConfidenceIntervals[2, k] <- .getRootThetaSurvival( design = design, dataInput = dataInput, stage = k, directionUpper = FALSE, thetaLow = thetaLow, thetaUp = thetaUp, firstParameterName = firstParameterName, secondValue = criticalValues[k], tolerance = tolerance, callingFunctionInformation = paste0("Repeated confidence interval [2, ", k, "]") ) # Adjustment for binding futility bounds if (k > 1 && 
!is.na(bounds[k - 1]) && conditionFunction(bounds[k - 1], border) && design$bindingFutility) { parameterName <- ifelse(.isTrialDesignFisher(design), "pValues", firstParameterName) futilityCorr[k] <- .getRootThetaSurvival( design = design, dataInput = dataInput, stage = k - 1, directionUpper = directionUpper, thetaLow = thetaLow, thetaUp = thetaUp, firstParameterName = parameterName, secondValue = bounds[k - 1], tolerance = tolerance, callingFunctionInformation = paste0("Repeated confidence interval, futility correction [", k, "]") ) if (directionUpper) { repeatedConfidenceIntervals[1, k] <- min(min(futilityCorr[2:k]), repeatedConfidenceIntervals[1, k]) } else { repeatedConfidenceIntervals[2, k] <- max(max(futilityCorr[2:k]), repeatedConfidenceIntervals[2, k]) } } .logProgress("Repeated confidence interval of stage %s calculated", startTime = startTime, k) if (!is.na(repeatedConfidenceIntervals[1, k]) && !is.na(repeatedConfidenceIntervals[2, k]) && repeatedConfidenceIntervals[1, k] > repeatedConfidenceIntervals[2, k]) { repeatedConfidenceIntervals[, k] <- rep(NA_real_, 2) } } } return(repeatedConfidenceIntervals) } #' #' RCIs based on group sequential method #' #' @noRd #' .getRepeatedConfidenceIntervalsSurvivalGroupSequential <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsSurvivalGroupSequential", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ... ) return(.getRepeatedConfidenceIntervalsSurvivalAll( design = design, dataInput = dataInput, firstParameterName = "overallPValues", directionUpper = directionUpper, tolerance = tolerance, ... 
)) } #' #' RCIs based on inverse normal combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsSurvivalInverseNormal <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsSurvivalInverseNormal", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ... ) return(.getRepeatedConfidenceIntervalsSurvivalAll( design = design, dataInput = dataInput, firstParameterName = "combInverseNormal", directionUpper = directionUpper, tolerance = tolerance, ... )) } #' #' RCIs based on Fisher's combination test #' #' @noRd #' .getRepeatedConfidenceIntervalsSurvivalFisher <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .warnInCaseOfUnknownArguments( functionName = ".getRepeatedConfidenceIntervalsSurvivalFisher", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), "stage"), ... ) return(.getRepeatedConfidenceIntervalsSurvivalAll( design = design, dataInput = dataInput, firstParameterName = "combFisher", directionUpper = directionUpper, tolerance = tolerance, ... )) } #' #' Calculation of conditional power based on group sequential method #' #' @noRd #' .getConditionalPowerSurvivalGroupSequential <- function(..., stageResults, stage = stageResults$stage, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, nPlanned = NA_real_, thetaH1 = NA_real_) { design <- stageResults$.design .assertIsTrialDesignGroupSequential(design) .assertIsValidStage(stage, design$kMax) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerSurvivalGroupSequential", ignore = c("design", "stageResultsName"), ... 
) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) weights <- stageResults$weightsInverseNormal informationRates <- design$informationRates nPlanned <- c(rep(NA, stageResults$stage), nPlanned) if (stageResults$stage == kMax) { .logDebug( "Conditional power will be calculated only for subsequent stages ", "(stage = ", stageResults$stage, ", kMax = ", design$kMax, ")" ) return(list( nPlanned = nPlanned, conditionalPower = conditionalPower )) } criticalValuesInverseNormal <- design$criticalValues .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned if (stageResults$direction == "upper") { thetaH1 <- log(thetaH1 / stageResults$thetaH0) } else { thetaH1 <- -log(thetaH1 / stageResults$thetaH0) } # Shifted decision region for use in getGroupSeqProbs # Group sequential method shiftedDecisionRegionUpper <- criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) if (design$sided == 2) { shiftedDecisionRegionLower <- -criticalValuesInverseNormal[(stage + 1):kMax] * sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - .getOneMinusQNorm(stageResults$overallPValues[stage]) * sqrt(sum(weights[1:stage]^2)) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) - thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) / sqrt(cumsum(weights[(stage + 1):kMax]^2)) } if (stage == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- 
#'
#' Calculation of conditional power based on inverse normal method
#'
#' NOTE(review): this function is nearly identical to
#' .getConditionalPowerSurvivalGroupSequential; consider extracting a shared
#' helper in a future refactoring.
#'
#' @noRd
#'
.getConditionalPowerSurvivalInverseNormal <- function(..., stageResults, stage = stageResults$stage,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, nPlanned = NA_real_, thetaH1 = NA_real_) {
    design <- stageResults$.design
    .assertIsTrialDesignInverseNormal(design)
    .assertIsValidStage(stage, design$kMax)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalPowerSurvivalInverseNormal",
        ignore = c("design", "stageResultsName"), ...
    )
    kMax <- design$kMax
    conditionalPower <- rep(NA_real_, kMax)
    weights <- stageResults$weightsInverseNormal
    informationRates <- design$informationRates
    # pad with NA for the stages that are already observed
    nPlanned <- c(rep(NA, stageResults$stage), nPlanned)
    if (stageResults$stage == kMax) {
        # nothing to do at the final stage: no subsequent stages exist
        .logDebug(
            "Conditional power will be calculated only for subsequent stages ",
            "(stage = ", stageResults$stage, ", kMax = ", design$kMax, ")"
        )
        return(list(
            nPlanned = nPlanned,
            conditionalPower = conditionalPower
        ))
    }
    criticalValuesInverseNormal <- design$criticalValues
    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)
    # scale nPlanned by r/(1+r)^2 (allocation ratio r); the inverse scaling
    # is applied again before returning
    nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned
    # effect on the log hazard ratio scale relative to thetaH0, with the
    # sign chosen so that larger values always favor the alternative
    if (stageResults$direction == "upper") {
        thetaH1 <- log(thetaH1 / stageResults$thetaH0)
    } else {
        thetaH1 <- -log(thetaH1 / stageResults$thetaH0)
    }

    # Shifted decision region for use in getGroupSeqProbs
    # Inverse normal method
    shiftedDecisionRegionUpper <- criticalValuesInverseNormal[(stage + 1):kMax] *
        sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) /
        sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
        c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) /
            sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
        thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) /
            sqrt(cumsum(weights[(stage + 1):kMax]^2))
    if (design$sided == 2) {
        # lower boundary only needed for two-sided designs
        shiftedDecisionRegionLower <- -criticalValuesInverseNormal[(stage + 1):kMax] *
            sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) /
            sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
            .getOneMinusQNorm(stageResults$overallPValues[stage]) * sqrt(sum(weights[1:stage]^2)) /
                sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
            thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) /
                sqrt(cumsum(weights[(stage + 1):kMax]^2))
    }
    if (stage == kMax - 1) {
        # only the final stage remains -> no interim futility bounds ahead
        shiftedFutilityBounds <- c()
    } else {
        shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] *
            sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) /
            sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
            c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) /
                sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
            thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) /
                sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2))
    }

    # Scaled information for use in getGroupSeqProbs
    # (information of the remaining stages, rescaled to the conditional trial)
    scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) /
        (1 - informationRates[stage])

    if (design$sided == 2) {
        decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE)
    } else {
        decisionMatrix <- matrix(c(
            shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
            shiftedDecisionRegionUpper
        ), nrow = 2, byrow = TRUE)
    }

    probs <- .getGroupSequentialProbabilities(
        decisionMatrix = decisionMatrix,
        informationRates = scaledInformation
    )
    # cumulative crossing probabilities over the remaining stages
    if (design$twoSidedPower) {
        conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ])
    } else {
        conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ])
    }
    # undo the allocation-ratio scaling applied above
    nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned

    return(list(
        nPlanned = nPlanned,
        conditionalPower = conditionalPower
    ))
}
"stageResultsName"), ... ) kMax <- design$kMax conditionalPower <- rep(NA_real_, kMax) seed <- .setSeed(seed) simulated <- FALSE nPlanned <- c(rep(NA, stageResults$stage), nPlanned) .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned") .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM) nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned if (stageResults$direction == "upper") { thetaH1 <- log(thetaH1 / stageResults$thetaH0) } else { thetaH1 <- -log(thetaH1 / stageResults$thetaH0) } criticalValues <- design$criticalValues weightsFisher <- stageResults$weightsFisher pValues <- stageResults$pValues if (stageResults$stage < kMax - 1) { for (k in (stageResults$stage + 1):kMax) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueConditionalPowerFisher( kMax = kMax, alpha0Vec = design$alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, pValues = pValues, currentKMax = k, thetaH1 = thetaH1, stage = stageResults$stage, nPlanned = nPlanned ) } conditionalPower[k] <- reject / iterations } simulated <- TRUE } if (stageResults$stage == kMax - 1) { divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)]) result <- 1 - (criticalValues[kMax] / divisor)^(1 / weightsFisher[kMax]) if (result <= 0 || result >= 1) { warning("Calculation not possible: could not calculate conditional power for stage ", kMax, call. 
= FALSE) conditionalPower[kMax] <- NA_real_ } else { conditionalPower[kMax] <- 1 - stats::pnorm(.getQNorm(result) - thetaH1 * sqrt(nPlanned[kMax])) } } nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned return(list( nPlanned = nPlanned, conditionalPower = conditionalPower, iterations = as.integer(iterations), seed = seed, simulated = simulated )) } .getConditionalPowerSurvival <- function(..., stageResults, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaH1 = NA_real_) { results <- ConditionalPowerResultsSurvival( .stageResults = stageResults, .design = stageResults$.design, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1 ) if (any(is.na(nPlanned))) { return(results) } stage <- stageResults$stage thetaH1 <- .assertIsValidThetaH1(thetaH1, stageResults, stage) .assertIsInOpenInterval(thetaH1, "thetaH1", 0, Inf) if (!.isValidNPlanned(nPlanned = nPlanned, kMax = stageResults$.design$kMax, stage = stage)) { return(results) } if (.isTrialDesignGroupSequential(stageResults$.design)) { cp <- .getConditionalPowerSurvivalGroupSequential( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, ... ) } else if (.isTrialDesignInverseNormal(stageResults$.design)) { cp <- .getConditionalPowerSurvivalInverseNormal( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, ... ) } else if (.isTrialDesignFisher(stageResults$.design)) { cp <- .getConditionalPowerSurvivalFisher( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, thetaH1 = thetaH1, ... ) results$iterations <- cp$iterations results$seed <- cp$seed results$simulated <- cp$simulated .updateParameterTypeOfIterationsAndSeed(results, ...) 
} else { .stopWithWrongDesignMessage(stageResults$.design, inclusiveConditionalDunnett = FALSE) } results$nPlanned <- cp$nPlanned results$conditionalPower <- cp$conditionalPower results$.setParameterType("nPlanned", C_PARAM_GENERATED) results$.setParameterType("conditionalPower", C_PARAM_GENERATED) results$.setParameterType( "allocationRatioPlanned", ifelse(allocationRatioPlanned == C_ALLOCATION_RATIO_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED) ) results$.setParameterType("thetaH1", ifelse(is.na(thetaH1), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) return(results) } .getConditionalPowerPlotSurvival <- function(..., stageResults, stage, nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, thetaRange) { .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, 2) .associatedArgumentsAreDefined(nPlanned = nPlanned, thetaRange = thetaRange) .warnInCaseOfUnknownArguments( functionName = ".getConditionalPowerPlotSurvival", ignore = c("iterations", "seed", "stageResultsName", "grid"), ... 
)

    design <- stageResults$.design
    # Invalid nPlanned: return zeroed plot data so the caller can still plot
    if (!.isValidNPlanned(nPlanned = nPlanned, kMax = design$kMax, stage = stage)) {
        return(list(
            xValues = 0,
            condPowerValues = 0,
            likelihoodValues = 0,
            main = C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD,
            xlab = "Hazard ratio",
            ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD,
            sub = ""
        ))
    }

    thetaRange <- .assertIsValidThetaRange(thetaRange = thetaRange, survivalDataEnabled = TRUE)
    condPowerValues <- rep(NA, length(thetaRange))
    likelihoodValues <- rep(NA, length(thetaRange))

    # Collect warnings raised inside the loop, deduplicated, and muffle them
    # so each distinct message is re-emitted exactly once afterwards
    warningMessages <- c()
    withCallingHandlers(
        for (i in seq(along = thetaRange)) {
            # Conditional power at the final stage for each theta, dispatched
            # on the design type
            if (.isTrialDesignGroupSequential(design)) {
                condPowerValues[i] <- .getConditionalPowerSurvivalGroupSequential(
                    stageResults = stageResults,
                    nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                    thetaH1 = thetaRange[i]
                )$conditionalPower[design$kMax]
            }
            if (.isTrialDesignInverseNormal(design)) {
                condPowerValues[i] <- .getConditionalPowerSurvivalInverseNormal(
                    stageResults = stageResults,
                    nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                    thetaH1 = thetaRange[i]
                )$conditionalPower[design$kMax]
            }
            if (.isTrialDesignFisher(design)) {
                condPowerValues[i] <- .getConditionalPowerSurvivalFisher(
                    stageResults = stageResults,
                    nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                    thetaH1 = thetaRange[i]
                )$conditionalPower[design$kMax]
            }
            # Likelihood of theta on the log scale, normalized by the density
            # at its mode (so the maximum value is 1)
            likelihoodValues[i] <- stats::dnorm(
                log(thetaRange[i]), log(stageResults$effectSizes[stage]),
                2 / sqrt(stageResults$overallEvents[stage])
            ) / stats::dnorm(0, 0, 2 / sqrt(stageResults$overallEvents[stage]))
        },
        warning = function(w) {
            m <- w$message
            if (!(m %in% warningMessages)) {
                warningMessages <<- c(warningMessages, m)
            }
            invokeRestart("muffleWarning")
        },
        error = function(e) {
            e
        }
    )
    # Re-emit each distinct warning once
    if (length(warningMessages) > 0) {
        for (m in warningMessages) {
            warning(m, call.
= FALSE) } } subtitle <- paste0( "Stage = ", stage, ", maximum number of remaining events = ", sum(nPlanned), ", allocation ratio = ", .formatSubTitleValue(allocationRatioPlanned, "allocationRatioPlanned") ) return(list( xValues = thetaRange, condPowerValues = condPowerValues, likelihoodValues = likelihoodValues, main = C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD, xlab = "Hazard ratio", ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD, sub = subtitle )) } #' #' Calculation of final confidence interval #' based on group sequential test without SSR (general case). #' #' @noRd #' .getFinalConfidenceIntervalSurvivalGroupSequential <- function(..., design, dataInput, stage, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsSurvival( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper ) finalConfidenceIntervalGeneral <- rep(NA_real_, 2) medianUnbiasedGeneral <- NA_real_ stageGroupSeq <- .getStageGroupSeq(design = design, stageResults = stageResults, stage = stage) finalStage <- min(stageGroupSeq, design$kMax) # Early stopping or at end of study if (stageGroupSeq < design$kMax || stage == design$kMax) { if (stageGroupSeq == 1) { finalConfidenceIntervalGeneral[1] <- stageResults$testStatistics[1] - .getOneMinusQNorm(design$alpha / design$sided) finalConfidenceIntervalGeneral[2] <- stageResults$testStatistics[1] + .getOneMinusQNorm(design$alpha / design$sided) medianUnbiasedGeneral <- stageResults$testStatistics[1] } else { finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot( design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, firstParameterName = "overallPValues", case = "finalConfidenceIntervalGeneralLower" ) finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot( design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, 
firstParameterName = "overallPValues",
            case = "finalConfidenceIntervalGeneralUpper"
        )
        # Median unbiased estimate from the same root-finding helper
        medianUnbiasedGeneral <- .getDecisionMatrixRoot(
            design = design,
            stage = finalStage,
            stageResults = stageResults,
            tolerance = tolerance,
            firstParameterName = "overallPValues",
            case = "medianUnbiasedGeneral"
        )
    }
}

    # Root finding failed after stage 1: no final stage can be declared
    if (is.na(finalConfidenceIntervalGeneral[1]) && (stageGroupSeq > 1)) {
        finalStage <- NA_integer_
    }

    finalConfidenceInterval <- rep(NA_real_, 2)
    medianUnbiased <- NA_real_

    if (!is.na(finalStage)) {
        # Retransformation
        # Recompute stage results up to the final stage to obtain the overall
        # allocation ratio used in the back-transformation below
        y <- .getStageResultsSurvival(
            design = design, dataInput = dataInput,
            stage = finalStage, thetaH0 = thetaH0, directionUpper = directionUpper
        )
        # NOTE: local variable 'stderr' shadows base::stderr() in this scope
        stderr <- (1 + y$overallAllocationRatios[finalStage]) /
            sqrt(y$overallAllocationRatios[finalStage]) /
            sqrt(stageResults$overallEvents[finalStage])
        directionUpperSign <- ifelse(directionUpper, 1, -1)
        if (stageGroupSeq == 1) {
            # Stage-1 stop: simple exponential back-transformation
            finalConfidenceInterval <- exp(stderr * finalConfidenceIntervalGeneral)
            medianUnbiased <- exp(stderr * medianUnbiasedGeneral)
        } else {
            # Later stages: back-transform to the hazard ratio scale,
            # shifting by log(thetaH0) with the direction sign
            finalConfidenceInterval[1] <- exp(finalConfidenceIntervalGeneral[1] *
                (1 + y$overallAllocationRatios[finalStage]) /
                sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0))
            finalConfidenceInterval[2] <- exp(finalConfidenceIntervalGeneral[2] *
                (1 + y$overallAllocationRatios[finalStage]) /
                sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0))
            medianUnbiased <- exp(medianUnbiasedGeneral *
                (1 + y$overallAllocationRatios[finalStage]) /
                sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0))
        }
    }

    # For 'lower' direction, invert the estimates (reciprocal hazard ratio)
    if (!directionUpper) {
        medianUnbiasedGeneral <- 1 / medianUnbiasedGeneral
        finalConfidenceIntervalGeneral <- 1 / finalConfidenceIntervalGeneral
        if (stageGroupSeq > 1) {
            medianUnbiased <- 1 / medianUnbiased
            finalConfidenceInterval <- 1 / finalConfidenceInterval
        }
    }

    # Sort bounds so lower <= upper after the possible inversion above
    if (!any(is.na(finalConfidenceIntervalGeneral))) {
        finalConfidenceIntervalGeneral <- sort(finalConfidenceIntervalGeneral)
    }
    if
(!any(is.na(finalConfidenceInterval))) { finalConfidenceInterval <- sort(finalConfidenceInterval) } return(list( stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, tolerance = tolerance, finalStage = finalStage, medianUnbiasedGeneral = medianUnbiasedGeneral, finalConfidenceIntervalGeneral = finalConfidenceIntervalGeneral, medianUnbiased = medianUnbiased, finalConfidenceInterval = finalConfidenceInterval )) } #' #' Calculation of final confidence interval #' based on inverse normal method, only valid for kMax <= 2 or no SSR. #' #' @noRd #' .getFinalConfidenceIntervalSurvivalInverseNormal <- function(..., design, dataInput, stage, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsSurvival( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper ) finalConfidenceIntervalGeneral <- rep(NA_real_, 2) medianUnbiasedGeneral <- NA_real_ stageInvNormal <- .getStageInverseNormal(design = design, stageResults = stageResults, stage = stage) finalStage <- min(stageInvNormal, design$kMax) # Early stopping or at end of study if (stageInvNormal < design$kMax || stage == design$kMax) { if (stageInvNormal == 1) { finalConfidenceIntervalGeneral[1] <- stageResults$testStatistics[1] - .getOneMinusQNorm(design$alpha / design$sided) finalConfidenceIntervalGeneral[2] <- stageResults$testStatistics[1] + .getOneMinusQNorm(design$alpha / design$sided) medianUnbiasedGeneral <- stageResults$testStatistics[1] } else { if ((design$kMax > 2) && !.isNoEarlyEfficacy(design)) { message( "Calculation of final confidence interval performed for kMax = ", design$kMax, " (for kMax > 2, it is theoretically shown that it is valid only ", "if no sample size change was performed)" ) } finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot( design = design, stage = finalStage, stageResults = stageResults, tolerance = tolerance, 
firstParameterName = "combInverseNormal",
            case = "finalConfidenceIntervalGeneralLower"
        )
        finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot(
            design = design,
            stage = finalStage,
            stageResults = stageResults,
            tolerance = tolerance,
            firstParameterName = "combInverseNormal",
            case = "finalConfidenceIntervalGeneralUpper"
        )
        # Median unbiased estimate from the same root-finding helper
        medianUnbiasedGeneral <- .getDecisionMatrixRoot(
            design = design,
            stage = finalStage,
            stageResults = stageResults,
            tolerance = tolerance,
            firstParameterName = "combInverseNormal",
            case = "medianUnbiasedGeneral"
        )
    }
}

    # Root finding failed after stage 1: no final stage can be declared
    if (is.na(finalConfidenceIntervalGeneral[1]) && (stageInvNormal > 1)) {
        finalStage <- NA_integer_
    }

    finalConfidenceInterval <- rep(NA_real_, 2)
    medianUnbiased <- NA_real_

    if (!is.na(finalStage)) {
        # Retransformation
        # Recompute stage results up to the final stage to obtain the overall
        # allocation ratio used in the back-transformation below
        y <- .getStageResultsSurvival(
            design = design, dataInput = dataInput,
            stage = finalStage, thetaH0 = thetaH0, directionUpper = directionUpper
        )
        # NOTE: local variable 'stderr' shadows base::stderr() in this scope
        stderr <- (1 + y$overallAllocationRatios[finalStage]) /
            sqrt(y$overallAllocationRatios[finalStage]) /
            sqrt(stageResults$overallEvents[finalStage])
        directionUpperSign <- ifelse(directionUpper, 1, -1)
        if (stageInvNormal == 1) {
            # Stage-1 stop: simple exponential back-transformation
            finalConfidenceInterval <- exp(stderr * finalConfidenceIntervalGeneral)
            medianUnbiased <- exp(stderr * medianUnbiasedGeneral)
        } else {
            # Later stages: back-transform to the hazard ratio scale,
            # shifting by log(thetaH0) with the direction sign
            finalConfidenceInterval[1] <- exp(finalConfidenceIntervalGeneral[1] *
                (1 + y$overallAllocationRatios[finalStage]) /
                sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0))
            finalConfidenceInterval[2] <- exp(finalConfidenceIntervalGeneral[2] *
                (1 + y$overallAllocationRatios[finalStage]) /
                sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0))
            medianUnbiased <- exp(medianUnbiasedGeneral *
                (1 + y$overallAllocationRatios[finalStage]) /
                sqrt(y$overallAllocationRatios[finalStage]) + directionUpperSign * log(thetaH0))
        }
    }

    # For 'lower' direction, invert the estimates (reciprocal hazard ratio)
    if (!directionUpper) {
        medianUnbiasedGeneral <- 1 / medianUnbiasedGeneral
        finalConfidenceIntervalGeneral <- 1 / finalConfidenceIntervalGeneral
        if (stageInvNormal
> 1) { medianUnbiased <- 1 / medianUnbiased finalConfidenceInterval <- 1 / finalConfidenceInterval } } return(list( stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, tolerance = tolerance, finalStage = finalStage, medianUnbiasedGeneral = medianUnbiasedGeneral, finalConfidenceIntervalGeneral = sort(finalConfidenceIntervalGeneral), medianUnbiased = medianUnbiased, finalConfidenceInterval = sort(finalConfidenceInterval) )) } #' #' Calculation of final confidence interval #' based on Fisher combination test, only valid for kMax <= 2. #' #' @noRd #' .getFinalConfidenceIntervalSurvivalFisher <- function(..., design, dataInput, stage, thetaH0 = C_THETA_H0_SURVIVAL_DEFAULT, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stageResults <- .getStageResultsSurvival( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper ) stageFisher <- .getStageFisher(design = design, stageResults = stageResults, stage = stage) finalStage <- min(stageFisher, design$kMax) finalConfidenceInterval <- rep(NA_real_, 2) medianUnbiased <- NA_real_ # early stopping or at end of study if (stageFisher < design$kMax || stage == design$kMax) { message( "Calculation of final confidence interval for Fisher's ", "design not implemented yet" ) return(list( finalStage = NA_integer_, medianUnbiased = NA_real_, finalConfidenceInterval = rep(NA_real_, design$kMax) )) } return(list( stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, tolerance = tolerance, finalStage = finalStage, medianUnbiased = medianUnbiased, finalConfidenceInterval = finalConfidenceInterval )) } .getFinalConfidenceIntervalSurvival <- function(..., design, dataInput, thetaH0 = NA_real_, directionUpper = C_DIRECTION_UPPER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .assertIsValidThetaH0DataInput(thetaH0, dataInput) 
.warnInCaseOfUnknownArguments( functionName = "getFinalConfidenceIntervalSurvival", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... ) if (design$kMax == 1) { return(list( finalStage = NA_integer_, medianUnbiasedGeneral = NA_real_, finalConfidenceIntervalGeneral = c(NA_real_, NA_real_), medianUnbiased = NA_real_, finalConfidenceInterval = c(NA_real_) )) } if (is.na(thetaH0)) { thetaH0 <- C_THETA_H0_SURVIVAL_DEFAULT } if (.isTrialDesignGroupSequential(design)) { return(.getFinalConfidenceIntervalSurvivalGroupSequential( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, tolerance = tolerance )) } if (.isTrialDesignInverseNormal(design)) { return(.getFinalConfidenceIntervalSurvivalInverseNormal( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, tolerance = tolerance )) } if (.isTrialDesignFisher(design)) { return(.getFinalConfidenceIntervalSurvivalFisher( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, tolerance = tolerance )) } .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE) } rpact/R/f_analysis_base_rates.R0000644000176200001440000023240014445307575016246 0ustar liggesusers## | ## | *Analysis of rates with group sequential and combination test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 
2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_logger.R NULL #' @title #' Get Analysis Results Rates #' #' @description #' Returns an analysis result object. #' #' @param design The trial design. #' #' @return Returns a \code{AnalysisResultsRates} object. #' #' @keywords internal #' #' @noRd #' .getAnalysisResultsRates <- function(..., design, dataInput) { if (.isTrialDesignGroupSequential(design)) { return(.getAnalysisResultsRatesGroupSequential( design = design, dataInput = dataInput, ... )) } if (.isTrialDesignInverseNormal(design)) { return(.getAnalysisResultsRatesInverseNormal( design = design, dataInput = dataInput, ... )) } if (.isTrialDesignFisher(design)) { return(.getAnalysisResultsRatesFisher( design = design, dataInput = dataInput, ... )) } .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE) } .getAnalysisResultsRatesInverseNormal <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, thetaH0 = C_THETA_H0_RATES_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignInverseNormal(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsRatesInverseNormal", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) results <- AnalysisResultsInverseNormal(design = design, dataInput = dataInput) .getAnalysisResultsRatesAll( results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance ) return(results) } .getAnalysisResultsRatesGroupSequential <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, thetaH0 = C_THETA_H0_RATES_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) { .assertIsTrialDesignGroupSequential(design) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsRatesGroupSequential", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) results <- AnalysisResultsGroupSequential(design = design, dataInput = dataInput) .getAnalysisResultsRatesAll( results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance ) return(results) } .getAnalysisResultsRatesFisher <- function(..., design, dataInput, directionUpper = C_DIRECTION_UPPER_DEFAULT, normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT, thetaH0 = C_THETA_H0_RATES_DEFAULT, pi1 = NA_real_, pi2 = NA_real_, nPlanned = NA_real_, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) { .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE) stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design) .warnInCaseOfUnknownArguments( functionName = ".getAnalysisResultsRatesFisher", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "stage"), ... 
) results <- AnalysisResultsFisher(design = design, dataInput = dataInput) .setValueAndParameterType(results, "iterations", as.integer(iterations), C_ITERATIONS_DEFAULT) .setValueAndParameterType(results, "seed", seed, NA_real_) .getAnalysisResultsRatesAll( results = results, design = design, dataInput = dataInput, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, thetaH0 = thetaH0, pi1 = pi1, pi2 = pi2, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, tolerance = tolerance, iterations = iterations, seed = seed ) return(results) } #' #' The following parameters will be taken from 'design': #' stages, informationRates, criticalValues, futilityBounds, alphaSpent, stageLevels #' #' @noRd #' .getAnalysisResultsRatesAll <- function(..., results, design, dataInput, stage, directionUpper, normalApproximation, thetaH0, pi1, pi2, nPlanned, allocationRatioPlanned, tolerance, iterations, seed) { startTime <- Sys.time() stageResults <- .getStageResultsRates( design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper, normalApproximation = normalApproximation ) results$.setStageResults(stageResults) .logProgress("Stage results calculated", startTime = startTime) pi1User <- pi1 .assertIsSingleNumber(pi1, "pi1", naAllowed = TRUE) pi1 <- .assertIsValidPi1(pi1, stageResults, stage) if (identical(pi1, pi1User)) { .setValueAndParameterType(results, "pi1", pi1, NA_real_) } else { results$pi1 <- pi1 results$.setParameterType("pi1", C_PARAM_GENERATED) } if (dataInput$getNumberOfGroups() == 2) { pi2User <- pi2 .assertIsSingleNumber(pi2, "pi2", naAllowed = TRUE) pi2 <- .assertIsValidPi2(pi2, stageResults, stage) if (identical(pi2, pi2User)) { .setValueAndParameterType(results, "pi2", pi2, NA_real_) } else { results$pi2 <- pi2 results$.setParameterType("pi2", C_PARAM_GENERATED) } } else { if (!all(is.na(pi2))) { warning("'pi2' (", .arrayToString(pi2), ") will be ignored ", "because 
the specified data has only one group", call. = FALSE ) } results$pi2 <- NA_real_ results$.setParameterType("pi2", C_PARAM_NOT_APPLICABLE) } .warnInCaseOfUnusedConditionalPowerArgument(results, nPlanned, "pi1", pi1) .warnInCaseOfUnusedConditionalPowerArgument(results, nPlanned, "pi2", pi2) .setValueAndParameterType(results, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT) .setValueAndParameterType( results, "normalApproximation", normalApproximation, C_NORMAL_APPROXIMATION_RATES_DEFAULT ) .setValueAndParameterType(results, "thetaH0", thetaH0, C_THETA_H0_RATES_DEFAULT) .setConditionalPowerArguments(results, dataInput, nPlanned, allocationRatioPlanned) # test actions results$testActions <- getTestActions(stageResults = stageResults) results$.setParameterType("testActions", C_PARAM_GENERATED) if (design$kMax > 1) { # conditional power startTime <- Sys.time() if (.isTrialDesignFisher(design)) { results$.conditionalPowerResults <- .getConditionalPowerRates( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = pi1, pi2 = pi2, iterations = iterations, seed = seed ) .synchronizeIterationsAndSeed(results) } else { results$.conditionalPowerResults <- .getConditionalPowerRates( stageResults = stageResults, nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned, pi1 = pi1, pi2 = pi2 ) results$conditionalPower <- results$.conditionalPowerResults$conditionalPower results$.setParameterType("conditionalPower", C_PARAM_GENERATED) } .logProgress("Conditional power calculated", startTime = startTime) # CRP - conditional rejection probabilities startTime <- Sys.time() if (.isTrialDesignFisher(design) && isTRUE(.getOptionalArgument("simulateCRP", ...))) { results$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) seed <- results$.conditionalPowerResults$seed crp <- getConditionalRejectionProbabilities( stageResults = stageResults, iterations = iterations, seed = seed ) 
results$conditionalRejectionProbabilities <- crp$crpFisherSimulated
            # Propagate the seed's parameter type from the conditional power
            # results when it is known
            paramTypeSeed <- results$.conditionalPowerResults$.getParameterType("seed")
            if (paramTypeSeed != C_PARAM_TYPE_UNKNOWN) {
                results$.setParameterType("seed", paramTypeSeed)
            }
            results$seed <- seed
        } else {
            results$conditionalRejectionProbabilities <-
                getConditionalRejectionProbabilities(stageResults = stageResults)
        }
        results$.setParameterType("conditionalRejectionProbabilities", C_PARAM_GENERATED)
        .logProgress("Conditional rejection probabilities (CRP) calculated", startTime = startTime)
    }

    # RCI - repeated confidence interval
    startTime <- Sys.time()
    repeatedConfidenceIntervals <- .getRepeatedConfidenceIntervalsRates(
        design = design, dataInput = dataInput, stage = stage,
        normalApproximation = normalApproximation, tolerance = tolerance
    )
    # Row 1 holds the lower bounds, row 2 the upper bounds
    results$repeatedConfidenceIntervalLowerBounds <- repeatedConfidenceIntervals[1, ]
    results$repeatedConfidenceIntervalUpperBounds <- repeatedConfidenceIntervals[2, ]
    results$.setParameterType("repeatedConfidenceIntervalLowerBounds", C_PARAM_GENERATED)
    results$.setParameterType("repeatedConfidenceIntervalUpperBounds", C_PARAM_GENERATED)
    .logProgress("Repeated confidence interval calculated", startTime = startTime)

    # repeated p-value
    startTime <- Sys.time()
    results$repeatedPValues <- getRepeatedPValues(
        stageResults = stageResults,
        tolerance = tolerance
    )
    results$.setParameterType("repeatedPValues", C_PARAM_GENERATED)
    .logProgress("Repeated p-values calculated", startTime = startTime)

    if (design$kMax > 1) {
        # final p-value
        startTime <- Sys.time()
        finalPValue <- getFinalPValue(stageResults, showWarnings = FALSE)
        results$finalPValues <- .getVectorWithFinalValueAtFinalStage(
            kMax = design$kMax,
            finalValue = finalPValue$pFinal,
            finalStage = finalPValue$finalStage
        )
        results$finalStage <- finalPValue$finalStage
        # Fix: the parameter type of 'finalPValues' was previously set twice
        # with identical arguments; the redundant duplicate call was removed
        results$.setParameterType("finalPValues", C_PARAM_GENERATED)
        results$.setParameterType("finalStage",
C_PARAM_GENERATED) .logProgress("Final p-value calculated", startTime = startTime) # final confidence interval & median unbiased estimate startTime <- Sys.time() finalConfidenceIntervals <- .getFinalConfidenceIntervalRates( design = design, dataInput = dataInput, thetaH0 = thetaH0, stage = stage, directionUpper = directionUpper, normalApproximation = normalApproximation, tolerance = tolerance ) if (!is.null(finalConfidenceIntervals)) { finalStage <- finalConfidenceIntervals$finalStage results$finalConfidenceIntervalLowerBounds <- .getVectorWithFinalValueAtFinalStage( kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[1], finalStage = finalStage ) results$finalConfidenceIntervalUpperBounds <- .getVectorWithFinalValueAtFinalStage( kMax = design$kMax, finalValue = finalConfidenceIntervals$finalConfidenceInterval[2], finalStage = finalStage ) results$medianUnbiasedEstimates <- .getVectorWithFinalValueAtFinalStage( kMax = design$kMax, finalValue = finalConfidenceIntervals$medianUnbiased, finalStage = finalStage ) results$.setParameterType("finalConfidenceIntervalLowerBounds", C_PARAM_GENERATED) results$.setParameterType("finalConfidenceIntervalUpperBounds", C_PARAM_GENERATED) results$.setParameterType("medianUnbiasedEstimates", C_PARAM_GENERATED) .logProgress("Final confidence interval calculated", startTime = startTime) } } return(results) } #' @title #' Get Stage Results Rates #' #' @description #' Returns a stage results object. #' #' @param design the trial design. #' #' @return Returns a \code{StageResultsRates} object. 
#'
#' @keywords internal
#'
#' @noRd
#'
.getStageResultsRates <- function(..., design, dataInput,
        thetaH0 = NA_real_,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        stage = NA_integer_,
        userFunctionCallEnabled = FALSE) {
    .assertIsDatasetRates(dataInput)
    .assertIsValidThetaH0DataInput(thetaH0, dataInput)
    .assertIsValidDirectionUpper(directionUpper, design$sided,
        userFunctionCallEnabled = userFunctionCallEnabled
    )
    .assertIsSingleLogical(normalApproximation, "normalApproximation")
    .warnInCaseOfUnknownArguments(
        functionName = "getStageResultsRates",
        ignore = .getDesignArgumentsToIgnoreAtUnknownArgumentCheck(design, powerCalculationEnabled = TRUE), ...
    )
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design, stage = stage)

    effectSizes <- rep(NA_real_, design$kMax)

    # One-sample case: overall (cumulative) test statistics and p-values,
    # either via the normal approximation or via the exact binomial test.
    if (dataInput$getNumberOfGroups() == 1) {
        if (is.na(thetaH0)) {
            stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'thetaH0' must be defined")
        }
        if (normalApproximation) {
            overallTestStatistics <- c(
                (dataInput$getOverallEventsUpTo(stage) /
                    dataInput$getOverallSampleSizesUpTo(stage) - thetaH0) /
                    sqrt(thetaH0 * (1 - thetaH0)) * sqrt(dataInput$getOverallSampleSizesUpTo(stage)),
                rep(NA_real_, design$kMax - stage)
            )
            if (directionUpper) {
                overallPValues <- 1 - stats::pnorm(overallTestStatistics)
            } else {
                overallPValues <- stats::pnorm(overallTestStatistics)
            }
        } else {
            overallTestStatistics <- rep(NA_real_, design$kMax)
            if (directionUpper) {
                overallPValues <- stats::pbinom(dataInput$getOverallEventsUpTo(stage) - 1,
                    dataInput$getOverallSampleSizesUpTo(stage), thetaH0,
                    lower.tail = FALSE
                )
            } else {
                overallPValues <- stats::pbinom(dataInput$getOverallEventsUpTo(stage),
                    dataInput$getOverallSampleSizesUpTo(stage), thetaH0,
                    lower.tail = TRUE
                )
            }
            # map exact p-values back to normal quantile scale
            overallTestStatistics <- .getOneMinusQNorm(overallPValues)
        }
        effectSizes[1:stage] <- dataInput$getOverallEventsUpTo(stage) / dataInput$getOverallSampleSizesUpTo(stage)
    }
    # Two-sample case: overall statistics per stage, using the pooled-variance
    # statistic for thetaH0 == 0, the Farrington-Manning statistic otherwise,
    # or Fisher's exact test (phyper) when no normal approximation is requested.
    if (dataInput$getNumberOfGroups() == 2) {
        if (is.na(thetaH0)) {
            thetaH0 <- C_THETA_H0_RATES_DEFAULT
        }
        overallEvents1 <- dataInput$getOverallEvents(group = 1)
        overallEvents2 <- dataInput$getOverallEvents(group = 2)
        overallTestStatistics <- rep(NA_real_, design$kMax)
        overallPValues <- rep(NA_real_, design$kMax)
        for (k in 1:stage) {
            if (normalApproximation) {
                if (thetaH0 == 0) {
                    # degenerate cases (no events or all events) yield statistic 0
                    if ((overallEvents1[k] + overallEvents2[k] == 0) ||
                            (overallEvents1[k] + overallEvents2[k] ==
                                sum(dataInput$getSampleSizesUpTo(k, 1)) + sum(dataInput$getSampleSizesUpTo(k, 2)))) {
                        overallTestStatistics[k] <- 0
                    } else {
                        overallRateH0 <- (overallEvents1[k] + overallEvents2[k]) /
                            (sum(dataInput$getSampleSizesUpTo(k, 1)) + sum(dataInput$getSampleSizesUpTo(k, 2)))
                        overallTestStatistics[k] <-
                            (overallEvents1[k] / sum(dataInput$getSampleSizesUpTo(k, 1)) -
                                overallEvents2[k] / sum(dataInput$getSampleSizesUpTo(k, 2)) - thetaH0) /
                                sqrt(overallRateH0 * (1 - overallRateH0) *
                                    (1 / sum(dataInput$getSampleSizesUpTo(k, 1)) +
                                        1 / sum(dataInput$getSampleSizesUpTo(k, 2))))
                    }
                } else {
                    y <- .getFarringtonManningValues(
                        rate1 = overallEvents1[k] / sum(dataInput$getSampleSizesUpTo(k, 1)),
                        rate2 = overallEvents2[k] / sum(dataInput$getSampleSizesUpTo(k, 2)),
                        theta = thetaH0,
                        allocation = sum(dataInput$getSampleSizesUpTo(k, 1)) / sum(dataInput$getSampleSizesUpTo(k, 2)),
                        method = "diff"
                    )
                    overallTestStatistics[k] <-
                        (overallEvents1[k] / sum(dataInput$getSampleSizesUpTo(k, 1)) -
                            overallEvents2[k] / sum(dataInput$getSampleSizesUpTo(k, 2)) - thetaH0) /
                            sqrt(y$ml1 * (1 - y$ml1) / sum(dataInput$getSampleSizesUpTo(k, 1)) +
                                y$ml2 * (1 - y$ml2) / sum(dataInput$getSampleSizesUpTo(k, 2)))
                }
                if (directionUpper) {
                    overallPValues[k] <- 1 - stats::pnorm(overallTestStatistics[k])
                } else {
                    overallPValues[k] <- stats::pnorm(overallTestStatistics[k])
                }
            } else {
                if (thetaH0 != 0) {
                    stop(
                        C_EXCEPTION_TYPE_CONFLICTING_ARGUMENTS,
                        "thetaH0 must be equal 0 for performing Fisher's exact test"
                    )
                }
                if (directionUpper) {
                    overallPValues[k] <- stats::phyper(overallEvents1[k] - 1,
                        overallEvents1[k] + overallEvents2[k],
                        sum(dataInput$getSampleSizesUpTo(k, 1)) +
                            sum(dataInput$getSampleSizesUpTo(k, 2)) - overallEvents1[k] - overallEvents2[k],
                        sum(dataInput$getSampleSizesUpTo(k, 1)),
                        lower.tail = FALSE
                    )
                } else {
                    overallPValues[k] <- stats::phyper(overallEvents1[k],
                        overallEvents1[k] + overallEvents2[k],
                        sum(dataInput$getSampleSizesUpTo(k, 1)) +
                            sum(dataInput$getSampleSizesUpTo(k, 2)) - overallEvents1[k] - overallEvents2[k],
                        sum(dataInput$getSampleSizesUpTo(k, 1)),
                        lower.tail = TRUE
                    )
                }
                overallTestStatistics <- .getOneMinusQNorm(overallPValues)
            }
        }
        effectSizes[1:stage] <- overallEvents1[1:stage] / cumsum(dataInput$getSampleSizesUpTo(stage, 1)) -
            overallEvents2[1:stage] / cumsum(dataInput$getSampleSizesUpTo(stage, 2))
    }

    # calculation of stage-wise test statistics and combination tests
    testStatistics <- rep(NA_real_, design$kMax)
    pValues <- rep(NA_real_, design$kMax)
    combInverseNormal <- rep(NA_real_, design$kMax)
    combFisher <- rep(NA_real_, design$kMax)
    weightsInverseNormal <- .getWeightsInverseNormal(design)
    weightsFisher <- .getWeightsFisher(design)

    for (k in 1:stage) {
        if (dataInput$getNumberOfGroups() == 1) {
            if (normalApproximation) {
                # stage-wise test statistics
                testStatistics[k] <- (dataInput$getEvent(k) / dataInput$getSampleSize(k) - thetaH0) /
                    sqrt(thetaH0 * (1 - thetaH0)) * sqrt(dataInput$getSampleSize(k))
                pValues[k] <- 1 - stats::pnorm(testStatistics[k])
                if (!directionUpper) {
                    pValues[k] <- 1 - pValues[k]
                }
            } else {
                # exact binomial test: no normal test statistic available
                testStatistics[k] <- NA_real_
                if (directionUpper) {
                    pValues[k] <- stats::pbinom(dataInput$getEvent(k) - 1,
                        dataInput$getSampleSize(k), thetaH0,
                        lower.tail = FALSE
                    )
                } else {
                    pValues[k] <- stats::pbinom(dataInput$getEvent(k),
                        dataInput$getSampleSize(k), thetaH0,
                        lower.tail = TRUE
                    )
                }
            }
        } else if (dataInput$getNumberOfGroups() == 2) {
            if (normalApproximation) {
                # stage-wise test statistics
                if (thetaH0 == 0) {
                    if ((dataInput$getEvent(k, 1) + dataInput$getEvent(k, 2) == 0) ||
                            (dataInput$getEvent(k, 1) + dataInput$getEvent(k, 2) ==
                                dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2))) {
                        testStatistics[k] <- 0
                    } else {
                        rateH0 <- (dataInput$getEvent(k, 1) + dataInput$getEvent(k, 2)) /
                            (dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2))
                        testStatistics[k] <- (dataInput$getEvent(k, 1) / dataInput$getSampleSize(k, 1) -
                            dataInput$getEvent(k, 2) / dataInput$getSampleSize(k, 2) - thetaH0) /
                            sqrt(rateH0 * (1 - rateH0) *
                                (1 / dataInput$getSampleSize(k, 1) + 1 / dataInput$getSampleSize(k, 2)))
                    }
                } else {
                    y <- .getFarringtonManningValues(
                        rate1 = dataInput$getEvent(k, 1) / dataInput$getSampleSize(k, 1),
                        rate2 = dataInput$getEvent(k, 2) / dataInput$getSampleSize(k, 2),
                        theta = thetaH0,
                        allocation = dataInput$getSampleSize(k, 1) / dataInput$getSampleSize(k, 2),
                        method = "diff"
                    )
                    testStatistics[k] <- (dataInput$getEvent(k, 1) / dataInput$getSampleSize(k, 1) -
                        dataInput$getEvent(k, 2) / dataInput$getSampleSize(k, 2) - thetaH0) /
                        sqrt(y$ml1 * (1 - y$ml1) / dataInput$getSampleSize(k, 1) +
                            y$ml2 * (1 - y$ml2) / dataInput$getSampleSize(k, 2))
                }
                if (directionUpper) {
                    pValues[k] <- 1 - stats::pnorm(testStatistics[k])
                } else {
                    pValues[k] <- stats::pnorm(testStatistics[k])
                }
            } else {
                # Fisher's exact test per stage
                testStatistics[k] <- NA_real_
                if (directionUpper) {
                    pValues[k] <- stats::phyper(dataInput$getEvent(k, 1) - 1,
                        dataInput$getEvent(k, 1) + dataInput$getEvent(k, 2),
                        dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2) -
                            dataInput$getEvent(k, 1) - dataInput$getEvent(k, 2),
                        dataInput$getSampleSize(k, 1),
                        lower.tail = FALSE
                    )
                } else {
                    pValues[k] <- stats::phyper(dataInput$getEvent(k, 1),
                        dataInput$getEvent(k, 1) + dataInput$getEvent(k, 2),
                        dataInput$getSampleSize(k, 1) + dataInput$getSampleSize(k, 2) -
                            dataInput$getEvent(k, 1) - dataInput$getEvent(k, 2),
                        dataInput$getSampleSize(k, 1),
                        lower.tail = TRUE
                    )
                }
            }
        }

        # inverse normal test
        combInverseNormal[k] <- (weightsInverseNormal[1:k] %*% .getOneMinusQNorm(pValues[1:k])) /
            sqrt(sum(weightsInverseNormal[1:k]^2))

        # Fisher combination test
        combFisher[k] <- prod(pValues[1:k]^weightsFisher[1:k])
    }

    direction <- ifelse(directionUpper, C_DIRECTION_UPPER, C_DIRECTION_LOWER)
    stageResults <- StageResultsRates(
        design = design,
        dataInput = dataInput,
        stage = as.integer(stage),
        overallTestStatistics = .fillWithNAs(overallTestStatistics, design$kMax),
        overallPValues = .fillWithNAs(overallPValues, design$kMax),
        effectSizes = effectSizes,
        overallEvents = .fillWithNAs(dataInput$getOverallEventsUpTo(stage, group = 1), design$kMax),
        overallSampleSizes = .fillWithNAs(dataInput$getOverallSampleSizesUpTo(stage, 1), design$kMax),
        testStatistics = testStatistics,
        pValues = pValues,
        combInverseNormal = combInverseNormal,
        combFisher = combFisher,
        weightsInverseNormal = weightsInverseNormal,
        weightsFisher = weightsFisher,
        thetaH0 = thetaH0,
        direction = direction,
        normalApproximation = normalApproximation
    )
    if (dataInput$getNumberOfGroups() == 1) {
        stageResults$overallEvents <- .fillWithNAs(dataInput$getOverallEventsUpTo(stage, group = 1), design$kMax)
        stageResults$overallSampleSizes <- .fillWithNAs(dataInput$getOverallSampleSizesUpTo(stage, 1), design$kMax)
        stageResults$overallPi1 <- stageResults$overallEvents / stageResults$overallSampleSizes
        stageResults$.setParameterType("overallPi1", C_PARAM_GENERATED)
    } else if (dataInput$getNumberOfGroups() == 2) {
        # NOTE: named arguments below ('group = 1', 'group = 2') replace the former
        # 'group <- 1' / 'group <- 2', which assigned a stray local and passed positionally.
        stageResults$overallEvents1 <- .fillWithNAs(dataInput$getOverallEventsUpTo(stage, group = 1), design$kMax)
        stageResults$overallEvents2 <- .fillWithNAs(dataInput$getOverallEventsUpTo(stage, group = 2), design$kMax)
        stageResults$overallSampleSizes1 <- .fillWithNAs(dataInput$getOverallSampleSizesUpTo(stage, 1), design$kMax)
        stageResults$overallSampleSizes2 <- .fillWithNAs(dataInput$getOverallSampleSizesUpTo(stage, 2), design$kMax)
        stageResults$overallPi1 <- stageResults$overallEvents1 / stageResults$overallSampleSizes1
        stageResults$overallPi2 <- stageResults$overallEvents2 / stageResults$overallSampleSizes2
        stageResults$.setParameterType("overallPi1", C_PARAM_GENERATED)
        stageResults$.setParameterType("overallPi2", C_PARAM_GENERATED)
    }
    if (.isTrialDesignFisher(design)) {
        stageResults$.setParameterType("combFisher", C_PARAM_GENERATED)
        stageResults$.setParameterType("weightsFisher", C_PARAM_GENERATED)
    } else if (.isTrialDesignInverseNormal(design)) {
        stageResults$.setParameterType("combInverseNormal", C_PARAM_GENERATED)
        stageResults$.setParameterType("weightsInverseNormal", C_PARAM_GENERATED)
    }
    return(stageResults)
}

#'
#' Calculation of lower and upper limits of repeated confidence intervals (RCIs) for Rates
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRates <- function(..., design) {
    # Dispatch on the design class; unknown designs raise an error.
    if (.isTrialDesignGroupSequential(design)) {
        return(.getRepeatedConfidenceIntervalsRatesGroupSequential(design = design, ...))
    }
    if (.isTrialDesignInverseNormal(design)) {
        return(.getRepeatedConfidenceIntervalsRatesInverseNormal(design = design, ...))
    }
    if (.isTrialDesignFisher(design)) {
        return(.getRepeatedConfidenceIntervalsRatesFisher(design = design, ...))
    }
    .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE)
}

# Root search over the rate (difference) theta such that the stage result value
# named by 'firstParameterName' at 'stage' equals 'secondValue'.
# For two groups theta ranges over (-1, 1), for one group over (0, 1).
.getRootThetaRates <- function(..., design, dataInput, stage,
        directionUpper, normalApproximation,
        firstParameterName, secondValue, tolerance,
        acceptResultsOutOfTolerance,
        callingFunctionInformation) {
    if (dataInput$getNumberOfGroups() == 2) {
        thetaLow <- -1 + tolerance
    } else {
        thetaLow <- tolerance
    }
    thetaUp <- 1 - tolerance
    # exact (binomial) one-sample case: out-of-tolerance roots are never accepted
    if (dataInput$getNumberOfGroups() == 1 && !normalApproximation) {
        acceptResultsOutOfTolerance <- FALSE
    }
    result <- .getOneDimensionalRoot(
        function(theta) {
            stageResults <- .getStageResultsRates(
                design = design, dataInput = dataInput,
                stage = stage, thetaH0 = theta,
                directionUpper = directionUpper,
                normalApproximation = normalApproximation
            )
            firstValue <- stageResults[[firstParameterName]][stage]
            if (.isTrialDesignGroupSequential(design)) {
                firstValue <- .getOneMinusQNorm(firstValue)
            }
            return(firstValue - secondValue)
        },
        lower = thetaLow, upper = thetaUp, tolerance = tolerance,
        acceptResultsOutOfTolerance = acceptResultsOutOfTolerance,
        callingFunctionInformation = callingFunctionInformation
    )
    return(result)
}

# Common RCI computation for all design types: inverts the test at each stage's
# critical value via root search and applies the binding-futility correction.
.getRepeatedConfidenceIntervalsRatesAll <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT, firstParameterName) {
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)

    if (!normalApproximation && dataInput$getNumberOfGroups() == 2) {
        normalApproximation <- TRUE
        message("Repeated confidence intervals will be calculated under the normal approximation")
    }

    futilityCorr <- rep(NA_real_, design$kMax) # necessary for adjustment for binding futility boundaries

    criticalValues <- design$criticalValues
    if (.isTrialDesignFisher(design)) {
        bounds <- design$alpha0Vec
        border <- C_ALPHA_0_VEC_DEFAULT
        conditionFunction <- .isFirstValueSmallerThanSecondValue
    } else {
        # cap infinite critical values so the root search has finite targets
        criticalValues[is.infinite(criticalValues) & criticalValues > 0] <- C_QNORM_MAXIMUM
        criticalValues[is.infinite(criticalValues) & criticalValues < 0] <- C_QNORM_MINIMUM
        bounds <- design$futilityBounds
        border <- C_FUTILITY_BOUNDS_DEFAULT
        conditionFunction <- .isFirstValueGreaterThanSecondValue
    }

    repeatedConfidenceIntervals <- matrix(NA_real_, 2, design$kMax)
    for (k in (1:stage)) {
        startTime <- Sys.time()
        if (criticalValues[k] < C_QNORM_MAXIMUM) {
            # finding upper and lower RCI limits through root function
            if (dataInput$getNumberOfGroups() == 1) {
                # boundary data (no events / all events) fix the respective limit
                if (dataInput$overallEvents[k] == 0) {
                    repeatedConfidenceIntervals[1, k] <- 0
                } else {
                    repeatedConfidenceIntervals[1, k] <- .getRootThetaRates(
                        design = design, dataInput = dataInput, stage = k,
                        directionUpper = C_DIRECTION_UPPER_DEFAULT,
                        normalApproximation = normalApproximation, firstParameterName = firstParameterName,
                        secondValue = criticalValues[k],
                        tolerance = tolerance, acceptResultsOutOfTolerance = TRUE,
                        callingFunctionInformation = paste0("Repeated confidence interval [1, ", k, "]")
                    )
                }
                if (dataInput$overallEvents[k] == dataInput$overallSampleSizes[k]) {
                    repeatedConfidenceIntervals[2, k] <- 1
                } else {
                    repeatedConfidenceIntervals[2, k] <- .getRootThetaRates(
                        design = design, dataInput = dataInput, stage = k, directionUpper = FALSE,
                        normalApproximation = normalApproximation, firstParameterName = firstParameterName,
                        secondValue = criticalValues[k],
                        tolerance = tolerance, acceptResultsOutOfTolerance = TRUE,
                        callingFunctionInformation = paste0("Repeated confidence interval [2, ", k, "]")
                    )
                }
            } else if (dataInput$getNumberOfGroups() == 2) {
                repeatedConfidenceIntervals[1, k] <- .getRootThetaRates(
                    design = design, dataInput = dataInput, stage = k,
                    directionUpper = C_DIRECTION_UPPER_DEFAULT,
                    normalApproximation = normalApproximation, firstParameterName = firstParameterName,
                    secondValue = criticalValues[k],
                    tolerance = tolerance, acceptResultsOutOfTolerance = TRUE,
                    callingFunctionInformation = paste0("Repeated confidence interval [1, ", k, "]")
                )
                # fixed label: this is the upper limit [2, k] (was "[1, " by copy-paste)
                repeatedConfidenceIntervals[2, k] <- .getRootThetaRates(
                    design = design, dataInput = dataInput, stage = k, directionUpper = FALSE,
                    normalApproximation = normalApproximation, firstParameterName = firstParameterName,
                    secondValue = criticalValues[k],
                    tolerance = tolerance, acceptResultsOutOfTolerance = TRUE,
                    callingFunctionInformation = paste0("Repeated confidence interval [2, ", k, "]")
                )
            }

            # adjustment for binding futility bounds
            if (k > 1 && !is.na(bounds[k - 1]) &&
                    conditionFunction(bounds[k - 1], border) && design$bindingFutility) {
                parameterName <- ifelse(.isTrialDesignFisher(design), "pValues", firstParameterName)
                futilityCorr[k] <- .getRootThetaRates(
                    design = design, dataInput = dataInput, stage = k - 1, directionUpper = directionUpper,
                    normalApproximation = normalApproximation, firstParameterName = parameterName,
                    secondValue = bounds[k - 1],
                    tolerance = tolerance, acceptResultsOutOfTolerance = TRUE,
                    callingFunctionInformation = paste0("Repeated confidence interval, futility correction [", k, "]")
                )
                if (directionUpper) {
                    repeatedConfidenceIntervals[1, k] <- min(min(futilityCorr[2:k]), repeatedConfidenceIntervals[1, k])
                } else {
                    repeatedConfidenceIntervals[2, k] <- max(max(futilityCorr[2:k]), repeatedConfidenceIntervals[2, k])
                }
            }

            .logProgress("Repeated confidence interval of stage %s calculated", startTime = startTime, k)
        }

        # discard inconsistent intervals (lower limit above upper limit)
        if (!is.na(repeatedConfidenceIntervals[1, k]) && !is.na(repeatedConfidenceIntervals[2, k]) &&
                repeatedConfidenceIntervals[1, k] > repeatedConfidenceIntervals[2, k]) {
            repeatedConfidenceIntervals[, k] <- rep(NA_real_, 2)
        }
    }

    return(repeatedConfidenceIntervals)
}

#'
#' RCIs based on group sequential method
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRatesGroupSequential <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsRatesGroupSequential",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"), ...
    )
    return(.getRepeatedConfidenceIntervalsRatesAll(
        design = design, dataInput = dataInput,
        normalApproximation = normalApproximation, directionUpper = directionUpper,
        firstParameterName = "overallPValues", tolerance = tolerance, ...
    ))
}

#'
#' RCIs based on inverse normal combination test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRatesInverseNormal <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsRatesInverseNormal",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"), ...
    )
    return(.getRepeatedConfidenceIntervalsRatesAll(
        design = design, dataInput = dataInput,
        normalApproximation = normalApproximation, directionUpper = directionUpper,
        firstParameterName = "combInverseNormal", tolerance = tolerance, ...
    ))
}

#'
#' RCIs based on Fisher's combination test
#'
#' @noRd
#'
.getRepeatedConfidenceIntervalsRatesFisher <- function(..., design, dataInput,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(
        functionName = ".getRepeatedConfidenceIntervalsRatesFisher",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design,
            powerCalculationEnabled = TRUE
        ), "stage"), ...
    )
    return(.getRepeatedConfidenceIntervalsRatesAll(
        design = design, dataInput = dataInput,
        normalApproximation = normalApproximation, directionUpper = directionUpper,
        firstParameterName = "combFisher", tolerance = tolerance, ...
    ))
}

# Effect scale parameter thetaH1 (standardized) and adjusted planned sample sizes
# used by the conditional power calculations; returns list(thetaH1, nPlanned).
.calculateThetaH1 <- function(stageResults, pi1, pi2, stage, kMax, nPlanned, allocationRatioPlanned) {
    # Shifted decision region for use in getGroupSequentialProbabilities
    # Inverse normal method
    condError <- getConditionalRejectionProbabilities(stageResults = stageResults)[stage]
    if (stageResults$isOneSampleDataset()) {
        # guard against numerically zero conditional error
        if (condError < 1e-12) {
            adjustment <- 0
        } else {
            adjustment <- .getOneMinusQNorm(condError) *
                (1 - sqrt(stageResults$thetaH0 * (1 - stageResults$thetaH0)) / sqrt(pi1 * (1 - pi1))) /
                sqrt(sum(nPlanned[(stage + 1):kMax]))
        }
        if (stageResults$direction == "upper") {
            thetaH1 <- (pi1 - stageResults$thetaH0) / sqrt(pi1 * (1 - pi1)) + adjustment
        } else {
            thetaH1 <- -(pi1 - stageResults$thetaH0) / sqrt(pi1 * (1 - pi1)) + adjustment
        }
        return(list(thetaH1 = thetaH1, nPlanned = nPlanned))
    }

    .assertIsSingleNumber(allocationRatioPlanned, "allocationRatioPlanned")
    .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM)
    x <- .getFarringtonManningValues(
        rate1 = pi1, rate2 = pi2, theta = stageResults$thetaH0, allocation = allocationRatioPlanned
    )
    if (condError < 1e-12) {
        adjustment <- 0
    } else {
        adjustment <- .getOneMinusQNorm(condError) *
            (1 - sqrt(x$ml1 * (1 - x$ml1) + allocationRatioPlanned * x$ml2 * (1 - x$ml2)) /
                sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2))) *
            (1 + allocationRatioPlanned) / sqrt(allocationRatioPlanned * sum(nPlanned[(stage + 1):kMax]))
    }
    if (stageResults$direction == "upper") {
        thetaH1 <- (pi1 - pi2 - stageResults$thetaH0) /
            sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2)) *
            sqrt(1 + allocationRatioPlanned) + adjustment
    } else {
        thetaH1 <- -(pi1 - pi2 - stageResults$thetaH0) /
            sqrt(pi1 * (1 - pi1) + allocationRatioPlanned * pi2 * (1 - pi2)) *
            sqrt(1 + allocationRatioPlanned) + adjustment
    }
    # rescale planned sample sizes to the per-group information scale
    nPlanned <- allocationRatioPlanned / (1 + allocationRatioPlanned)^2 * nPlanned

    return(list(thetaH1 = thetaH1, nPlanned = nPlanned))
}

#'
#' Calculation of conditional power based on group sequential / inverse normal method
#'
#' @noRd
#'
.getConditionalPowerRatesInverseNormalOrGroupSequential <- function(..., stageResults,
        stage = stageResults$stage, nPlanned,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT, pi1, pi2) {
    design <- stageResults$.design
    .assertIsTrialDesignInverseNormalOrGroupSequential(design)
    .assertIsValidStage(stage, design$kMax)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalPowerRatesInverseNormalOrGroupSequential",
        ignore = c("design", "stageResultsName", "grid", "pi1H1", "pi2H1"), ...
    )

    kMax <- design$kMax
    conditionalPower <- rep(NA_real_, kMax)
    weights <- stageResults$weightsInverseNormal
    informationRates <- design$informationRates

    # pad nPlanned so indices align with stages 1..kMax
    nPlanned <- c(rep(NA, stage), nPlanned)

    if (stage == kMax) {
        .logDebug(
            "Conditional power will be calculated only for subsequent stages ",
            "(stage = ", stage, ", kMax = ", design$kMax, ")"
        )
        return(list(
            nPlanned = nPlanned,
            conditionalPower = conditionalPower
        ))
    }

    criticalValuesInverseNormal <- design$criticalValues

    resultList <- .calculateThetaH1(stageResults, pi1, pi2, stage, kMax, nPlanned, allocationRatioPlanned)
    thetaH1 <- resultList$thetaH1
    nPlanned <- resultList$nPlanned

    shiftedDecisionRegionUpper <- criticalValuesInverseNormal[(stage + 1):kMax] *
        sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) /
        sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
        c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) /
            sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
        thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) /
            sqrt(cumsum(weights[(stage + 1):kMax]^2))
    if (design$sided == 2) {
        shiftedDecisionRegionLower <- -criticalValuesInverseNormal[(stage + 1):kMax] *
            sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):kMax]^2)) /
            sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
            c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) /
                sqrt(cumsum(weights[(stage + 1):kMax]^2)) -
            thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):kMax]) * weights[(stage + 1):kMax]) /
                sqrt(cumsum(weights[(stage + 1):kMax]^2))
    }
    if (stage == kMax - 1) {
        shiftedFutilityBounds <- c()
    } else {
        shiftedFutilityBounds <- design$futilityBounds[(stage + 1):(kMax - 1)] *
            sqrt(sum(weights[1:stage]^2) + cumsum(weights[(stage + 1):(kMax - 1)]^2)) /
            sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
            c(weights[1:stage] %*% .getOneMinusQNorm(stageResults$pValues[1:stage])) /
                sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2)) -
            thetaH1 * cumsum(sqrt(nPlanned[(stage + 1):(kMax - 1)]) * weights[(stage + 1):(kMax - 1)]) /
                sqrt(cumsum(weights[(stage + 1):(kMax - 1)]^2))
    }

    # Scaled information for use in getGroupSeqProbs
    scaledInformation <- (informationRates[(stage + 1):kMax] - informationRates[stage]) /
        (1 - informationRates[stage])

    if (design$sided == 2) {
        decisionMatrix <- matrix(c(shiftedDecisionRegionLower, shiftedDecisionRegionUpper), nrow = 2, byrow = TRUE)
    } else {
        decisionMatrix <- matrix(c(
            shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT,
            shiftedDecisionRegionUpper
        ), nrow = 2, byrow = TRUE)
    }

    probs <- .getGroupSequentialProbabilities(
        decisionMatrix = decisionMatrix, informationRates = scaledInformation
    )
    if (design$twoSidedPower) {
        conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ] + probs[1, ])
    } else {
        conditionalPower[(stage + 1):kMax] <- cumsum(probs[3, ] - probs[2, ])
    }

    if (stageResults$isTwoSampleDataset()) {
        # transform back to total sample size scale
        nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
    }

    return(list(
        nPlanned = nPlanned,
        conditionalPower = conditionalPower
    ))
}

#'
#' Calculation of conditional power based on Fisher combination test
#'
#' @noRd
#'
.getConditionalPowerRatesFisher <- function(..., stageResults, stage = stageResults$stage,
        nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        pi1, pi2, iterations = C_ITERATIONS_DEFAULT, seed = NA_real_) {
    design <- stageResults$.design
    .assertIsTrialDesignFisher(design)
    .assertIsValidStage(stage, design$kMax)
    .assertIsValidIterationsAndSeed(iterations, seed, zeroIterationsAllowed = FALSE)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalPowerRatesFisher",
        ignore = c("design", "stageResultsName", "grid", "pi1H1", "pi2H1"), ...
    )

    kMax <- design$kMax
    conditionalPower <- rep(NA_real_, kMax)
    seed <- .setSeed(seed)
    simulated <- FALSE

    nPlanned <- c(rep(NA, stage), nPlanned)
    resultList <- .calculateThetaH1(stageResults, pi1, pi2, stage, kMax, nPlanned, allocationRatioPlanned)
    thetaH1 <- resultList$thetaH1
    nPlanned <- resultList$nPlanned
    criticalValues <- design$criticalValues
    weightsFisher <- stageResults$weightsFisher
    pValues <- stageResults$pValues

    # More than one stage remaining: conditional power is obtained by simulation.
    if (stage < kMax - 1) {
        for (k in (stage + 1):kMax) {
            reject <- 0
            for (i in 1:iterations) {
                reject <- reject + .getRejectValueConditionalPowerFisher(
                    kMax = kMax, alpha0Vec = design$alpha0Vec,
                    criticalValues = criticalValues, weightsFisher = weightsFisher,
                    pValues = pValues, currentKMax = k, thetaH1 = thetaH1,
                    stage = stage, nPlanned = nPlanned
                )
            }
            conditionalPower[k] <- reject / iterations
        }
        simulated <- TRUE
    }

    # Exactly one stage remaining: closed-form expression.
    if (stage == kMax - 1) {
        divisor <- prod(pValues[1:(kMax - 1)]^weightsFisher[1:(kMax - 1)])
        result <- 1 - (criticalValues[kMax] / divisor)^(1 / weightsFisher[kMax])
        if (result <= 0 || result >= 1) {
            warning("Calculation not possible: could not calculate conditional power for stage ",
                kMax,
                call. = FALSE
            )
            conditionalPower[kMax] <- NA_real_
        } else {
            conditionalPower[kMax] <- 1 - stats::pnorm(.getQNorm(result) - thetaH1 * sqrt(nPlanned[kMax]))
        }
    }

    if (stageResults$isTwoSampleDataset()) {
        nPlanned <- (1 + allocationRatioPlanned)^2 / allocationRatioPlanned * nPlanned
    }

    return(list(
        nPlanned = nPlanned,
        conditionalPower = conditionalPower,
        iterations = as.integer(iterations),
        seed = seed,
        simulated = simulated
    ))
}

# Public-facing entry for conditional power (rates): validates pi1/pi2 (optionally
# overridden via pi1H1/pi2H1), dispatches on the design type, and assembles a
# ConditionalPowerResultsRates object.
.getConditionalPowerRates <- function(..., stageResults, nPlanned,
        allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        pi1 = NA_real_, pi2 = NA_real_) {
    pi1H1 <- .getOptionalArgument("pi1H1", ...)
    if (!is.null(pi1H1) && !is.na(pi1H1)) {
        if (!is.na(pi1)) {
            warning(sQuote("pi1"), " will be ignored because ", sQuote("pi1H1"), " is defined", call. = FALSE)
        }
        pi1 <- pi1H1
    }
    pi2H1 <- .getOptionalArgument("pi2H1", ...)
    if (!is.null(pi2H1) && !is.na(pi2H1)) {
        if (!is.na(pi2)) {
            warning(sQuote("pi2"), " will be ignored because ", sQuote("pi2H1"), " is defined", call. = FALSE)
        }
        pi2 <- pi2H1
    }

    stage <- stageResults$stage
    pi1 <- .assertIsValidPi1(pi1, stageResults, stage)
    if (!stageResults$isOneSampleDataset()) {
        pi2 <- .assertIsValidPi2(pi2, stageResults, stage)
    }

    results <- ConditionalPowerResultsRates(
        .stageResults = stageResults,
        .design = stageResults$.design,
        nPlanned = nPlanned,
        allocationRatioPlanned = allocationRatioPlanned,
        pi1 = pi1, pi2 = pi2
    )

    # without a valid nPlanned, return the (empty) results object as-is
    if (any(is.na(nPlanned))) {
        return(results)
    }
    if (!.isValidNPlanned(nPlanned = nPlanned, kMax = stageResults$.design$kMax, stage = stage)) {
        return(results)
    }

    if (.isTrialDesignInverseNormalOrGroupSequential(stageResults$.design)) {
        cp <- .getConditionalPowerRatesInverseNormalOrGroupSequential(...,
            stageResults = stageResults, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, pi1 = pi1, pi2 = pi2
        )
    } else if (.isTrialDesignFisher(stageResults$.design)) {
        cp <- .getConditionalPowerRatesFisher(...,
            stageResults = stageResults, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, pi1 = pi1, pi2 = pi2
        )
        results$iterations <- cp$iterations
        results$seed <- cp$seed
        results$simulated <- cp$simulated
        .updateParameterTypeOfIterationsAndSeed(results, ...)
    } else {
        .stopWithWrongDesignMessage(stageResults$.design, inclusiveConditionalDunnett = FALSE)
    }

    results$nPlanned <- cp$nPlanned
    results$conditionalPower <- cp$conditionalPower
    results$.setParameterType("nPlanned", C_PARAM_GENERATED)
    results$.setParameterType("conditionalPower", C_PARAM_GENERATED)
    results$.setParameterType(
        "allocationRatioPlanned",
        ifelse(allocationRatioPlanned == C_ALLOCATION_RATIO_DEFAULT, C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)
    )
    results$.setParameterType("pi1", ifelse(is.na(pi1), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED))
    results$.setParameterType("pi2", ifelse(is.na(pi2), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED))
    return(results)
}

# Conditional power plot data (rates): conditional power and normalized
# likelihood over a grid of treatment rates piTreatmentRange; warnings raised
# inside the grid loops are collected, deduplicated, and re-raised once.
.getConditionalPowerPlotRates <- function(..., stageResults, stage,
        nPlanned, allocationRatioPlanned = C_ALLOCATION_RATIO_DEFAULT,
        piTreatmentRange, pi2) {
    if (stageResults$isOneSampleDataset()) {
        .associatedArgumentsAreDefined(nPlanned = nPlanned, piTreatmentRange = piTreatmentRange)
        pi2 <- NA_real_
    } else {
        .associatedArgumentsAreDefined(nPlanned = nPlanned, pi2 = pi2, piTreatmentRange = piTreatmentRange)
    }
    .assertIsValidAllocationRatioPlanned(
        allocationRatioPlanned,
        stageResults$getDataInput()$getNumberOfGroups()
    )
    .assertIsValidPi(pi2, "pi2")
    piTreatmentRange <- .assertIsValidPiTreatmentRange(piTreatmentRange = piTreatmentRange)
    .warnInCaseOfUnknownArguments(
        functionName = ".getConditionalPowerPlotRates",
        ignore = c("iterations", "seed", "stageResultsName", "grid"), ...
    )

    condPowerValues <- rep(NA, length(piTreatmentRange))
    likelihoodValues <- rep(NA, length(piTreatmentRange))

    design <- stageResults$.design
    warningMessages <- c()
    withCallingHandlers(
        if (stageResults$isOneSampleDataset()) {
            mu <- stageResults$effectSizes[stage]
            stdErr <- sqrt(stageResults$effectSizes[stage] * (1 - stageResults$effectSizes[stage]) /
                stageResults$overallSampleSizes[stage])
            for (i in seq(along = piTreatmentRange)) {
                if (.isTrialDesignInverseNormalOrGroupSequential(design)) {
                    condPowerValues[i] <- .getConditionalPowerRatesInverseNormalOrGroupSequential(
                        stageResults = stageResults,
                        nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                        pi1 = piTreatmentRange[i], pi2 = pi2
                    )$conditionalPower[design$kMax]
                } else if (.isTrialDesignFisher(design)) {
                    condPowerValues[i] <- .getConditionalPowerRatesFisher(
                        stageResults = stageResults,
                        nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                        pi1 = piTreatmentRange[i], pi2 = pi2
                    )$conditionalPower[design$kMax]
                }
                likelihoodValues[i] <- stats::dnorm(piTreatmentRange[i], mu, stdErr) / stats::dnorm(0, 0, stdErr)
            }
        },
        warning = function(w) {
            m <- w$message
            if (!(m %in% warningMessages)) {
                warningMessages <<- c(warningMessages, m)
            }
            invokeRestart("muffleWarning")
        },
        error = function(e) {
            e
        }
    )
    if (stageResults$isTwoSampleDataset()) {
        mu <- stageResults$overallEvents1[stage] / stageResults$overallSampleSizes1[stage]
        stdErr <- sqrt(stageResults$overallEvents1[stage] / stageResults$overallSampleSizes1[stage] *
            (1 - stageResults$overallEvents1[stage] / stageResults$overallSampleSizes1[stage]) /
            stageResults$overallSampleSizes1[stage])
        withCallingHandlers(
            for (i in seq(along = piTreatmentRange)) {
                if (.isTrialDesignInverseNormalOrGroupSequential(design)) {
                    condPowerValues[i] <- .getConditionalPowerRatesInverseNormalOrGroupSequential(
                        stageResults = stageResults, stage = stage,
                        nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                        pi1 = piTreatmentRange[i], pi2 = pi2
                    )$conditionalPower[design$kMax]
                } else if (.isTrialDesignFisher(design)) {
                    condPowerValues[i] <- .getConditionalPowerRatesFisher(
                        stageResults = stageResults, stage = stage,
                        nPlanned = nPlanned, allocationRatioPlanned = allocationRatioPlanned,
                        pi1 = piTreatmentRange[i], pi2 = pi2
                    )$conditionalPower[design$kMax]
                }
                likelihoodValues[i] <- stats::dnorm(piTreatmentRange[i], mu, stdErr) / stats::dnorm(0, 0, stdErr)
            },
            warning = function(w) {
                m <- w$message
                if (!(m %in% warningMessages)) {
                    warningMessages <<- c(warningMessages, m)
                }
                invokeRestart("muffleWarning")
            },
            error = function(e) {
                e
            }
        )
    }
    if (length(warningMessages) > 0) {
        for (m in warningMessages) {
            warning(m, call. = FALSE)
        }
    }

    if (stageResults$isOneSampleDataset()) {
        subtitle <- paste0("Stage = ", stage, ", # of remaining subjects = ", sum(nPlanned))
    } else {
        subtitle <- paste0(
            "Stage = ", stage, ", # of remaining subjects = ", sum(nPlanned),
            ", pi2 = ", .formatSubTitleValue(pi2, "pi2"),
            ", allocation ratio = ", .formatSubTitleValue(allocationRatioPlanned, "allocationRatioPlanned")
        )
    }

    return(list(
        xValues = piTreatmentRange,
        condPowerValues = condPowerValues,
        likelihoodValues = likelihoodValues,
        main = C_PLOT_MAIN_CONDITIONAL_POWER_WITH_LIKELIHOOD,
        xlab = "pi1",
        ylab = C_PLOT_YLAB_CONDITIONAL_POWER_WITH_LIKELIHOOD,
        sub = subtitle
    ))
}

#'
#' Calculation of final confidence interval
#' based on group sequential test without SSR (general case).
#'
#' @noRd
#'
.getFinalConfidenceIntervalRatesGroupSequential <- function(..., design, dataInput, stage,
        thetaH0 = C_THETA_H0_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    # Final confidence interval and median unbiased estimate for rates,
    # group sequential design. Returns results both on the "general"
    # (standardized test statistic) scale and retransformed to the rate scale.
    stageResults <- .getStageResultsRates(
        design = design, dataInput = dataInput,
        stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper,
        normalApproximation = normalApproximation
    )

    finalConfidenceIntervalGeneral <- rep(NA_real_, 2)
    medianUnbiasedGeneral <- NA_real_
    stageGroupSeq <- .getStageGroupSeq(design = design, stageResults = stageResults, stage = stage)
    finalStage <- min(stageGroupSeq, design$kMax)

    # early stopping or at end of study
    if (stageGroupSeq < design$kMax || stage == design$kMax) {
        if (stageGroupSeq == 1) {
            # Stopping at the first stage: classical fixed-sample interval around
            # the first-stage overall test statistic.
            finalConfidenceIntervalGeneral[1] <- stageResults$overallTestStatistics[1] -
                .getOneMinusQNorm(design$alpha / design$sided)
            finalConfidenceIntervalGeneral[2] <- stageResults$overallTestStatistics[1] +
                .getOneMinusQNorm(design$alpha / design$sided)
            medianUnbiasedGeneral <- stageResults$overallTestStatistics[1]
            if (dataInput$getNumberOfGroups() == 1) {
                finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral /
                    sqrt(stageResults$overallSampleSizes[1])
                medianUnbiasedGeneral <- medianUnbiasedGeneral /
                    sqrt(stageResults$overallSampleSizes[1])
            } else {
                finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral *
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage])
                medianUnbiasedGeneral <- medianUnbiasedGeneral *
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage])
            }
        } else {
            # Stopping at a later stage: invert the stage-wise ordering via
            # root finding on the decision matrix.
            finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot(
                design = design,
                stage = finalStage, stageResults = stageResults, tolerance = tolerance,
                firstParameterName = "overallPValues",
                case = "finalConfidenceIntervalGeneralLower"
            )
            finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot(
                design = design,
                stage = finalStage, stageResults = stageResults, tolerance = tolerance,
                firstParameterName = "overallPValues",
                case = "finalConfidenceIntervalGeneralUpper"
            )
            medianUnbiasedGeneral <- .getDecisionMatrixRoot(
                design = design,
                stage = finalStage, stageResults = stageResults, tolerance = tolerance,
                firstParameterName = "overallPValues",
                case = "medianUnbiasedGeneral"
            )
        }
    }

    # Root finding failed after the first stage: no final stage can be reported.
    if (is.na(finalConfidenceIntervalGeneral[1]) && (stageGroupSeq > 1)) {
        finalStage <- NA_integer_
    }

    finalConfidenceInterval <- rep(NA_real_, 2)
    medianUnbiased <- NA_real_
    if (!is.na(finalStage)) {
        # Retransformation
        if (dataInput$getNumberOfGroups() == 1) {
            stErrRates <- sqrt(stageResults$overallEvents[finalStage] /
                stageResults$overallSampleSizes[finalStage] *
                (1 - stageResults$overallEvents[finalStage] /
                    stageResults$overallSampleSizes[finalStage])) /
                sqrt(stageResults$overallSampleSizes[finalStage])
        } else {
            stErrRates <- sqrt(stageResults$overallEvents1[finalStage] /
                stageResults$overallSampleSizes1[finalStage] *
                (1 - stageResults$overallEvents1[finalStage] /
                    stageResults$overallSampleSizes1[finalStage]) /
                stageResults$overallSampleSizes1[finalStage] +
                stageResults$overallEvents2[finalStage] /
                    stageResults$overallSampleSizes2[finalStage] *
                    (1 - stageResults$overallEvents2[finalStage] /
                        stageResults$overallSampleSizes2[finalStage]) /
                    stageResults$overallSampleSizes2[finalStage])
        }
        directionUpperSign <- ifelse(directionUpper, 1, -1)
        if (stageGroupSeq == 1) {
            # First-stage stop: solve for the rates directly.
            finalConfidenceInterval[1] <- .getRootThetaRates(
                design = design, dataInput = dataInput, stage = 1,
                directionUpper = C_DIRECTION_UPPER_DEFAULT,
                normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
                firstParameterName = "overallPValues",
                secondValue = .getOneMinusQNorm(design$alpha / design$sided),
                tolerance = tolerance, acceptResultsOutOfTolerance = TRUE,
                callingFunctionInformation = "Final confidence interval [1]"
            )
            finalConfidenceInterval[2] <- .getRootThetaRates(
                design = design, dataInput = dataInput, stage = 1,
                directionUpper = FALSE,
                normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
                firstParameterName = "overallPValues",
                secondValue = .getOneMinusQNorm(design$alpha / design$sided),
                tolerance = tolerance, acceptResultsOutOfTolerance = TRUE,
                callingFunctionInformation = "Final confidence interval [2]"
            )
            medianUnbiased <- stageResults$effectSizes[1]
        } else {
            # Later-stage stop: retransform the general-scale results by the
            # estimated standard error and shift by thetaH0.
            if (dataInput$getNumberOfGroups() == 1) {
                finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] *
                    sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
                finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] *
                    sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
                medianUnbiased <- medianUnbiasedGeneral *
                    sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
            } else {
                finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] /
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
                finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] /
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
                medianUnbiased <- medianUnbiasedGeneral /
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
            }
        }
    }

    # Mirror results for a lower-directed test.
    if (!directionUpper) {
        medianUnbiasedGeneral <- -medianUnbiasedGeneral
        finalConfidenceIntervalGeneral <- -finalConfidenceIntervalGeneral
        if (stageGroupSeq > 1) {
            medianUnbiased <- -medianUnbiased
            finalConfidenceInterval <- -finalConfidenceInterval
        }
    }

    # Guarded sort: sort() with default na.last drops NAs, which would collapse
    # an all-NA length-2 vector to length 0.
    if (!any(is.na(finalConfidenceIntervalGeneral))) {
        finalConfidenceIntervalGeneral <- sort(finalConfidenceIntervalGeneral)
    }
    if (!any(is.na(finalConfidenceInterval))) {
        finalConfidenceInterval <- sort(finalConfidenceInterval)
    }

    # Clamp to the valid range: [0, 1] for a single rate, [-1, 1] for a rate difference.
    if (dataInput$getNumberOfGroups() == 1) {
        finalConfidenceInterval[1] <- max(0, finalConfidenceInterval[1])
        finalConfidenceInterval[2] <- min(1, finalConfidenceInterval[2])
    } else {
        finalConfidenceInterval[1] <- max(-1, finalConfidenceInterval[1])
        finalConfidenceInterval[2] <- min(1, finalConfidenceInterval[2])
    }

    return(list(
        stage = stage,
        thetaH0 = thetaH0,
        directionUpper = directionUpper,
        normalApproximation = normalApproximation,
        tolerance = tolerance,
        finalStage = finalStage,
        medianUnbiasedGeneral = medianUnbiasedGeneral,
        finalConfidenceIntervalGeneral = finalConfidenceIntervalGeneral,
        medianUnbiased = medianUnbiased,
        finalConfidenceInterval = finalConfidenceInterval
    ))
}

#'
#' Calculation of final confidence interval
#' based on inverse normal method, only valid for kMax <= 2 or no SSR.
#'
#' @noRd
#'
.getFinalConfidenceIntervalRatesInverseNormal <- function(..., design, dataInput, stage,
        thetaH0 = C_THETA_H0_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    # Same structure as the group sequential variant above, but based on the
    # inverse normal combination test statistic (combInverseNormal).
    stageResults <- .getStageResultsRates(
        design = design, dataInput = dataInput,
        stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper,
        normalApproximation = normalApproximation
    )

    finalConfidenceIntervalGeneral <- rep(NA_real_, 2)
    medianUnbiasedGeneral <- NA_real_
    stageInverseNormal <- .getStageInverseNormal(design = design, stageResults = stageResults, stage = stage)
    finalStage <- min(stageInverseNormal, design$kMax)

    # Early stopping or at end of study
    if (stageInverseNormal < design$kMax || stage == design$kMax) {
        if (stageInverseNormal == 1) {
            finalConfidenceIntervalGeneral[1] <- stageResults$combInverseNormal[1] -
                .getOneMinusQNorm(design$alpha / design$sided)
            finalConfidenceIntervalGeneral[2] <- stageResults$combInverseNormal[1] +
                .getOneMinusQNorm(design$alpha / design$sided)
            medianUnbiasedGeneral <- stageResults$combInverseNormal[1]
            if (dataInput$getNumberOfGroups() == 1) {
                finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral /
                    sqrt(stageResults$overallSampleSizes[1])
                medianUnbiasedGeneral <- medianUnbiasedGeneral /
                    sqrt(stageResults$overallSampleSizes[1])
            } else {
                finalConfidenceIntervalGeneral <- finalConfidenceIntervalGeneral *
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage])
                medianUnbiasedGeneral <- medianUnbiasedGeneral *
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage])
            }
        } else {
            # The method is only theoretically valid for kMax > 2 if no sample
            # size recalculation was performed; inform the user in that case.
            if ((design$kMax > 2) && !.isNoEarlyEfficacy(design)) {
                message(
                    "Calculation of final confidence interval performed for kMax = ", design$kMax,
                    " (for kMax > 2, it is theoretically shown that it is valid only ",
                    "if no sample size change was performed)"
                )
            }
            finalConfidenceIntervalGeneral[1] <- .getDecisionMatrixRoot(
                design = design,
                stage = finalStage, stageResults = stageResults, tolerance = tolerance,
                firstParameterName = "combInverseNormal",
                case = "finalConfidenceIntervalGeneralLower"
            )
            finalConfidenceIntervalGeneral[2] <- .getDecisionMatrixRoot(
                design = design,
                stage = finalStage, stageResults = stageResults, tolerance = tolerance,
                firstParameterName = "combInverseNormal",
                case = "finalConfidenceIntervalGeneralUpper"
            )
            medianUnbiasedGeneral <- .getDecisionMatrixRoot(
                design = design,
                stage = finalStage, stageResults = stageResults, tolerance = tolerance,
                firstParameterName = "combInverseNormal",
                case = "medianUnbiasedGeneral"
            )
        }
    }

    if (is.na(finalConfidenceIntervalGeneral[1]) && (stageInverseNormal > 1)) {
        finalStage <- NA_integer_
    }

    finalConfidenceInterval <- rep(NA_real_, 2)
    medianUnbiased <- NA_real_
    if (!is.na(finalStage)) {
        # Retransformation
        if (dataInput$getNumberOfGroups() == 1) {
            stErrRates <- sqrt(stageResults$overallEvents[finalStage] /
                stageResults$overallSampleSizes[finalStage] *
                (1 - stageResults$overallEvents[finalStage] /
                    stageResults$overallSampleSizes[finalStage])) /
                sqrt(stageResults$overallSampleSizes[finalStage])
        } else {
            stErrRates <- sqrt(stageResults$overallEvents1[finalStage] /
                stageResults$overallSampleSizes1[finalStage] *
                (1 - stageResults$overallEvents1[finalStage] /
                    stageResults$overallSampleSizes1[finalStage]) /
                stageResults$overallSampleSizes1[finalStage] +
                stageResults$overallEvents2[finalStage] /
                    stageResults$overallSampleSizes2[finalStage] *
                    (1 - stageResults$overallEvents2[finalStage] /
                        stageResults$overallSampleSizes2[finalStage]) /
                    stageResults$overallSampleSizes2[finalStage])
        }
        directionUpperSign <- ifelse(directionUpper, 1, -1)
        if (stageInverseNormal == 1) {
            finalConfidenceInterval[1] <- .getRootThetaRates(
                design = design, dataInput = dataInput, stage = 1,
                directionUpper = C_DIRECTION_UPPER_DEFAULT,
                normalApproximation = TRUE,
                firstParameterName = "combInverseNormal",
                secondValue = .getOneMinusQNorm(design$alpha / design$sided),
                tolerance = tolerance, acceptResultsOutOfTolerance = TRUE,
                callingFunctionInformation = "Final confidence interval [1]"
            )
            # Fixed: label was "Final confidence interval [1]" (copy/paste);
            # the upper bound is reported as "[2]" as in the group sequential variant.
            finalConfidenceInterval[2] <- .getRootThetaRates(
                design = design, dataInput = dataInput, stage = 1,
                directionUpper = FALSE,
                normalApproximation = TRUE,
                firstParameterName = "combInverseNormal",
                secondValue = .getOneMinusQNorm(design$alpha / design$sided),
                tolerance = tolerance, acceptResultsOutOfTolerance = TRUE,
                callingFunctionInformation = "Final confidence interval [2]"
            )
            medianUnbiased <- stageResults$effectSizes[1]
        } else {
            if (dataInput$getNumberOfGroups() == 1) {
                finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] *
                    sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
                finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] *
                    sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
                medianUnbiased <- medianUnbiasedGeneral *
                    sqrt(stageResults$overallSampleSizes[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
            } else {
                finalConfidenceInterval[1] <- finalConfidenceIntervalGeneral[1] /
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
                finalConfidenceInterval[2] <- finalConfidenceIntervalGeneral[2] /
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
                medianUnbiased <- medianUnbiasedGeneral /
                    sqrt(1 / stageResults$overallSampleSizes1[finalStage] +
                        1 / stageResults$overallSampleSizes2[finalStage]) * stErrRates +
                    directionUpperSign * thetaH0
            }
        }
    }

    if (!directionUpper) {
        medianUnbiasedGeneral <- -medianUnbiasedGeneral
        finalConfidenceIntervalGeneral <- -finalConfidenceIntervalGeneral
        if (stageInverseNormal > 1) {
            medianUnbiased <- -medianUnbiased
            finalConfidenceInterval <- -finalConfidenceInterval
        }
    }

    # Fixed: sorts were unguarded here; sort() drops NAs by default, so an
    # all-NA vector would silently collapse to length 0. Guarding keeps the
    # behavior consistent with the group sequential variant.
    if (!any(is.na(finalConfidenceIntervalGeneral))) {
        finalConfidenceIntervalGeneral <- sort(finalConfidenceIntervalGeneral)
    }
    if (!any(is.na(finalConfidenceInterval))) {
        finalConfidenceInterval <- sort(finalConfidenceInterval)
    }

    # Clamp to the valid range of the effect scale.
    if (dataInput$getNumberOfGroups() == 1) {
        finalConfidenceInterval[1] <- max(0, finalConfidenceInterval[1])
        finalConfidenceInterval[2] <- min(1, finalConfidenceInterval[2])
    } else {
        finalConfidenceInterval[1] <- max(-1, finalConfidenceInterval[1])
        finalConfidenceInterval[2] <- min(1, finalConfidenceInterval[2])
    }

    return(list(
        stage = stage,
        thetaH0 = thetaH0,
        directionUpper = directionUpper,
        normalApproximation = normalApproximation,
        tolerance = tolerance,
        finalStage = finalStage,
        medianUnbiasedGeneral = medianUnbiasedGeneral,
        finalConfidenceIntervalGeneral = finalConfidenceIntervalGeneral,
        medianUnbiased = medianUnbiased,
        finalConfidenceInterval = finalConfidenceInterval
    ))
}

#'
#' Calculation of final confidence interval
#' based on Fisher combination test, only valid for kMax <= 2.
#'
#' @noRd
#'
.getFinalConfidenceIntervalRatesFisher <- function(..., design, dataInput, stage,
        thetaH0 = C_THETA_H0_RATES_DEFAULT,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    # Placeholder: the final CI for Fisher's combination test is not implemented;
    # on early stopping or at the final stage only NA results are returned.
    stageResults <- .getStageResultsRates(
        design = design, dataInput = dataInput,
        stage = stage, thetaH0 = thetaH0, directionUpper = directionUpper,
        normalApproximation = normalApproximation
    )
    finalConfidenceInterval <- rep(NA_real_, 2)
    medianUnbiased <- NA_real_
    stageFisher <- .getStageFisher(design = design, stageResults = stageResults, stage = stage)
    finalStage <- min(stageFisher, design$kMax)
    # Early stopping or at end of study
    if (stageFisher < design$kMax || stage == design$kMax) {
        message(
            "Calculation of final confidence interval for Fisher's ",
            "design not implemented yet"
        )
        # NOTE(review): this early return yields a length-kMax NA vector for
        # finalConfidenceInterval, while the other return below uses length 2 —
        # confirm downstream consumers tolerate both shapes.
        return(list(
            finalStage = NA_integer_,
            medianUnbiased = NA_real_,
            finalConfidenceInterval = rep(NA_real_, design$kMax)
        ))
    }
    return(list(
        stage = stage,
        thetaH0 = thetaH0,
        directionUpper = directionUpper,
        normalApproximation = normalApproximation,
        tolerance = tolerance,
        finalStage = finalStage,
        medianUnbiased = medianUnbiased,
        finalConfidenceInterval = finalConfidenceInterval
    ))
}

# Dispatcher: selects the design-specific implementation (group sequential,
# inverse normal, or Fisher) of the final confidence interval for rates.
.getFinalConfidenceIntervalRates <- function(..., design, dataInput,
        thetaH0 = NA_real_,
        directionUpper = C_DIRECTION_UPPER_DEFAULT,
        normalApproximation = C_NORMAL_APPROXIMATION_RATES_DEFAULT,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design)
    .assertIsValidThetaH0DataInput(thetaH0, dataInput)
    .warnInCaseOfUnknownArguments(
        functionName = "getFinalConfidenceIntervalRates",
        ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck(
            design, powerCalculationEnabled = TRUE
        ), "stage"),
        ...
    )
    # Fixed designs (kMax == 1) have no final confidence interval.
    if (design$kMax == 1) {
        # NOTE(review): finalConfidenceInterval here is length 1, unlike the
        # length-2 c(NA_real_, NA_real_) used elsewhere — verify intentional.
        return(list(
            finalStage = NA_integer_,
            medianUnbiasedGeneral = NA_real_,
            finalConfidenceIntervalGeneral = c(NA_real_, NA_real_),
            medianUnbiased = NA_real_,
            finalConfidenceInterval = c(NA_real_)
        ))
    }
    if (is.na(thetaH0)) {
        thetaH0 <- C_THETA_H0_RATES_DEFAULT
    }
    if (.isTrialDesignGroupSequential(design)) {
        return(.getFinalConfidenceIntervalRatesGroupSequential(
            design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0,
            directionUpper = directionUpper, normalApproximation = normalApproximation,
            tolerance = tolerance
        ))
    }
    if (.isTrialDesignInverseNormal(design)) {
        return(.getFinalConfidenceIntervalRatesInverseNormal(
            design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0,
            directionUpper = directionUpper, normalApproximation = normalApproximation,
            tolerance = tolerance
        ))
    }
    if (.isTrialDesignFisher(design)) {
        return(.getFinalConfidenceIntervalRatesFisher(
            design = design, dataInput = dataInput, stage = stage, thetaH0 = thetaH0,
            directionUpper = directionUpper, normalApproximation = normalApproximation,
            tolerance = tolerance
        ))
    }
    .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE)
}
rpact/R/f_simulation_multiarm_rates.R0000644000176200001440000010433414445307576017534 0ustar liggesusers## |
## | *Simulation of multi-arm design with binary data*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#'
@include f_simulation_multiarm.R
NULL

# Sample size recalculation rule for the multi-arm rates simulation:
# returns the number of subjects for the next stage at given conditional power
# and conditional critical value, bounded by min/maxNumberOfSubjectsPerStage.
.getSimulationRatesMultiArmStageSubjects <- function(..., stage,
        directionUpper, conditionalPower, conditionalCriticalValue,
        plannedSubjects, allocationRatioPlanned, selectedArms,
        piTreatmentsH1, piControlH1, overallRates, overallRatesControl,
        minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage) {
    stage <- stage - 1 # to be consistent with non-multiarm situation
    gMax <- nrow(overallRates)
    if (!is.na(conditionalPower)) {
        if (any(selectedArms[1:gMax, stage + 1], na.rm = TRUE)) {
            # Assumed control rate under H1: user-specified or observed so far.
            if (is.na(piControlH1)) {
                piAssumedControlH1 <- overallRatesControl[stage]
            } else {
                piAssumedControlH1 <- piControlH1
            }
            # Assumed treatment rate under H1: user-specified, or the least
            # favorable observed rate among the selected arms (min if testing
            # upwards, max if testing downwards).
            if (is.na(piTreatmentsH1)) {
                if (directionUpper) {
                    piAssumedH1 <- min(overallRates[selectedArms[1:gMax, stage + 1], stage], na.rm = TRUE)
                } else {
                    piAssumedH1 <- max(overallRates[selectedArms[1:gMax, stage + 1], stage], na.rm = TRUE)
                }
            } else {
                piAssumedH1 <- piTreatmentsH1
            }
            # Pooled rate under the assumed H1 (allocation-weighted).
            pim <- (allocationRatioPlanned[stage] * piAssumedH1 + piAssumedControlH1) /
                (1 + allocationRatioPlanned[stage])
            if (conditionalCriticalValue[stage] > 8) {
                # Rejection practically impossible: use the stage maximum.
                newSubjects <- maxNumberOfSubjectsPerStage[stage + 1]
            } else {
                # Normal-approximation sample size formula for a two-proportion
                # test; the 1e-7 floor avoids division by a zero effect.
                newSubjects <- (max(0, conditionalCriticalValue[stage] *
                    sqrt(pim * (1 - pim) * (1 + allocationRatioPlanned[stage])) +
                    .getQNorm(conditionalPower) * sqrt(piAssumedH1 * (1 - piAssumedH1) +
                        piAssumedControlH1 * (1 - piAssumedControlH1) *
                            allocationRatioPlanned[stage])))^2 /
                    (max(1e-7, (2 * directionUpper - 1) *
                        (piAssumedH1 - piAssumedControlH1)))^2
                newSubjects <- min(
                    max(minNumberOfSubjectsPerStage[stage + 1], newSubjects),
                    maxNumberOfSubjectsPerStage[stage + 1]
                )
            }
        } else {
            # No arm selected: nothing to recruit.
            newSubjects <- 0
        }
    } else {
        # No conditional power target: keep the planned stage increment.
        newSubjects <- plannedSubjects[stage + 1] - plannedSubjects[stage]
    }
    return(newSubjects)
}

# Simulates one multi-arm trial with binary endpoint: per-stage binomial draws
# for gMax treatment arms plus control (row gMax + 1), stage-wise and overall
# test statistics, Bonferroni-adjusted p-values, conditional critical values,
# treatment arm selection, and optional sample size recalculation.
.getSimulatedStageRatesMultiArm <- function(design, directionUpper, piVector, piControl,
        plannedSubjects, typeOfSelection, effectMeasure, adaptations, epsilonValue,
        rValue, threshold, allocationRatioPlanned, minNumberOfSubjectsPerStage,
        maxNumberOfSubjectsPerStage, conditionalPower, piTreatmentsH1, piControlH1,
        calcSubjectsFunction, calcSubjectsFunctionIsUserDefined, selectArmsFunction) {
    kMax <- length(plannedSubjects)
    gMax <- length(piVector)
    # Row gMax + 1 holds the control arm throughout.
    simRates <- matrix(NA_real_, nrow = gMax + 1, ncol = kMax)
    overallEffectSizes <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    subjectsPerStage <- matrix(NA_real_, nrow = gMax + 1, ncol = kMax)
    testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    conditionalCriticalValue <- rep(NA_real_, kMax - 1)
    conditionalPowerPerStage <- rep(NA_real_, kMax)
    selectedArms <- matrix(FALSE, nrow = gMax + 1, ncol = kMax)
    selectedArms[, 1] <- TRUE
    adjustedPValues <- rep(NA_real_, kMax)
    overallRates <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallRatesControl <- rep(NA_real_, kMax)

    if (.isTrialDesignFisher(design)) {
        weights <- .getWeightsFisher(design)
    } else if (.isTrialDesignInverseNormal(design)) {
        weights <- .getWeightsInverseNormal(design)
    }

    for (k in (1:kMax)) {
        # Control arm size derived from the planned (treatment) increment
        # via the allocation ratio.
        if (k == 1) {
            subjectsPerStage[gMax + 1, k] <- trunc(plannedSubjects[k] / allocationRatioPlanned[k])
        } else {
            subjectsPerStage[gMax + 1, k] <- trunc((plannedSubjects[k] -
                plannedSubjects[k - 1]) / allocationRatioPlanned[k])
        }
        simRates[gMax + 1, k] <- stats::rbinom(1, subjectsPerStage[gMax + 1, k], piControl) /
            subjectsPerStage[gMax + 1, k]
        for (treatmentArm in (1:gMax)) {
            if (selectedArms[treatmentArm, k]) {
                if (k == 1) {
                    subjectsPerStage[treatmentArm, k] <- plannedSubjects[k]
                } else {
                    subjectsPerStage[treatmentArm, k] <- plannedSubjects[k] - plannedSubjects[k - 1]
                }
                simRates[treatmentArm, k] <- stats::rbinom(
                    1, subjectsPerStage[treatmentArm, k],
                    piVector[treatmentArm]
                ) / subjectsPerStage[treatmentArm, k]
                # Pooled rate for the stage-wise z statistic.
                rm <- (subjectsPerStage[treatmentArm, k] * simRates[treatmentArm, k] +
                    subjectsPerStage[gMax + 1, k] * simRates[gMax + 1, k]) /
                    (subjectsPerStage[treatmentArm, k] + subjectsPerStage[gMax + 1, k])
                if (simRates[treatmentArm, k] - simRates[gMax + 1, k] == 0) {
                    testStatistics[treatmentArm, k] <- 0
                } else {
                    testStatistics[treatmentArm, k] <- (2 * directionUpper - 1) *
                        (simRates[treatmentArm, k] - simRates[gMax + 1, k]) /
                        sqrt(rm * (1 - rm) * (1 / subjectsPerStage[treatmentArm, k] +
                            1 / subjectsPerStage[gMax + 1, k]))
                }
                separatePValues[treatmentArm, k] <- 1 - stats::pnorm(testStatistics[treatmentArm, k])
                # Sample-size-weighted cumulative rates across stages 1..k.
                overallRates[treatmentArm, k] <- subjectsPerStage[treatmentArm, 1:k] %*%
                    simRates[treatmentArm, 1:k] / sum(subjectsPerStage[treatmentArm, 1:k])
                overallRatesControl[k] <- subjectsPerStage[gMax + 1, 1:k] %*%
                    simRates[gMax + 1, 1:k] / sum(subjectsPerStage[gMax + 1, 1:k])
                overallEffectSizes[treatmentArm, k] <- (2 * directionUpper - 1) *
                    (overallRates[treatmentArm, k] - overallRatesControl[k])
                rmOverall <- (allocationRatioPlanned[k] * overallRates[treatmentArm, k] +
                    overallRatesControl[k]) / (allocationRatioPlanned[k] + 1)
                if (overallEffectSizes[treatmentArm, k] == 0) {
                    overallTestStatistics[treatmentArm, k] <- 0
                } else {
                    # NOTE(review): the inner sqrt(1/N1 + 1/N2) differs from the
                    # stage-wise statistic above, which uses (1/n1 + 1/n2)
                    # without an inner sqrt — verify this is intended.
                    overallTestStatistics[treatmentArm, k] <-
                        overallEffectSizes[treatmentArm, k] /
                            sqrt(rmOverall * (1 - rmOverall) *
                                sqrt(1 / sum(subjectsPerStage[treatmentArm, 1:k]) +
                                    1 / sum(subjectsPerStage[gMax + 1, 1:k])))
                }
            }
        }
        if (k < kMax) {
            # Only the control arm left selected: stop the trial simulation.
            if (colSums(selectedArms)[k] == 1) {
                break
            }
            # Bonferroni adjustment
            adjustedPValues[k] <- min(min(separatePValues[, k], na.rm = TRUE) *
                (colSums(selectedArms)[k] - 1), 1 - 1e-12)
            # conditional critical value to reject the null hypotheses at the next stage of the trial
            if (.isTrialDesignConditionalDunnett(design)) {
                conditionalCriticalValue[k] <- (.getOneMinusQNorm(design$alpha) -
                    .getOneMinusQNorm(adjustedPValues[k]) * sqrt(design$informationAtInterim)) /
                    sqrt(1 - design$informationAtInterim)
            } else {
                if (.isTrialDesignFisher(design)) {
                    conditionalCriticalValue[k] <- .getOneMinusQNorm(min(
                        (design$criticalValues[k + 1] /
                            prod(adjustedPValues[1:k]^weights[1:k]))^(1 / weights[k + 1]),
                        1 - 1e-7
                    ))
                } else {
                    if (design$criticalValues[k + 1] >= 6) {
                        conditionalCriticalValue[k] <- Inf
                    } else {
                        conditionalCriticalValue[k] <- (design$criticalValues[k + 1] *
                            sqrt(design$informationRates[k + 1]) -
                            .getOneMinusQNorm(adjustedPValues[1:k]) %*% weights[1:k]) /
                            sqrt(design$informationRates[k + 1] - design$informationRates[k])
                    }
                }
            }
            if (adaptations[k]) {
                # Arm selection based on the chosen effect measure; small
                # uniform noise breaks ties between identical statistics.
                if (effectMeasure == "testStatistic") {
                    selectedArms[, k + 1] <- (selectedArms[, k] & .selectTreatmentArms(
                        k, overallTestStatistics[, k] + runif(gMax, -1e-05, 1e-05),
                        typeOfSelection, epsilonValue, rValue, threshold, selectArmsFunction
                    ))
                } else if (effectMeasure == "effectEstimate") {
                    selectedArms[, k + 1] <- (selectedArms[, k] & .selectTreatmentArms(
                        k, overallEffectSizes[, k] + runif(gMax, -1e-05, 1e-05),
                        typeOfSelection, epsilonValue, rValue, threshold, selectArmsFunction
                    ))
                }
                newSubjects <- calcSubjectsFunction(
                    stage = k + 1, # to be consistent with non-multiarm situation, cf. line 39
                    directionUpper = directionUpper,
                    conditionalPower = conditionalPower,
                    conditionalCriticalValue = conditionalCriticalValue,
                    plannedSubjects = plannedSubjects,
                    allocationRatioPlanned = allocationRatioPlanned,
                    selectedArms = selectedArms,
                    piTreatmentsH1 = piTreatmentsH1,
                    piControlH1 = piControlH1,
                    overallRates = overallRates,
                    overallRatesControl = overallRatesControl,
                    minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage,
                    maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage
                )
                # Validate user-supplied recalculation results.
                if (is.null(newSubjects) || length(newSubjects) != 1 ||
                        !is.numeric(newSubjects) || is.na(newSubjects)) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "'calcSubjectsFunction' returned an illegal or undefined result (",
                        newSubjects, "); ",
                        "the output must be a single numeric value"
                    )
                }
                if (!is.na(conditionalPower) || calcSubjectsFunctionIsUserDefined) {
                    plannedSubjects[(k + 1):kMax] <- ceiling(sum(subjectsPerStage[gMax + 1, 1:k] *
                        allocationRatioPlanned[1:k]) + cumsum(rep(newSubjects, kMax - k)))
                }
            } else {
                selectedArms[, k + 1] <- selectedArms[, k]
            }
            # Assumed rates under H1 for the conditional power computation
            # (same convention as in .getSimulationRatesMultiArmStageSubjects).
            if (is.na(piControlH1)) {
                piAssumedControlH1 <- overallRatesControl[k]
            } else {
                piAssumedControlH1 <- piControlH1
            }
            if (is.na(piTreatmentsH1)) {
                if (directionUpper) {
                    piAssumedH1 <- min(overallRates[selectedArms[1:gMax, k], k], na.rm = TRUE)
                } else {
                    piAssumedH1 <- max(overallRates[selectedArms[1:gMax, k], k], na.rm = TRUE)
                }
            } else {
                piAssumedH1 <- piTreatmentsH1
            }
            pim <- (allocationRatioPlanned[k] * piAssumedH1 + piAssumedControlH1) /
                (1 + allocationRatioPlanned[k])
            # Standardized effect for conditional power; zero when both
            # assumed variances vanish (degenerate rates 0 or 1).
            if (piAssumedH1 * (1 - piAssumedH1) +
                    piAssumedControlH1 * (1 - piAssumedControlH1) == 0) {
                thetaStandardized <- 0
            } else {
                thetaStandardized <- sqrt(allocationRatioPlanned[k]) /
                    (1 + allocationRatioPlanned[k]) *
                    ((piAssumedH1 - piAssumedControlH1) * sqrt(1 + allocationRatioPlanned[k]) /
                        sqrt(piAssumedH1 * (1 - piAssumedH1) + allocationRatioPlanned[k] *
                            piAssumedControlH1 * (1 - piAssumedControlH1)) +
                        sign(piAssumedH1 - piAssumedControlH1) * conditionalCriticalValue[k] *
                            (1 - sqrt(pim * (1 - pim) + allocationRatioPlanned[k] *
                                pim * (1 - pim)) /
                                sqrt(piAssumedH1 * (1 - piAssumedH1) + allocationRatioPlanned[k] *
                                    piAssumedControlH1 * (1 - piAssumedControlH1))) *
                            sqrt((1 + allocationRatioPlanned[k]) /
                                (plannedSubjects[k + 1] - plannedSubjects[k]))
                    )
            }
            thetaStandardized <- (2 * directionUpper - 1) * thetaStandardized
            conditionalPowerPerStage[k] <- 1 - stats::pnorm(conditionalCriticalValue[k] -
                thetaStandardized *
                    sqrt((1 + allocationRatioPlanned[k]) / allocationRatioPlanned[k]) *
                    sqrt(plannedSubjects[k + 1] - plannedSubjects[k]))
        }
    }
    return(list(
        subjectsPerStage = subjectsPerStage,
        allocationRatioPlanned = allocationRatioPlanned,
        overallEffectSizes = overallEffectSizes,
        testStatistics = testStatistics,
        directionUpper = directionUpper,
        overallTestStatistics = overallTestStatistics,
        overallRatesControl = overallRatesControl,
        overallRates = overallRates,
        separatePValues = separatePValues,
        conditionalCriticalValue = conditionalCriticalValue,
        conditionalPowerPerStage = conditionalPowerPerStage,
        selectedArms = selectedArms
    ))
}

#'
#' @title
#' Get Simulation Multi-Arm Rates
#'
#' @description
#' Returns the simulated power, stopping and selection probabilities, conditional power,
#' and expected sample size for testing rates in a multi-arm treatment groups testing situation.
#'
#' @param piMaxVector Range of assumed probabilities for the treatment group with
#' highest response for \code{"linear"} and \code{"sigmoidEmax"} model,
#' default is \code{seq(0, 1, 0.2)}.
#' @param piControl If specified, the assumed probability in the control arm
#' for simulation and under which the sample size recalculation is performed.
#' @param piTreatmentsH1 If specified, the assumed probability in the active treatment arm(s)
#' under which the sample size recalculation is performed.
#' @param piControlH1 If specified, the assumed probability in the reference group
#' (if different from \code{piControl}) for which the conditional power was calculated.
#' @inheritParams param_intersectionTest_MultiArm
#' @inheritParams param_typeOfSelection
#' @inheritParams param_effectMeasure
#' @inheritParams param_adaptations
#' @inheritParams param_threshold
#' @inheritParams param_effectMatrix
#' @inheritParams param_activeArms
#' @inheritParams param_successCriterion
#' @inheritParams param_typeOfShape
#' @inheritParams param_typeOfSelection
#' @inheritParams param_design_with_default
#' @inheritParams param_directionUpper
#' @inheritParams param_allocationRatioPlanned
#' @inheritParams param_plannedSubjects
#' @inheritParams param_minNumberOfSubjectsPerStage
#' @inheritParams param_maxNumberOfSubjectsPerStage
#' @inheritParams param_conditionalPowerSimulation
#' @inheritParams param_maxNumberOfIterations
#' @inheritParams param_calcSubjectsFunction
#' @inheritParams param_selectArmsFunction
#' @inheritParams param_rValue
#' @inheritParams param_epsilonValue
#' @inheritParams param_gED50
#' @inheritParams param_slope
#' @inheritParams param_seed
#' @inheritParams param_three_dots
#' @inheritParams
param_showStatistics
#'
#' @details
#' At given design the function simulates the power, stopping probabilities,
#' selection probabilities, and expected sample size at given number of subjects,
#' parameter configuration, and treatment arm selection rule in the multi-arm situation.
#' An allocation ratio can be specified referring to the ratio of number of
#' subjects in the active treatment groups as compared to the control group.
#'
#' The definition of \code{piTreatmentsH1} and/or \code{piControlH1} only makes sense if \code{kMax} > 1
#' and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and
#' \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined.
#'
#' \code{calcSubjectsFunction}\cr
#' This function returns the number of subjects at given conditional power and
#' conditional critical value for specified testing situation.
#' The function might depend on the variables
#' \code{stage},
#' \code{selectedArms},
#' \code{directionUpper},
#' \code{plannedSubjects},
#' \code{allocationRatioPlanned},
#' \code{minNumberOfSubjectsPerStage},
#' \code{maxNumberOfSubjectsPerStage},
#' \code{conditionalPower},
#' \code{conditionalCriticalValue},
#' \code{overallRates},
#' \code{overallRatesControl},
#' \code{piTreatmentsH1}, and
#' \code{piControlH1}.
#' The function has to contain the three-dots argument '...' (see examples).
#' #' @template return_object_simulation_results #' @template how_to_get_help_for_generics #' #' @template examples_get_simulation_multiarm_rates #' #' @export #' getSimulationMultiArmRates <- function(design = NULL, ..., activeArms = 3L, # C_ACTIVE_ARMS_DEFAULT effectMatrix = NULL, typeOfShape = c("linear", "sigmoidEmax", "userDefined"), # C_TYPE_OF_SHAPE_DEFAULT piMaxVector = seq(0.2, 0.5, 0.1), # C_PI_1_DEFAULT piControl = 0.2, # C_PI_2_DEFAULT gED50 = NA_real_, slope = 1, intersectionTest = c("Dunnett", "Bonferroni", "Simes", "Sidak", "Hierarchical"), # C_INTERSECTION_TEST_MULTIARMED_DEFAULT directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT adaptations = NA, typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), # C_TYPE_OF_SELECTION_DEFAULT effectMeasure = c("effectEstimate", "testStatistic"), # C_EFFECT_MEASURE_DEFAULT successCriterion = c("all", "atLeastOne"), # C_SUCCESS_CRITERION_DEFAULT epsilonValue = NA_real_, rValue = NA_real_, threshold = -Inf, plannedSubjects = NA_real_, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, piTreatmentsH1 = NA_real_, piControlH1 = NA_real_, maxNumberOfIterations = 1000L, # C_MAX_SIMULATION_ITERATIONS_DEFAULT seed = NA_real_, calcSubjectsFunction = NULL, selectArmsFunction = NULL, showStatistics = FALSE) { if (is.null(design)) { design <- .getDefaultDesign(..., type = "simulation") .warnInCaseOfUnknownArguments( functionName = "getSimulationMultiArmRates", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "showStatistics"), ... ) } else { .assertIsTrialDesignInverseNormalOrFisherOrConditionalDunnett(design) .warnInCaseOfUnknownArguments( functionName = "getSimulationMultiArmRates", ignore = "showStatistics", ... ) .warnInCaseOfTwoSidedPowerArgument(...) 
} .assertIsOneSidedDesign(design, designType = "multi-arm", engineType = "simulation") calcSubjectsFunctionIsUserDefined <- !is.null(calcSubjectsFunction) simulationResults <- .createSimulationResultsMultiArmObject( design = design, activeArms = activeArms, effectMatrix = effectMatrix, typeOfShape = typeOfShape, piMaxVector = piMaxVector, # rates only piControl = piControl, # rates only gED50 = gED50, slope = slope, intersectionTest = intersectionTest, directionUpper = directionUpper, # rates + survival only adaptations = adaptations, typeOfSelection = typeOfSelection, effectMeasure = effectMeasure, successCriterion = successCriterion, epsilonValue = epsilonValue, rValue = rValue, threshold = threshold, plannedSubjects = plannedSubjects, # means + rates only allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, # means + rates only maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, # means + rates only conditionalPower = conditionalPower, piTreatmentsH1 = piTreatmentsH1, # rates only piControlH1 = piControlH1, # rates only maxNumberOfIterations = maxNumberOfIterations, seed = seed, calcSubjectsFunction = calcSubjectsFunction, # means + rates only selectArmsFunction = selectArmsFunction, showStatistics = showStatistics, endpoint = "rates" ) design <- simulationResults$.design successCriterion <- simulationResults$successCriterion effectMeasure <- simulationResults$effectMeasure adaptations <- simulationResults$adaptations gMax <- activeArms kMax <- simulationResults$.design$kMax intersectionTest <- simulationResults$intersectionTest typeOfSelection <- simulationResults$typeOfSelection effectMatrix <- t(simulationResults$effectMatrix) piMaxVector <- simulationResults$piMaxVector # rates only piControl <- simulationResults$piControl # rates only piTreatmentsH1 <- simulationResults$piTreatmentsH1 # rates only piControlH1 <- simulationResults$piControlH1 # rates only conditionalPower <- 
simulationResults$conditionalPower minNumberOfSubjectsPerStage <- simulationResults$minNumberOfSubjectsPerStage maxNumberOfSubjectsPerStage <- simulationResults$maxNumberOfSubjectsPerStage allocationRatioPlanned <- simulationResults$allocationRatioPlanned calcSubjectsFunction <- simulationResults$calcSubjectsFunction if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, kMax) } indices <- .getIndicesOfClosedHypothesesSystemForSimulation(gMax = gMax) if (.isTrialDesignConditionalDunnett(design)) { criticalValuesDunnett <- .getCriticalValuesDunnettForSimulation( alpha = design$alpha, indices = indices, allocationRatioPlanned = allocationRatioPlanned ) } cols <- length(piMaxVector) simulatedSelections <- array(0, dim = c(kMax, cols, gMax + 1)) simulatedRejections <- array(0, dim = c(kMax, cols, gMax)) simulatedNumberOfActiveArms <- matrix(0, nrow = kMax, ncol = cols) simulatedSubjectsPerStage <- array(0, dim = c(kMax, cols, gMax + 1)) simulatedSuccessStopping <- matrix(0, nrow = kMax, ncol = cols) simulatedFutilityStopping <- matrix(0, nrow = kMax - 1, ncol = cols) simulatedConditionalPower <- matrix(0, nrow = kMax, ncol = cols) simulatedRejectAtLeastOne <- rep(0, cols) expectedNumberOfSubjects <- rep(0, cols) iterations <- matrix(0, nrow = kMax, ncol = cols) len <- maxNumberOfIterations * kMax * gMax * cols dataIterationNumber <- rep(NA_real_, len) dataStageNumber <- rep(NA_real_, len) dataArmNumber <- rep(NA_real_, len) dataAlternative <- rep(NA_real_, len) dataEffect <- rep(NA_real_, len) dataSubjectsControlArm <- rep(NA_real_, len) dataSubjectsActiveArm <- rep(NA_real_, len) dataNumberOfSubjects <- rep(NA_real_, len) dataNumberOfCumulatedSubjects <- rep(NA_real_, len) dataRejectPerStage <- rep(NA, len) dataFutilityStop <- rep(NA_real_, len) dataSuccessStop <- rep(NA, len) dataFutilityStop <- rep(NA, len) dataTestStatistics <- rep(NA_real_, len) dataConditionalCriticalValue <- rep(NA_real_, len) 
dataConditionalPowerAchieved <- rep(NA_real_, len) dataEffectEstimate <- rep(NA_real_, len) dataPValuesSeparate <- rep(NA_real_, len) index <- 1 for (i in 1:cols) { for (j in 1:maxNumberOfIterations) { stageResults <- .getSimulatedStageRatesMultiArm( design = design, directionUpper = directionUpper, piVector = effectMatrix[i, ], piControl = piControl, plannedSubjects = plannedSubjects, typeOfSelection = typeOfSelection, effectMeasure = effectMeasure, adaptations = adaptations, epsilonValue = epsilonValue, rValue = rValue, threshold = threshold, allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, conditionalPower = conditionalPower, piTreatmentsH1 = piTreatmentsH1, piControlH1 = piControlH1, calcSubjectsFunction = calcSubjectsFunction, calcSubjectsFunctionIsUserDefined = calcSubjectsFunctionIsUserDefined, selectArmsFunction = selectArmsFunction ) if (.isTrialDesignConditionalDunnett(design)) { closedTest <- .performClosedConditionalDunnettTestForSimulation( stageResults = stageResults, design = design, indices = indices, criticalValuesDunnett = criticalValuesDunnett, successCriterion = successCriterion ) } else { closedTest <- .performClosedCombinationTestForSimulationMultiArm( stageResults = stageResults, design = design, indices = indices, intersectionTest = intersectionTest, successCriterion = successCriterion ) } rejectAtSomeStage <- FALSE rejectedArmsBefore <- rep(FALSE, gMax) for (k in 1:kMax) { simulatedRejections[k, i, ] <- simulatedRejections[k, i, ] + (closedTest$rejected[, k] & closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore) simulatedSelections[k, i, ] <- simulatedSelections[k, i, ] + closedTest$selectedArms[, k] simulatedNumberOfActiveArms[k, i] <- simulatedNumberOfActiveArms[k, i] + sum(closedTest$selectedArms[, k]) if (!any(is.na(closedTest$successStop))) { simulatedSuccessStopping[k, i] <- simulatedSuccessStopping[k, i] + 
closedTest$successStop[k] } if ((kMax > 1) && (k < kMax)) { if (!any(is.na(closedTest$futilityStop))) { simulatedFutilityStopping[k, i] <- simulatedFutilityStopping[k, i] + (closedTest$futilityStop[k] && !closedTest$successStop[k]) } if (!closedTest$successStop[k] && !closedTest$futilityStop[k]) { simulatedConditionalPower[k + 1, i] <- simulatedConditionalPower[k + 1, i] + stageResults$conditionalPowerPerStage[k] } } iterations[k, i] <- iterations[k, i] + 1 for (g in (1:(gMax + 1))) { if (!is.na(stageResults$subjectsPerStage[g, k])) { simulatedSubjectsPerStage[k, i, g] <- simulatedSubjectsPerStage[k, i, g] + stageResults$subjectsPerStage[g, k] } } for (g in 1:gMax) { dataIterationNumber[index] <- j dataStageNumber[index] <- k dataArmNumber[index] <- g dataAlternative[index] <- piMaxVector[i] dataEffect[index] <- effectMatrix[i, g] dataSubjectsControlArm[index] <- round(stageResults$subjectsPerStage[gMax + 1, k], 1) dataSubjectsActiveArm[index] <- round(stageResults$subjectsPerStage[g, k], 1) dataNumberOfSubjects[index] <- round(sum(stageResults$subjectsPerStage[, k], na.rm = TRUE), 1) dataNumberOfCumulatedSubjects[index] <- round(sum(stageResults$subjectsPerStage[, 1:k], na.rm = TRUE), 1) dataRejectPerStage[index] <- closedTest$rejected[g, k] dataTestStatistics[index] <- stageResults$testStatistics[g, k] dataSuccessStop[index] <- closedTest$successStop[k] if (k < kMax) { dataFutilityStop[index] <- closedTest$futilityStop[k] dataConditionalCriticalValue[index] <- stageResults$conditionalCriticalValue[k] dataConditionalPowerAchieved[index + 1] <- stageResults$conditionalPowerPerStage[k] } dataEffectEstimate[index] <- stageResults$overallEffectSizes[g, k] dataPValuesSeparate[index] <- closedTest$separatePValues[g, k] index <- index + 1 } if (!rejectAtSomeStage && any(closedTest$rejected[, k] & closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore)) { simulatedRejectAtLeastOne[i] <- simulatedRejectAtLeastOne[i] + 1 rejectAtSomeStage <- TRUE } if ((k < kMax) && 
(closedTest$successStop[k] || closedTest$futilityStop[k])) { # rejected hypotheses remain rejected also in case of early stopping simulatedRejections[(k + 1):kMax, i, ] <- simulatedRejections[(k + 1):kMax, i, ] + matrix((closedTest$rejected[, k] & closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore), kMax - k, gMax, byrow = TRUE ) break } rejectedArmsBefore <- closedTest$rejected[, k] & closedTest$selectedArms[1:gMax, k] | rejectedArmsBefore } } simulatedSubjectsPerStage[is.na(simulatedSubjectsPerStage)] <- 0 simulatedSubjectsPerStage[, i, ] <- simulatedSubjectsPerStage[, i, ] / iterations[, i] if (kMax > 1) { simulatedRejections[2:kMax, i, ] <- simulatedRejections[2:kMax, i, ] - simulatedRejections[1:(kMax - 1), i, ] stopping <- cumsum(simulatedSuccessStopping[1:(kMax - 1), i] + simulatedFutilityStopping[, i]) / maxNumberOfIterations expectedNumberOfSubjects[i] <- sum(simulatedSubjectsPerStage[1, i, ] + t(1 - stopping) %*% simulatedSubjectsPerStage[2:kMax, i, ]) } else { expectedNumberOfSubjects[i] <- sum(simulatedSubjectsPerStage[1, i, ]) } } simulatedConditionalPower[1, ] <- NA_real_ if (kMax > 1) { simulatedConditionalPower[2:kMax, ] <- as.matrix(simulatedConditionalPower[2:kMax, ] / iterations[2:kMax, ]) } simulationResults$rejectAtLeastOne <- simulatedRejectAtLeastOne / maxNumberOfIterations simulationResults$numberOfActiveArms <- simulatedNumberOfActiveArms / iterations - 1 simulationResults$selectedArms <- simulatedSelections / maxNumberOfIterations simulationResults$rejectedArmsPerStage <- simulatedRejections / maxNumberOfIterations simulationResults$successPerStage <- simulatedSuccessStopping / maxNumberOfIterations simulationResults$futilityPerStage <- simulatedFutilityStopping / maxNumberOfIterations simulationResults$futilityStop <- base::colSums(simulatedFutilityStopping / maxNumberOfIterations) if (kMax > 1) { simulationResults$earlyStop <- simulationResults$futilityPerStage + simulationResults$successPerStage[1:(kMax - 1), ] 
simulationResults$conditionalPowerAchieved <- simulatedConditionalPower } simulationResults$sampleSizes <- simulatedSubjectsPerStage simulationResults$expectedNumberOfSubjects <- expectedNumberOfSubjects simulationResults$iterations <- iterations if (!all(is.na(simulationResults$conditionalPowerAchieved))) { simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED) } if (any(simulationResults$rejectedArmsPerStage < 0)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "internal error, simulation not possible due to numerical overflow") } data <- data.frame( iterationNumber = dataIterationNumber, stageNumber = dataStageNumber, armNumber = dataArmNumber, piMax = dataAlternative, effect = dataEffect, numberOfSubjects = dataNumberOfSubjects, numberOfCumulatedSubjects = dataNumberOfCumulatedSubjects, subjectsControlArm = dataSubjectsControlArm, subjectsActiveArm = dataSubjectsActiveArm, effectEstimate = dataEffectEstimate, testStatistics = dataTestStatistics, pValue = dataPValuesSeparate, conditionalCriticalValue = round(dataConditionalCriticalValue, 6), conditionalPowerAchieved = round(dataConditionalPowerAchieved, 6), rejectPerStage = dataRejectPerStage, successStop = dataSuccessStop, futilityPerStage = dataFutilityStop ) data <- data[!is.na(data$effectEstimate), ] simulationResults$.data <- data return(simulationResults) } rpact/R/f_design_fisher_combination_test.R0000644000176200001440000003625714445307575020501 0ustar liggesusers## | ## | *Fisher combination test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: 
info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_constants.R #' @include f_core_utilities.R #' @include f_logger.R NULL .getFisherCombinationSize <- function(kMax, alpha0Vec, criticalValues, tVec, cases = .getFisherCombinationCases(kMax = kMax, tVec = tVec)) { return(getFisherCombinationSizeCpp(kMax, alpha0Vec, criticalValues, tVec, cases)) } #' @title #' Get Design Fisher #' #' @description #' Performs Fisher's combination test and returns critical values for this design. #' #' @inheritParams param_kMax #' @inheritParams param_alpha #' @param method \code{"equalAlpha"}, \code{"fullAlpha"}, \code{"noInteraction"}, or \code{"userDefinedAlpha"}, #' default is \code{"equalAlpha"} (for details, see Wassmer, 1999). #' @inheritParams param_userAlphaSpending #' @param alpha0Vec Stopping for futility bounds for stage-wise p-values. #' @inheritParams param_informationRates #' @inheritParams param_sided #' @param bindingFutility If \code{bindingFutility = TRUE} is specified the calculation of #' the critical values is affected by the futility bounds (default is \code{TRUE}). #' @param tolerance The numerical tolerance, default is \code{1e-14}. #' @param iterations The number of simulation iterations, e.g., #' \code{getDesignFisher(iterations = 100000)} checks the validity of the critical values for the design. #' The default value of \code{iterations} is 0, i.e., no simulation will be executed. #' @param seed Seed for simulating the power for Fisher's combination test. See above, default is a random seed. #' @inheritParams param_three_dots #' #' @details #' \code{getDesignFisher()} calculates the critical values and stage levels for #' Fisher's combination test as described in Bauer (1989), Bauer and Koehne (1994), #' Bauer and Roehmel (1995), and Wassmer (1999) for equally and unequally sized stages. 
#' #' @seealso \code{\link[=getDesignSet]{getDesignSet()}} for creating a set of designs to compare. #' #' @template return_object_trial_design #' @template how_to_get_help_for_generics #' #' @family design functions #' #' @template examples_get_design_Fisher #' #' @export #' getDesignFisher <- function(..., kMax = NA_integer_, alpha = NA_real_, method = c("equalAlpha", "fullAlpha", "noInteraction", "userDefinedAlpha"), # C_FISHER_METHOD_DEFAULT userAlphaSpending = NA_real_, alpha0Vec = NA_real_, informationRates = NA_real_, sided = 1, # C_SIDED_DEFAULT bindingFutility = NA, tolerance = 1e-14, # C_ANALYSIS_TOLERANCE_FISHER_DEFAULT iterations = 0, seed = NA_real_) { .assertIsValidTolerance(tolerance) .assertIsValidIterationsAndSeed(iterations, seed) .warnInCaseOfUnknownArguments(functionName = "getDesignFisher", ...) return(.getDesignFisher( kMax = kMax, alpha = alpha, method = method, userAlphaSpending = userAlphaSpending, alpha0Vec = alpha0Vec, informationRates = informationRates, sided = sided, bindingFutility = bindingFutility, tolerance = tolerance, iterations = iterations, seed = seed, userFunctionCallEnabled = TRUE )) } .getDesignFisherDefaultValues <- function() { return(list( kMax = NA_integer_, alpha = NA_real_, method = C_FISHER_METHOD_DEFAULT, userAlphaSpending = NA_real_, alpha0Vec = NA_real_, informationRates = NA_real_, sided = 1, bindingFutility = C_BINDING_FUTILITY_FISHER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT, iterations = 0, seed = NA_real_ )) } .getFisherCombinationCases <- function(kMax, tVec) { return(getFisherCombinationCasesCpp(kMax, tVec)) } #' #' @param userFunctionCallEnabled if \code{TRUE}, additional parameter validation methods will be called. 
#' #' @noRd #' .getDesignFisher <- function(kMax = NA_integer_, alpha = NA_real_, method = C_FISHER_METHOD_DEFAULT, userAlphaSpending = NA_real_, alpha0Vec = NA_real_, informationRates = NA_real_, sided = 1, bindingFutility = C_BINDING_FUTILITY_FISHER_DEFAULT, tolerance = C_ANALYSIS_TOLERANCE_FISHER_DEFAULT, iterations = 0, seed = NA_real_, userFunctionCallEnabled = FALSE) { method <- .matchArgument(method, C_FISHER_METHOD_DEFAULT) .assertIsNumericVector(alpha0Vec, "alpha0Vec", naAllowed = TRUE) if (.isDefinedArgument(kMax, argumentExistsValidationEnabled = userFunctionCallEnabled)) { .assertIsValidKMax(kMax, kMaxUpperBound = C_KMAX_UPPER_BOUND_FISHER) if (!is.integer(kMax)) { kMax <- as.integer(kMax) } } if (!is.integer(sided) && sided %in% c(1, 2)) { sided <- as.integer(sided) } if (sided != 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Fisher's combination test only available for one-sided testing") } if (is.na(bindingFutility)) { bindingFutility <- C_BINDING_FUTILITY_FISHER_DEFAULT } else if (userFunctionCallEnabled && ((!is.na(kMax) && kMax == 1) || (!any(is.na(alpha0Vec)) && all(alpha0Vec == C_ALPHA_0_VEC_DEFAULT)))) { warning("'bindingFutility' (", bindingFutility, ") will be ignored", call. 
= FALSE) } design <- TrialDesignFisher( kMax = kMax, alpha = alpha, method = method, sided = sided, userAlphaSpending = userAlphaSpending, alpha0Vec = alpha0Vec, informationRates = informationRates, bindingFutility = bindingFutility, tolerance = tolerance, iterations = as.integer(iterations), seed = seed ) .assertDesignParameterExists(design, "sided", C_SIDED_DEFAULT) .assertIsValidSidedParameter(design$sided) .assertDesignParameterExists(design, "method", C_FISHER_METHOD_DEFAULT) .assertIsSingleCharacter(design$method, "method") if (!.isFisherMethod(design$method)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'method' must be one of the following: ", .printFisherMethods() ) } .assertDesignParameterExists(design, "bindingFutility", C_BINDING_FUTILITY_FISHER_DEFAULT) .assertDesignParameterExists(design, "tolerance", C_ANALYSIS_TOLERANCE_FISHER_DEFAULT) .setKmaxBasedOnAlphaSpendingDefintion(design) design$informationRates <- .getValidatedInformationRates(design) design$alpha0Vec <- .getValidatedAlpha0Vec(design) if (design$sided == 2 && design$bindingFutility && any(design$alpha0Vec < 1)) { warning("Binding futility will be ignored because the test is defined as two-sided", call. = FALSE) } if (design$method == C_FISHER_METHOD_USER_DEFINED_ALPHA) { .validateUserAlphaSpending(design) } else { design$.setParameterType("userAlphaSpending", C_PARAM_NOT_APPLICABLE) if (.isDefinedArgument(design$userAlphaSpending)) { warning("'userAlphaSpending' will be ignored because 'method' is not '", C_FISHER_METHOD_USER_DEFINED_ALPHA, "'", call. 
= FALSE ) } } if (.isUndefinedArgument(design$alpha)) { design$alpha <- C_ALPHA_DEFAULT } .assertDesignParameterExists(design, "alpha", C_ALPHA_DEFAULT) .assertIsSingleNumber(design$alpha, "alpha") .assertIsValidSidedParameter(sided) if (sided != 1) { design$alpha <- design$alpha / sided } if (userFunctionCallEnabled) { .assertIsValidAlpha(design$alpha) } .assertDesignParameterExists(design, "kMax", 3) .assertIsSingleInteger(design$kMax, "kMax") .assertIsValidKMax(design$kMax, kMaxUpperBound = C_KMAX_UPPER_BOUND_FISHER) if (design$method == C_FISHER_METHOD_NO_INTERACTION && design$kMax < 3) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "method '", C_FISHER_METHOD_NO_INTERACTION, "' is only allowed for kMax > 2 (kMax is ", design$kMax, ")" ) } if (design$kMax > 1) { design$scale <- round(sqrt((design$informationRates[2:design$kMax] - design$informationRates[1:(design$kMax - 1)]) / design$informationRates[1]), 10) } design$criticalValues <- rep(NA_real_, design$kMax) design$.setParameterType("scale", C_PARAM_NOT_APPLICABLE) design$.setParameterType("criticalValues", C_PARAM_GENERATED) if (design$bindingFutility) { alpha0Vec <- design$alpha0Vec } else { alpha0Vec <- rep(1, design$kMax - 1) } if (design$method == C_FISHER_METHOD_NO_INTERACTION && !any(is.na(alpha0Vec)) && all(alpha0Vec == C_ALPHA_0_VEC_DEFAULT)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "for specified 'method' (\"", C_FISHER_METHOD_NO_INTERACTION, "\") the 'alpha0Vec' must be unequal to ", .arrayToString(alpha0Vec, vectorLookAndFeelEnabled = TRUE), " and 'bindingFutility' must be TRUE" ) } design$.setParameterType("stageLevels", C_PARAM_GENERATED) design$.setParameterType("alphaSpent", C_PARAM_GENERATED) design$.setParameterType("nonStochasticCurtailment", C_PARAM_GENERATED) tryCatch( { cases <- .getFisherCombinationCases(kMax = design$kMax, tVec = design$scale) result <- getDesignFisherTryCpp( design$kMax, design$alpha, design$tolerance, design$criticalValues, design$scale, alpha0Vec, 
design$userAlphaSpending, design$method ) design$criticalValues <- result$criticalValues design$alphaSpent <- result$alphaSpent design$stageLevels <- result$stageLevels design$nonStochasticCurtailment <- result$nonStochasticCurtailment size <- result$size design$stageLevels <- sapply(1:design$kMax, function(k) { .getFisherCombinationSize(k, rep(1, k - 1), rep(design$criticalValues[k], k), design$scale, cases = cases ) }) design$alphaSpent <- sapply(1:design$kMax, function(k) { .getFisherCombinationSize(k, alpha0Vec[1:(k - 1)], design$criticalValues[1:k], design$scale, cases = cases ) }) design$nonStochasticCurtailment <- FALSE if (design$stageLevels[1] < 1e-10) { design$criticalValues[1:(design$kMax - 1)] <- design$criticalValues[design$kMax] design$stageLevels <- sapply( 1:design$kMax, function(k) { .getFisherCombinationSize(k, rep(1, k - 1), rep(design$criticalValues[k], k), design$scale, cases = cases ) } ) design$alphaSpent <- sapply( 1:design$kMax, function(k) { .getFisherCombinationSize(k, alpha0Vec[1:(k - 1)], design$criticalValues[1:k], design$scale, cases = cases ) } ) design$nonStochasticCurtailment <- TRUE } }, error = function(e) { warning("Output may be wrong because an error occured: ", e$message, call. 
= FALSE) } ) if (userFunctionCallEnabled) { if (design$method == C_FISHER_METHOD_NO_INTERACTION && abs(size - design$alpha) > 1e-03) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "numerical overflow in computation routine") } if (design$method == C_FISHER_METHOD_EQUAL_ALPHA && !all(is.na(design$stageLevels)) && abs(mean(na.omit(design$stageLevels)) - design$stageLevels[1]) > 1e-03) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "numerical overflow in computation routine") } if (design$kMax > 1) { diff <- na.omit(design$criticalValues[2:design$kMax] - design$criticalValues[1:(design$kMax - 1)]) if (length(diff) > 0 && any(diff > 1e-12)) { .logDebug( "Stop creation of Fisher design because critical values are ", .arrayToString(criticalValues, vectorLookAndFeelEnabled = TRUE), ", ", "i.e., differences are ", .arrayToString(diff, vectorLookAndFeelEnabled = TRUE) ) stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "no calculation possible") } if (!all(is.na(design$stageLevels)) && any(na.omit(design$stageLevels[1:(design$kMax - 1)]) > design$alpha)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'alpha' (", design$alpha, ") not correctly specified" ) } } if (design$method == C_FISHER_METHOD_USER_DEFINED_ALPHA) { if (any(abs(design$alphaSpent - design$userAlphaSpending) > 1e-05)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'alpha' (", design$alpha, ") or 'userAlphaSpending' (", .arrayToString(design$userAlphaSpending), ") not correctly specified" ) } } } design$.setParameterType("simAlpha", C_PARAM_NOT_APPLICABLE) design$simAlpha <- NA_real_ if (!is.null(design$iterations) && !is.na(design$iterations) && design$iterations > 0) { design$.setParameterType("seed", ifelse(!is.null(design$seed) && !is.na(design$seed), C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE )) design$seed <- .setSeed(design$seed) design$simAlpha <- getSimulatedAlphaCpp( kMax = design$kMax, alpha0 = design$alpha0Vec, criticalValues = design$criticalValues, tVec = design$scale, iterations = iterations ) 
design$.setParameterType("simAlpha", C_PARAM_GENERATED) design$.setParameterType("iterations", C_PARAM_USER_DEFINED) } if (design$kMax == 1) { design$.setParameterType("alpha0Vec", C_PARAM_NOT_APPLICABLE) } if (length(design$alpha0Vec) == 0 || all(design$alpha0Vec == C_ALPHA_0_VEC_DEFAULT)) { design$.setParameterType("bindingFutility", C_PARAM_NOT_APPLICABLE) } design$.initStages() return(design) } rpact/R/pkgname.R0000644000176200001440000000772614450463134013351 0ustar liggesusers## | ## | *rpact* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7147 $ ## | Last changed: $Date: 2023-07-03 08:10:31 +0200 (Mo, 03 Jul 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' #' @title #' rpact - Confirmatory Adaptive Clinical Trial Design and Analysis #' #' @description #' rpact (R Package for Adaptive Clinical Trials) is a comprehensive package that enables #' the design, simulation, and analysis of confirmatory adaptive group sequential designs. #' Particularly, the methods described in the recent monograph by Wassmer and Brannath #' (published by Springer, 2016) are implemented. It also comprises advanced methods for sample #' size calculations for fixed sample size designs incl., e.g., sample size calculation for survival #' trials with piecewise exponentially distributed survival times and staggered patients entry. #' #' @details #' rpact includes the classical group sequential designs (incl. 
user spending function approaches) #' where the sample sizes per stage (or the time points of interim analysis) cannot be changed #' in a data-driven way. #' Confirmatory adaptive designs explicitly allow for this under control of the Type I error rate. #' They are either based on the combination testing or the conditional rejection #' probability (CRP) principle. #' Both are available, for the former the inverse normal combination test and #' Fisher's combination test can be used. #' #' Specific techniques of the adaptive methodology are also available, e.g., #' overall confidence intervals, overall p-values, and conditional and predictive power assessments. #' Simulations can be performed to assess the design characteristics of a (user-defined) sample size #' recalculation strategy. Designs are available for trials with continuous, binary, and survival endpoint. #' #' For more information please visit \href{https://www.rpact.org}{www.rpact.org}. #' If you are interested in professional services round about the package or need #' a comprehensive validation documentation to fulfill regulatory requirements #' please visit \href{https://www.rpact.com}{www.rpact.com}. #' #' rpact is developed by #' \itemize{ #' \item Gernot Wassmer (\email{gernot.wassmer@@rpact.com}) and #' \item Friedrich Pahlke (\email{friedrich.pahlke@@rpact.com}). #' } #' #' @references #' Wassmer, G., Brannath, W. 
(2016) Group Sequential and Confirmatory Adaptive Designs #' in Clinical Trials (Springer Series in Pharmaceutical Statistics; \doi{10.1007/978-3-319-32562-0}) #' #' @docType package #' @author Gernot Wassmer, Friedrich Pahlke #' @importFrom Rcpp evalCpp #' @useDynLib rpact, .registration = TRUE #' @name rpact #' #' @import methods #' @import stats #' @import utils #' @import graphics #' @import tools #' @importFrom rlang .data #' @importFrom knitr kable #' @importFrom knitr knit_print #' "_PACKAGE" #> [1] "_PACKAGE" .onAttach <- function(libname, pkgname) { if (grepl("^\\d\\.\\d\\.\\d\\.\\d{4,4}$", packageVersion("rpact"))) { packageStartupMessage(paste0("rpact developer version ", packageVersion("rpact"), " loaded")) } } .onUnload <- function(libpath) { tryCatch( { library.dynam.unload("rpact", libpath) }, error = function(e) { .logWarn("Failed to unload dynamic C library", e) } ) } .onDetach <- function(libpath) { packageStartupMessage(paste0("rpact ", packageVersion("rpact"), " successfully unloaded\n")) } rpact/R/class_core_plot_settings.R0000644000176200001440000007465314445307575017037 0ustar liggesusers## | ## | *Plot setting classes* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | PlotSubTitleItem <- setRefClass("PlotSubTitleItem", fields = list( title = "character", subscript = "character", value = "numeric", digits = "integer" ), methods = list( 
initialize = function(..., title, value, subscript = NA_character_, digits = 3L) { callSuper( title = trimws(title), value = value, subscript = trimws(subscript), digits = digits, ... ) value <<- round(value, digits) }, show = function() { cat(toString(), "\n") }, toQuote = function() { if (!is.null(subscript) && length(subscript) == 1 && !is.na(subscript)) { return(bquote(" " * .(title)[.(subscript)] == .(value))) } return(bquote(" " * .(title) == .(value))) }, toString = function() { if (!is.null(subscript) && length(subscript) == 1 && !is.na(subscript)) { if (grepl("^(\\d+)|max|min$", subscript)) { return(paste0(title, "_", subscript, " = ", value)) } return(paste0(title, "(", trimws(subscript), ") = ", value)) } return(paste(title, "=", value)) } ) ) PlotSubTitleItems <- setRefClass("PlotSubTitleItems", fields = list( title = "character", subtitle = "character", items = "list" ), methods = list( initialize = function(...) { callSuper(...) items <<- list() }, show = function() { cat(title, "\n") if (length(subtitle) == 1 && !is.na(subtitle)) { cat(subtitle, "\n") } s <- toString() if (length(s) == 1 && !is.na(s) && nchar(s) > 0) { cat(s, "\n") } }, addItem = function(item) { items <<- c(items, item) }, add = function(title, value, subscript = NA_character_, ..., digits = 3L) { titleTemp <- title if (length(items) == 0) { titleTemp <- .formatCamelCase(titleTemp, title = TRUE) } titleTemp <- paste0(" ", titleTemp) if (length(subscript) == 1 && !is.na(subscript)) { subscript <- paste0(as.character(subscript), " ") } else { titleTemp <- paste0(titleTemp, " ") } addItem(PlotSubTitleItem(title = titleTemp, value = value, subscript = subscript, digits = digits)) }, toString = function() { if (is.null(items) || length(items) == 0) { return(NA_character_) } s <- character(0) for (item in items) { s <- c(s, item$toString()) } return(paste0(s, collapse = ", ")) }, toHtml = function() { htmlStr <- title if (length(subtitle) == 1 && !is.na(subtitle)) { htmlStr <- 
paste0(htmlStr, "
", subtitle, "") } s <- toString() if (length(s) == 1 && !is.na(s) && nchar(s) > 0) { htmlStr <- paste0(htmlStr, "
", s, "") } return(htmlStr) }, toQuote = function() { quotedItems <- .getQuotedItems() if (is.null(quotedItems)) { if (length(subtitle) > 0) { return(bquote(atop( bold(.(title)), atop(.(subtitle)) ))) } return(title) } if (length(subtitle) > 0) { return(bquote(atop( bold(.(title)), atop(.(subtitle) * "," ~ .(quotedItems)) ))) } return(bquote(atop( bold(.(title)), atop(.(quotedItems)) ))) }, .getQuotedItems = function() { item1 <- NULL item2 <- NULL item3 <- NULL item4 <- NULL if (length(items) > 0) { item1 <- items[[1]] } if (length(items) > 1) { item2 <- items[[2]] } if (length(items) > 2) { item3 <- items[[3]] } if (length(items) > 3) { item4 <- items[[4]] } if (!is.null(item1) && !is.null(item2) && !is.null(item3) && !is.null(item4)) { if (length(item1$subscript) == 1 && !is.na(item1$subscript) && length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(" " * .(item1$title)[.(item1$subscript)] == .(item1$value) * "," ~ .(item2$title)[.(item2$subscript)] == .(item2$value) * "," ~ .(item3$title) == .(item3$value) * "," ~ .(item4$title) == .(item4$value) * "")) } if (length(item1$subscript) == 1 && !is.na(item1$subscript)) { return(bquote(" " * .(item1$title)[.(item1$subscript)] == .(item1$value) * "," ~ .(item2$title) == .(item2$value) * "," ~ .(item3$title) == .(item3$value) * "," ~ .(item4$title) == .(item4$value) * "")) } if (length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(" " * .(item1$title) == .(item1$value) * "," ~ .(item2$title)[.(item2$subscript)] == .(item2$value) * "," ~ .(item3$title) == .(item3$value) * "," ~ .(item4$title) == .(item4$value) * "")) } return(bquote(" " * .(item1$title) == .(item1$value) * "," ~ .(item2$title) == .(item2$value) * "," ~ .(item3$title) == .(item3$value) * "," ~ .(item4$title) == .(item4$value) * "")) } if (!is.null(item1) && !is.null(item2) && !is.null(item3)) { if (length(item1$subscript) == 1 && !is.na(item1$subscript) && length(item2$subscript) == 1 && !is.na(item2$subscript)) { 
return(bquote(" " * .(item1$title)[.(item1$subscript)] == .(item1$value) * "," ~ .(item2$title)[.(item2$subscript)] == .(item2$value) * "," ~ .(item3$title) == .(item3$value) * "")) } if (length(item1$subscript) == 1 && !is.na(item1$subscript)) { return(bquote(" " * .(item1$title)[.(item1$subscript)] == .(item1$value) * "," ~ .(item2$title) == .(item2$value) * "," ~ .(item3$title) == .(item3$value) * "")) } if (length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(" " * .(item1$title) == .(item1$value) * "," ~ .(item2$title)[.(item2$subscript)] == .(item2$value) * "," ~ .(item3$title) == .(item3$value) * "")) } return(bquote(" " * .(item1$title) == .(item1$value) * "," ~ .(item2$title) == .(item2$value) * "," ~ .(item3$title) == .(item3$value) * "")) } if (!is.null(item1) && !is.null(item2)) { if (length(item1$subscript) == 1 && !is.na(item1$subscript) && length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(" " * .(item1$title)[.(item1$subscript)] == .(item1$value) * "," ~ .(item2$title)[.(item2$subscript)] == .(item2$value) * "")) } if (length(item1$subscript) == 1 && !is.na(item1$subscript)) { return(bquote(" " * .(item1$title)[.(item1$subscript)] == .(item1$value) * "," ~ .(item2$title) == .(item2$value) * "")) } if (length(item2$subscript) == 1 && !is.na(item2$subscript)) { return(bquote(" " * .(item1$title) == .(item1$value) * "," ~ .(item2$title)[.(item2$subscript)] == .(item2$value) * "")) } return(bquote(" " * .(item1$title) == .(item1$value) * "," ~ .(item2$title) == .(item2$value) * "")) } if (!is.null(item1)) { if (length(item1$subscript) == 1 && !is.na(item1$subscript)) { return(bquote(" " * .(item1$title)[.(item1$subscript)] == .(item1$value) * "")) } return(bquote(" " * .(item1$title) == .(item1$value) * "")) } return(NULL) } ) ) #' #' @title #' Get Plot Settings #' #' @description #' Returns a plot settings object. #' #' @param lineSize The line size, default is \code{0.8}. 
#' @param pointSize The point size, default is \code{3}.
#' @param pointColor The point color (character), default is \code{NA_character_}.
#' @param mainTitleFontSize The main title font size, default is \code{14}.
#' @param axesTextFontSize The axes text font size, default is \code{10}.
#' @param legendFontSize The legend font size, default is \code{11}.
#' @param scalingFactor The scaling factor, default is \code{1}.
#'
#' @details
#' Returns an object of class \code{PlotSettings} that collects typical plot settings.
#'
#' @export
#'
#' @keywords internal
#'
getPlotSettings <- function(lineSize = 0.8,
        pointSize = 3,
        pointColor = NA_character_,
        mainTitleFontSize = 14,
        axesTextFontSize = 10,
        legendFontSize = 11,
        scalingFactor = 1) {
    # Simply forward all arguments to the PlotSettings reference class generator
    settings <- PlotSettings(
        lineSize = lineSize,
        pointSize = pointSize,
        pointColor = pointColor,
        mainTitleFontSize = mainTitleFontSize,
        axesTextFontSize = axesTextFontSize,
        legendFontSize = legendFontSize,
        scalingFactor = scalingFactor
    )
    settings
}

#'
#' @name PlotSettings
#'
#' @title
#' Plot Settings
#'
#' @description
#' Class for plot settings.
#'
#' @field lineSize The line size.
#' @field pointSize The point size.
#' @field pointColor The point color, e.g., "red" or "blue".
#' @field mainTitleFontSize The main title font size.
#' @field axesTextFontSize The text font size.
#' @field legendFontSize The legend font size.
#' @field scalingFactor The scaling factor.
#'
#' @details
#' Collects typical plot settings in an object.
#'
#' @keywords internal
#'
#' @include class_core_parameter_set.R
#'
#' @importFrom methods new
#'
PlotSettings <- setRefClass("PlotSettings",
    contains = "ParameterSet",
    fields = list(
        .legendLineBreakIndex = "numeric",
        .pointSize = "numeric",
        .legendFontSize = "numeric",
        .htmlTitle = "character",
        .scalingEnabled = "logical",
        .pointScalingCorrectionEnabled = "logical",
        .pointBorderEnabled = "logical",
        lineSize = "numeric",
        pointSize = "numeric",
        pointColor = "character",
        mainTitleFontSize = "numeric",
        axesTextFontSize = "numeric",
        legendFontSize = "numeric",
        scalingFactor = "numeric"
    ),
    methods = list(
        initialize = function(lineSize = 0.8,
                pointSize = 3,
                pointColor = NA_character_,
                mainTitleFontSize = 14,
                axesTextFontSize = 10,
                legendFontSize = 11,
                scalingFactor = 1,
                ...) {
            callSuper(
                lineSize = lineSize,
                pointSize = pointSize,
                pointColor = pointColor,
                mainTitleFontSize = mainTitleFontSize,
                axesTextFontSize = axesTextFontSize,
                legendFontSize = legendFontSize,
                scalingFactor = scalingFactor,
                ...
            )
            .legendLineBreakIndex <<- 15
            # keep the original point/legend sizes so adjustPointSize()/adjustLegendFontSize()
            # always scale relative to the initial values, not to an already adjusted value
            .pointSize <<- pointSize
            .legendFontSize <<- legendFontSize
            .htmlTitle <<- NA_character_
            .scalingEnabled <<- TRUE
            .pointScalingCorrectionEnabled <<- TRUE
            .pointBorderEnabled <<- TRUE
            .parameterNames <<- list(
                "lineSize" = "Line size",
                "pointSize" = "Point size",
                "pointColor" = "Point color",
                "mainTitleFontSize" = "Main title font size",
                "axesTextFontSize" = "Axes text font size",
                "legendFontSize" = "Legend font size",
                "scalingFactor" = "Scaling factor"
            )
        },
        clone = function() {
            "Returns an independent copy with the same visible field values"
            return(PlotSettings(
                lineSize = .self$lineSize,
                pointSize = .self$pointSize,
                pointColor = .self$pointColor,
                mainTitleFontSize = .self$mainTitleFontSize,
                axesTextFontSize = .self$axesTextFontSize,
                legendFontSize = .self$legendFontSize,
                scalingFactor = .self$scalingFactor
            ))
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing plot setting objects"
            .resetCat()
            .showParametersOfOneGroup(
                parameters = .getVisibleFieldNames(),
                title = "Plot settings",
                orderByParameterName = FALSE,
                consoleOutputEnabled = consoleOutputEnabled
            )
        },
        setColorPalette = function(p, palette, mode = c("colour", "fill", "all")) {
            "Sets the color palette"
            mode <- match.arg(mode)
            # l = 45: make colors slightly darker
            if (is.null(palette) || is.na(palette)) {
                if (mode %in% c("colour", "all")) {
                    p <- p + ggplot2::scale_colour_hue(l = 45)
                }
                if (mode %in% c("fill", "all")) {
                    p <- p + ggplot2::scale_fill_hue(l = 45)
                }
            } else if (is.character(palette)) {
                if (mode %in% c("colour", "all")) {
                    p <- p + ggplot2::scale_colour_brewer(palette = palette)
                }
                if (mode %in% c("fill", "all")) {
                    p <- p + ggplot2::scale_fill_brewer(palette = palette)
                }
            } else if (palette == 0) {
                # numeric 0 selects the grey scale (checked after is.character,
                # so a character palette name is never compared against 0)
                if (mode %in% c("colour", "all")) {
                    p <- p + ggplot2::scale_colour_grey()
                }
                if (mode %in% c("fill", "all")) {
                    p <- p + ggplot2::scale_fill_grey()
                }
            } else {
                if (mode %in% c("colour", "all")) {
                    p <- p + ggplot2::scale_colour_hue(l = 45)
                }
                if (mode %in% c("fill", "all")) {
                    p <- p + ggplot2::scale_fill_hue(l = 45)
                }
            }
            return(p)
        },
        enlargeAxisTicks = function(p) {
            "Enlarges the axis ticks"
            p <- p + ggplot2::theme(axis.ticks.length = ggplot2::unit(scaleSize(0.3), "cm"))
            return(p)
        },
        setAxesAppearance = function(p) {
            "Sets the font size and face of the axes titles and texts"
            p <- p + ggplot2::theme(axis.title.x = ggplot2::element_text(
                size = scaleSize(.self$axesTextFontSize + 1), face = "bold"))
            p <- p + ggplot2::theme(axis.title.y = ggplot2::element_text(
                size = scaleSize(.self$axesTextFontSize + 1), face = "bold"))
            p <- p + ggplot2::theme(axis.text.x = ggplot2::element_text(
                size = scaleSize(.self$axesTextFontSize)))
            p <- p + ggplot2::theme(axis.text.y = ggplot2::element_text(
                size = scaleSize(.self$axesTextFontSize)))
            return(p)
        },
        # Sets the axes labels
        setAxesLabels = function(p, xAxisLabel = NULL, yAxisLabel1 = NULL, yAxisLabel2 = NULL,
                xlab = NA_character_, ylab = NA_character_,
                scalingFactor1 = 1, scalingFactor2 = 1) {
            if (is.null(xAxisLabel) && !is.na(xlab)) {
                xAxisLabel <- xlab
            }
            plotLabsType <- getOption("rpact.plot.labs.type", "quote")
            if (plotLabsType == "quote" && !is.null(xAxisLabel)) {
                # translate well-known label identifiers into plotmath expressions
                if (xAxisLabel == "Theta") {
                    xAxisLabel <- bquote(bold("Theta" ~ Theta))
                } else if (xAxisLabel == "pi1") {
                    xAxisLabel <- bquote(bold("pi"["1"]))
                } else if (xAxisLabel == "pi2") {
                    # bug fix: was bbquote(), which is not a function and caused a
                    # runtime error ("could not find function") for the pi2 label
                    xAxisLabel <- bquote(bold("pi"["2"]))
                }
                # note: a duplicate, unreachable `xAxisLabel == "Theta"` branch was removed
            }
            p <- p + ggplot2::xlab(xAxisLabel)
            if (sum(is.na(ylab)) == 0) {
                yAxisLabel1 <- ylab[1]
                if (length(ylab) == 2) {
                    yAxisLabel2 <- ylab[2]
                }
            }
            p <- p + ggplot2::ylab(yAxisLabel1)
            p <- setSecondYAxisOnRightSide(p, yAxisLabel1, yAxisLabel2, scalingFactor1, scalingFactor2)
            return(p)
        },
        setSecondYAxisOnRightSide = function(p, yAxisLabel1, yAxisLabel2,
                scalingFactor1 = 1, scalingFactor2 = 1) {
            # only add a secondary axis if a second label exists and the two
            # y-quantities live on different scales
            if (!is.null(yAxisLabel2) && scalingFactor1 != scalingFactor2) {
                p <- p + ggplot2::scale_y_continuous(yAxisLabel1,
                    sec.axis = ggplot2::sec_axis(~ . * scalingFactor1 / scalingFactor2, name = yAxisLabel2)
                )
            }
            return(p)
        },
        setLegendTitle = function(p, legendTitle, mode = c("colour", "fill")) {
            mode <- match.arg(mode)
            if (!is.null(legendTitle) && !is.na(legendTitle) && trimws(legendTitle) != "") {
                if (mode == "colour") {
                    p <- p + ggplot2::labs(colour = .getTextLineWithLineBreak(legendTitle,
                        lineBreakIndex = scaleSize(.legendLineBreakIndex)
                    ))
                } else {
                    p <- p + ggplot2::labs(fill = .getTextLineWithLineBreak(legendTitle,
                        lineBreakIndex = scaleSize(.legendLineBreakIndex)
                    ))
                }
                p <- p + ggplot2::theme(legend.title = ggplot2::element_text(
                    colour = "black", size = scaleSize(.self$legendFontSize + 1), face = "bold"
                ))
            } else {
                # no usable title: hide the legend title entirely
                p <- p + ggplot2::theme(legend.title = ggplot2::element_blank())
                p <- p + ggplot2::labs(colour = NULL)
            }
            return(p)
        },
        setLegendLabelSize = function(p) {
            p <- p + ggplot2::theme(legend.text = ggplot2::element_text(
                size = scaleSize(.self$legendFontSize)))
            return(p)
        },
        setLegendPosition = function(p, legendPosition) {
            # legendPosition codes: -1 = none, 0 = default (square aspect),
            # 1..3 = left top/center/bottom, 4..6 = right top/center/bottom
            .assertIsValidLegendPosition(legendPosition)
            switch(as.character(legendPosition),
                "-1" = {
                    p <- p + ggplot2::theme(legend.position = "none")
                },
                "0" = {
                    p <- p + ggplot2::theme(aspect.ratio = 1)
                },
                "1" = {
                    p <- p + ggplot2::theme(legend.position = c(0.05, 1), legend.justification = c(0, 1))
                },
                "2" = {
                    p <- p + ggplot2::theme(legend.position = c(0.05, 0.5), legend.justification = c(0, 0.5))
                },
                "3" = {
                    p <- p + ggplot2::theme(legend.position = c(0.05, 0.05), legend.justification = c(0, 0))
                },
                "4" = {
                    p <- p + ggplot2::theme(legend.position = c(0.95, 1), legend.justification = c(1, 1))
                },
                "5" = {
                    p <- p + ggplot2::theme(legend.position = c(0.95, 0.5), legend.justification = c(1, 0.5))
                },
                "6" = {
                    p <- p + ggplot2::theme(legend.position = c(0.95, 0.05), legend.justification = c(1, 0))
                }
            )
            return(p)
        },
        setLegendBorder = function(p) {
            "Sets the legend border"
            # ggplot2 >= 3.4.0 renamed element_rect(size = ...) to linewidth
            if (packageVersion("ggplot2") >= "3.4.0") {
                p <- p + ggplot2::theme(
                    legend.background = ggplot2::element_rect(
                        fill = "white", colour = "black", linewidth = scaleSize(0.4))
                )
            } else {
                p <- p + ggplot2::theme(
                    legend.background = ggplot2::element_rect(
                        fill = "white", colour = "black", size = scaleSize(0.4))
                )
            }
            return(p)
        },
        adjustPointSize = function(adjustingValue) {
            "Adjusts the point size by the factor \\code{adjustingValue} (0.1 - 2), relative to the initial point size"
            .assertIsInClosedInterval(adjustingValue, "adjustingValue", lower = 0.1, upper = 2)
            pointSize <<- .self$.pointSize * adjustingValue
        },
        adjustLegendFontSize = function(adjustingValue) {
            "Adjusts the legend font size by the factor \\code{adjustingValue} (0.1 - 2), e.g., run \\cr \\code{adjustLegendFontSize(0.9)} # makes the font size 10 percent smaller"
            # doc fix: the previous docstring claimed adjustLegendFontSize(-2) subtracts
            # 2 points, but the implementation multiplies and rejects values outside [0.1, 2]
            .assertIsInClosedInterval(adjustingValue, "adjustingValue", lower = 0.1, upper = 2)
            legendFontSize <<- .self$.legendFontSize * adjustingValue
        },
        scaleSize = function(size, pointEnabled = FALSE) {
            if (isFALSE(.self$.scalingEnabled)) {
                return(size)
            }
            if (pointEnabled) {
                if (isFALSE(.pointScalingCorrectionEnabled)) {
                    return(size)
                }
                # points scale with the squared factor so their area grows with the plot
                return(size * .self$scalingFactor^2)
            }
            return(size * .self$scalingFactor)
        },
        setMainTitle = function(p, mainTitle, subtitle = NA_character_) {
            "Sets the main title"
            caption <- NA_character_
            if (!is.null(mainTitle) && inherits(mainTitle, "PlotSubTitleItems")) {
                plotLabsType <- getOption("rpact.plot.labs.type", "quote")
                if (plotLabsType == "quote") {
                    mainTitle <- mainTitle$toQuote()
                } else {
                    items <- mainTitle
                    mainTitle <- items$title
                    if (length(items$subtitle) == 1 && !is.na(items$subtitle)) {
                        if (length(subtitle) == 1 && !is.na(subtitle)) {
                            subtitle <- paste0(subtitle, ", ", items$subtitle)
                        } else {
                            subtitle <- items$subtitle
                        }
                    }
                    s <- items$toString()
                    if (length(s) == 1 && !is.na(s) && nchar(s) > 0) {
                        plotLabsCaptionEnabled <- as.logical(getOption("rpact.plot.labs.caption.enabled", "true"))
                        if (isTRUE(plotLabsCaptionEnabled)) {
                            caption <- s
                        } else {
                            # caption disabled: fold the item string into the subtitle instead
                            if (length(subtitle) == 1 && !is.na(subtitle)) {
                                subtitle <- paste0(subtitle, ", ", s)
                            } else {
                                subtitle <- s
                            }
                        }
                    }
                    if (plotLabsType == "html") {
                        .htmlTitle <<- items$toHtml()
                    }
                }
            }
            subtitleFontSize <- NA_real_
            if (length(subtitle) == 1 && !is.na(subtitle)) {
                if (is.na(caption)) {
                    caption <- ggplot2::waiver()
                }
                p <- p + ggplot2::labs(title = mainTitle, subtitle = subtitle, caption = caption)
                # shrink the subtitle font for long subtitles, capped at
                # (main title size - 2)
                targetWidth <- 130
                subtitleFontSize <- targetWidth / nchar(subtitle) * 8
                if (subtitleFontSize > scaleSize(.self$mainTitleFontSize) - 2) {
                    subtitleFontSize <- scaleSize(.self$mainTitleFontSize) - 2
                }
            } else if (length(caption) == 1 && !is.na(caption)) {
                p <- p + ggplot2::labs(title = mainTitle, caption = caption)
            } else {
                p <- p + ggplot2::ggtitle(mainTitle)
            }
            p <- p + ggplot2::theme(plot.title = ggplot2::element_text(
                hjust = 0.5, size = scaleSize(.self$mainTitleFontSize), face = "bold"
            ))
            if (!is.na(subtitleFontSize)) {
                p <- p + ggplot2::theme(
                    plot.subtitle = ggplot2::element_text(
                        hjust = 0.5, size = scaleSize(subtitleFontSize)
                    )
                )
            }
            return(p)
        },
        setMarginAroundPlot = function(p, margin = 0.2) {
            "Sets the margin around the plot, e.g., run \\cr \\code{setMarginAroundPlot(p, .2)} or \\cr \\code{setMarginAroundPlot(p, c(.1, .2, .1, .2)}"
            # bug fix: was `if (length(margin == 1))`, i.e. the length of a logical
            # vector (always >= 1), so the branch always ran; a valid 4-element
            # margin was rep()ed to length 16 and then rejected below
            if (length(margin) == 1) {
                margin <- base::rep(margin, 4)
            }
            if (!(length(margin) %in% c(1, 4))) {
                stop(
                    C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                    "'margin' (", .arrayToString(margin),
                    ") must be a numeric vector with length 1 or 4"
                )
            }
            p <- p + ggplot2::theme(plot.margin = ggplot2::unit(margin, "cm"))
            return(p)
        },
        expandAxesRange = function(p, x = NA_real_, y = NA_real_) {
            "Expands the axes range"
            if (!is.na(x)) {
                p <- p + ggplot2::expand_limits(x = x)
            }
            if (!is.na(y)) {
                p <- p + ggplot2::expand_limits(y = y)
            }
            return(p)
        },
        hideGridLines = function(p) {
            "Hides the grid lines"
            p <- p + ggplot2::theme(panel.grid.major.x = ggplot2::element_blank())
            p <- p + ggplot2::theme(panel.grid.minor.x = ggplot2::element_blank())
            p <- p + ggplot2::theme(panel.grid.major.y = ggplot2::element_blank())
            p <- p + ggplot2::theme(panel.grid.minor.y = ggplot2::element_blank())
            return(p)
        },
setTheme = function(p) { "Sets the theme" p <- p + ggplot2::theme_bw() p <- p + ggplot2::theme( panel.border = ggplot2::element_blank(), axis.line = ggplot2::element_line(colour = "black") ) return(p) }, plotPoints = function(p, pointBorder, ..., mapping = NULL) { # plot white border around the points if (pointBorder > 0 && .pointBorderEnabled) { p <- p + ggplot2::geom_point( mapping = mapping, color = "white", size = scaleSize(.self$pointSize, TRUE), alpha = 1, shape = 21, stroke = pointBorder / 2.25, show.legend = FALSE ) } if (!is.null(.self$pointColor) && length(.self$pointColor) == 1 && !is.na(.self$pointColor)) { p <- p + ggplot2::geom_point( mapping = mapping, color = .self$pointColor, size = scaleSize(.self$pointSize, TRUE), alpha = 1, shape = 19, show.legend = FALSE ) } else { p <- p + ggplot2::geom_point( mapping = mapping, size = scaleSize(.self$pointSize, TRUE), alpha = 1, shape = 19, show.legend = FALSE ) } return(p) }, plotValues = function(p, ..., plotLineEnabled = TRUE, plotPointsEnabled = TRUE, pointBorder = 4) { if (plotLineEnabled) { if (packageVersion("ggplot2") >= "3.4.0") { p <- p + ggplot2::geom_line(linewidth = scaleSize(.self$lineSize)) } else { p <- p + ggplot2::geom_line(size = scaleSize(.self$lineSize)) } } if (plotPointsEnabled) { p <- plotPoints(p, pointBorder) } return(p) }, mirrorYValues = function(p, yValues, plotLineEnabled = TRUE, plotPointsEnabled = TRUE, pointBorder = 4) { if (plotLineEnabled) { if (packageVersion("ggplot2") >= "3.4.0") { p <- p + ggplot2::geom_line(ggplot2::aes(y = -yValues), linewidth = scaleSize(.self$lineSize)) } else { p <- p + ggplot2::geom_line(ggplot2::aes(y = -yValues), size = scaleSize(.self$lineSize)) } } if (plotPointsEnabled) { p <- plotPoints(p, pointBorder, mapping = ggplot2::aes(y = -yValues)) } return(p) }, addCompanyAnnotation = function(p, enabled = TRUE) { if (!enabled) { return(p) } label <- "www.rpact.org" p <- p + ggplot2::annotate("label", x = -Inf, y = Inf, hjust = -0.1, vjust = 1, label 
= label, size = scaleSize(2.8), colour = "white", fill = "white" ) p <- p + ggplot2::annotate("text", x = -Inf, y = Inf, label = label, hjust = -.12, vjust = 1, colour = "lightgray", size = scaleSize(2.7) ) return(p) } ) ) rpact/R/f_simulation_enrichment.R0000644000176200001440000010336014445307576016636 0ustar liggesusers## | ## | *Simulation of enrichment design with combination test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_simulation_utilities.R #' @include f_core_utilities.R NULL .getIndicesOfSelectedSubsets <- function(gMax) { subsets <- .getAllAvailableSubsets(1:gMax) subsets <- subsets[grepl(as.character(gMax), subsets)] indexList <- list() subsetIndex <- 1 if (length(subsets) > 1) { subsetIndex <- c(2:length(subsets), 1) } for (i in subsetIndex) { s <- subsets[i] indices <- as.integer(strsplit(s, "", fixed = TRUE)[[1]]) indexList[[length(indexList) + 1]] <- indices } return(indexList) } .createSelectedSubsets <- function(stage, selectedPopulations) { gMax <- nrow(selectedPopulations) selectedVector <- rep(FALSE, 2^(gMax - 1)) if (gMax == 1) { selectedVector[1] <- selectedPopulations[1, stage] } if (gMax == 2) { selectedVector[1] <- selectedPopulations[1, stage] || selectedPopulations[2, stage] selectedVector[2] <- selectedPopulations[2, stage] } if (gMax == 3) { selectedVector[1] <- selectedPopulations[1, stage] || 
selectedPopulations[3, stage] selectedVector[2] <- selectedPopulations[2, stage] || selectedPopulations[3, stage] selectedVector[3] <- selectedPopulations[1, stage] || selectedPopulations[2, stage] || selectedPopulations[3, stage] selectedVector[4] <- selectedPopulations[3, stage] } if (gMax == 4) { selectedVector[1] <- selectedPopulations[1, stage] || selectedPopulations[4, stage] selectedVector[2] <- selectedPopulations[2, stage] || selectedPopulations[4, stage] selectedVector[3] <- selectedPopulations[3, stage] || selectedPopulations[4, stage] selectedVector[4] <- selectedPopulations[1, stage] || selectedPopulations[2, stage] || selectedPopulations[4, stage] selectedVector[5] <- selectedPopulations[1, stage] || selectedPopulations[3, stage] || selectedPopulations[4, stage] selectedVector[6] <- selectedPopulations[2, stage] || selectedPopulations[3, stage] || selectedPopulations[4, stage] selectedVector[7] <- selectedPopulations[1, stage] || selectedPopulations[2, stage] || selectedPopulations[3, stage] || selectedPopulations[4, stage] selectedVector[8] <- selectedPopulations[4, stage] } return(selectedVector) } .selectPopulations <- function(stage, effectVector, typeOfSelection, epsilonValue, rValue, threshold, selectPopulationsFunction) { gMax <- length(effectVector) if (typeOfSelection != "userDefined") { if (typeOfSelection == "all") { selectedPopulations <- rep(TRUE, gMax) } else { selectedPopulations <- rep(FALSE, gMax) if (typeOfSelection == "best") { selectedPopulations[which.max(effectVector)] <- TRUE } else if (tolower(typeOfSelection) == "rbest") { selectedPopulations[order(effectVector, decreasing = TRUE)[1:rValue]] <- TRUE selectedPopulations[is.na(effectVector)] <- FALSE } else if (typeOfSelection == "epsilon") { selectedPopulations[max(effectVector, na.rm = TRUE) - effectVector <= epsilonValue] <- TRUE selectedPopulations[is.na(effectVector)] <- FALSE } } selectedPopulations[effectVector <= threshold] <- FALSE } else { functionArgumentNames <- 
.getFunctionArgumentNames(selectPopulationsFunction, ignoreThreeDots = TRUE) if (length(functionArgumentNames) == 1) { .assertIsValidFunction( fun = selectPopulationsFunction, funArgName = "selectPopulationsFunction", expectedArguments = c("effectVector"), validateThreeDots = FALSE ) selectedPopulations <- selectPopulationsFunction(effectVector) } else { .assertIsValidFunction( fun = selectPopulationsFunction, funArgName = "selectPopulationsFunction", expectedArguments = c("effectVector", "stage"), validateThreeDots = FALSE ) selectedPopulations <- selectPopulationsFunction(effectVector = effectVector, stage = stage) } selectedPopulations[is.na(effectVector)] <- FALSE msg <- paste0( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'selectPopulationsFunction' returned an illegal or undefined result (", .arrayToString(selectedPopulations), "); " ) if (length(selectedPopulations) != gMax) { stop(msg, "the output must be a logical vector of length 'gMax' (", gMax, ")") } if (!is.logical(selectedPopulations)) { stop(msg, "the output must be a logical vector (is ", .getClassName(selectedPopulations), ")") } } return(selectedPopulations) } .performClosedCombinationTestForSimulationEnrichment <- function(..., stageResults, design, indices, intersectionTest, successCriterion) { if (.isTrialDesignGroupSequential(design) && (design$kMax > 1)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Group sequential design cannot be used for enrichment designs with population selection" ) } gMax <- nrow(stageResults$testStatistics) kMax <- design$kMax adjustedStageWisePValues <- matrix(NA_real_, nrow = 2^gMax - 1, ncol = kMax) overallAdjustedTestStatistics <- matrix(NA_real_, nrow = 2^gMax - 1, ncol = kMax) rejected <- matrix(FALSE, nrow = gMax, ncol = kMax) rejectedIntersections <- matrix(FALSE, nrow = nrow(indices), ncol = kMax) futility <- matrix(FALSE, nrow = gMax, ncol = kMax - 1) futilityIntersections <- matrix(FALSE, nrow = nrow(indices), ncol = kMax - 1) rejectedIntersectionsBefore <- 
matrix(FALSE, nrow = nrow(indices), ncol = 1) successStop <- rep(FALSE, kMax) futilityStop <- rep(FALSE, kMax - 1) if (.isTrialDesignFisher(design)) { weightsFisher <- .getWeightsFisher(design) } else { weightsInverseNormal <- .getWeightsInverseNormal(design) } if (gMax == 1) { intersectionTest <- "Bonferroni" } separatePValues <- stageResults$separatePValues if (intersectionTest == "SpiessensDebois") { subjectsPerStage <- stageResults[[ifelse( !is.null(stageResults[["subjectsPerStage"]]), "subjectsPerStage", "eventsPerStage" )]] testStatistics <- stageResults$testStatistics } else { subjectsPerStage <- NULL testStatistics <- NULL } for (k in 1:kMax) { for (i in 1:(2^gMax - 1)) { if (!all(is.na(separatePValues[indices[i, ] == 1, k]))) { if (intersectionTest == "SpiessensDebois") { subjectsSelected <- as.numeric(na.omit(subjectsPerStage[indices[i, ] == 1 & stageResults$selectedPopulations[, k], k])) if (length(subjectsSelected) == 1) { sigma <- 1 } else { sigma <- matrix(sqrt(subjectsSelected[1] / sum(subjectsSelected)), nrow = 2, ncol = 2) diag(sigma) <- 1 } maxTestStatistic <- max(testStatistics[indices[i, ] == 1, k], na.rm = TRUE) adjustedStageWisePValues[i, k] <- 1 - .getMultivariateDistribution( type = "normal", upper = maxTestStatistic, sigma = sigma, df = NA_real_ ) } # Bonferroni adjusted p-values else if (intersectionTest == "Bonferroni") { adjustedStageWisePValues[i, k] <- min(c(sum(indices[ i, !is.na(separatePValues[, k]) ]) * min(separatePValues[indices[i, ] == 1, k], na.rm = TRUE), 1)) } # Simes adjusted p-values else if (intersectionTest == "Simes") { adjustedStageWisePValues[i, k] <- min(sum(indices[ i, !is.na(separatePValues[, k]) ]) / (1:sum(indices[i, !is.na(separatePValues[, k])])) * sort(separatePValues[indices[i, ] == 1, k])) } # Sidak adjusted p-values else if (intersectionTest == "Sidak") { adjustedStageWisePValues[i, k] <- 1 - (1 - min(separatePValues[indices[i, ] == 1, k], na.rm = TRUE))^ sum(indices[i, !is.na(separatePValues[, k])]) } if 
(.isTrialDesignFisher(design)) { overallAdjustedTestStatistics[i, k] <- prod(adjustedStageWisePValues[i, 1:k]^weightsFisher[1:k]) } else { overallAdjustedTestStatistics[i, k] <- (weightsInverseNormal[1:k] %*% .getOneMinusQNorm(adjustedStageWisePValues[i, 1:k])) / sqrt(sum(weightsInverseNormal[1:k]^2)) } } if (.isTrialDesignFisher(design)) { rejectedIntersections[i, k] <- (overallAdjustedTestStatistics[i, k] <= design$criticalValues[k]) if (k < kMax) { futilityIntersections[i, k] <- (adjustedStageWisePValues[i, k] >= design$alpha0Vec[k]) } } else if (.isTrialDesignInverseNormal(design)) { rejectedIntersections[i, k] <- (overallAdjustedTestStatistics[i, k] >= design$criticalValues[k]) if (k < kMax) { futilityIntersections[i, k] <- (overallAdjustedTestStatistics[i, k] <= design$futilityBounds[k]) } } rejectedIntersections[is.na(rejectedIntersections[, k]), k] <- FALSE if (k == kMax && !rejectedIntersections[1, k]) { break } } rejectedIntersections[, k] <- rejectedIntersections[, k] | rejectedIntersectionsBefore rejectedIntersectionsBefore <- matrix(rejectedIntersections[, k], ncol = 1) for (j in 1:gMax) { rejected[j, k] <- all(rejectedIntersections[indices[, j] == 1, k], na.rm = TRUE) if (k < kMax) { futility[j, k] <- any(futilityIntersections[indices[, j] == 1, k], na.rm = TRUE) } } if (successCriterion == "all") { successStop[k] <- all(rejected[stageResults$selectedPopulations[1:gMax, k], k]) } else { successStop[k] <- any(rejected[, k]) } if (k < kMax) { futilityStop[k] <- all(futility[stageResults$selectedPopulations[1:gMax, k], k]) if (all(stageResults$selectedPopulations[1:gMax, k + 1] == FALSE)) { futilityStop[k] <- TRUE } } } return(list( separatePValues = separatePValues, adjustedStageWisePValues = adjustedStageWisePValues, overallAdjustedTestStatistics = overallAdjustedTestStatistics, rejected = rejected, rejectedIntersections = rejectedIntersections, selectedPopulations = stageResults$selectedPopulations, successStop = successStop, futilityStop = 
futilityStop )) } .createSimulationResultsEnrichmentObject <- function(..., design, effectList, intersectionTest, stratifiedAnalysis = NA, directionUpper = NA, # rates + survival only adaptations, typeOfSelection, effectMeasure, successCriterion, epsilonValue, rValue, threshold, plannedSubjects = NA_real_, # means + rates only plannedEvents = NA_real_, # survival only allocationRatioPlanned, minNumberOfSubjectsPerStage = NA_real_, # means + rates only maxNumberOfSubjectsPerStage = NA_real_, # means + rates only minNumberOfEventsPerStage = NA_real_, # survival only maxNumberOfEventsPerStage = NA_real_, # survival only conditionalPower, thetaH1 = NA_real_, # means + survival only stDevH1 = NA_real_, # means only piTreatmentH1 = NA_real_, # rates only piControlH1 = NA_real_, # rates only maxNumberOfIterations, seed, calcSubjectsFunction = NULL, # means + rates only calcEventsFunction = NULL, # survival only selectPopulationsFunction, showStatistics, endpoint = c("means", "rates", "survival")) { endpoint <- match.arg(endpoint) .assertIsSingleNumber(threshold, "threshold", naAllowed = FALSE) .assertIsSingleLogical(stratifiedAnalysis, "stratifiedAnalysis") .assertIsSinglePositiveInteger(rValue, "rValue", naAllowed = TRUE, validateType = FALSE) .assertIsNumericVector(allocationRatioPlanned, "allocationRatioPlanned", naAllowed = TRUE) .assertIsInOpenInterval(allocationRatioPlanned, "allocationRatioPlanned", 0, C_ALLOCATION_RATIO_MAXIMUM, naAllowed = TRUE) .assertIsSingleNumber(conditionalPower, "conditionalPower", naAllowed = TRUE) .assertIsInOpenInterval(conditionalPower, "conditionalPower", 0, 1, naAllowed = TRUE) .assertIsLogicalVector(adaptations, "adaptations", naAllowed = TRUE) if (endpoint %in% c("means", "rates")) { .assertIsNumericVector(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", naAllowed = TRUE) .assertIsNumericVector(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", naAllowed = TRUE) } else if (endpoint == "survival") { 
.assertIsNumericVector(minNumberOfEventsPerStage, "minNumberOfEventsPerStage", naAllowed = TRUE) .assertIsNumericVector(maxNumberOfEventsPerStage, "maxNumberOfEventsPerStage", naAllowed = TRUE) } .assertIsSinglePositiveInteger(maxNumberOfIterations, "maxNumberOfIterations", validateType = FALSE) .assertIsSingleLogical(showStatistics, "showStatistics", naAllowed = FALSE) .assertIsSingleNumber(seed, "seed", naAllowed = TRUE) if (endpoint %in% c("rates", "survival")) { .assertIsSingleLogical(directionUpper, "directionUpper") } if (endpoint %in% c("means", "survival")) { .assertIsSingleNumber(thetaH1, "thetaH1", naAllowed = TRUE) # means + survival only } if (endpoint == "means") { .assertIsSingleNumber(stDevH1, "stDevH1", naAllowed = TRUE) .assertIsInOpenInterval(stDevH1, "stDevH1", 0, NULL, naAllowed = TRUE) } successCriterion <- .assertIsValidSuccessCriterion(successCriterion) effectMeasure <- .assertIsValidEffectMeasure(effectMeasure) if (endpoint == "means") { simulationResults <- SimulationResultsEnrichmentMeans(design, showStatistics = showStatistics) } else if (endpoint == "rates") { simulationResults <- SimulationResultsEnrichmentRates(design, showStatistics = showStatistics) } else if (endpoint == "survival") { simulationResults <- SimulationResultsEnrichmentSurvival(design, showStatistics = showStatistics) } effectList <- .getValidatedEffectList(effectList, endpoint = endpoint) gMax <- .getGMaxFromSubGroups(effectList$subGroups) if (gMax > 4) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'populations' (", gMax, ") must not exceed 4") } .assertIsValidThreshold(threshold, activeArms = gMax) intersectionTest <- intersectionTest[1] .assertIsValidIntersectionTestEnrichment(design, intersectionTest) if (intersectionTest == "SpiessensDebois" && gMax > 2) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Spiessen & Debois intersection test cannot generally ", "be used for enrichment designs with more than two populations" ) } typeOfSelection <- 
.assertIsValidTypeOfSelection(typeOfSelection, rValue, epsilonValue, gMax) if (length(typeOfSelection) == 1 && typeOfSelection == "userDefined" && !is.null(threshold) && length(threshold) == 1 && threshold != -Inf) { warning("'threshold' (", threshold, ") will be ignored because 'typeOfSelection' = \"userDefined\"", call. = FALSE) threshold <- -Inf } if (length(typeOfSelection) == 1 && typeOfSelection != "userDefined" && !is.null(selectPopulationsFunction)) { warning("'selectPopulationsFunction' will be ignored because 'typeOfSelection' is not \"userDefined\"", call. = FALSE) } else if (!is.null(selectPopulationsFunction) && is.function(selectPopulationsFunction)) { simulationResults$selectPopulationsFunction <- selectPopulationsFunction } if (endpoint %in% c("rates", "survival")) { .setValueAndParameterType(simulationResults, "directionUpper", directionUpper, TRUE) } if (!stratifiedAnalysis && endpoint %in% c("means", "survival")) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "For testing ", endpoint, ifelse(endpoint == "survival", " designs", ""), ", only stratified analysis is supported" ) } kMax <- design$kMax if (endpoint == "means") { stDevH1 <- .ignoreParameterIfNotUsed( "stDevH1", stDevH1, kMax > 1, "design is fixed ('kMax' = 1)", "Assumed standard deviation" ) } else if (endpoint == "rates") { .assertIsSingleNumber(piTreatmentH1, "piTreatmentH1", naAllowed = TRUE) .assertIsInOpenInterval(piTreatmentH1, "piTreatmentH1", 0, 1, naAllowed = TRUE) piTreatmentH1 <- .ignoreParameterIfNotUsed( "piTreatmentH1", piTreatmentH1, kMax > 1, "design is fixed ('kMax' = 1)", "Assumed active rate(s)" ) .setValueAndParameterType(simulationResults, "piTreatmentH1", piTreatmentH1, NA_real_) .assertIsSingleNumber(piControlH1, "piControlH1", naAllowed = TRUE) .assertIsInOpenInterval(piControlH1, "piControlH1", 0, 1, naAllowed = TRUE) piControlH1 <- .ignoreParameterIfNotUsed( "piControlH1", piControlH1, kMax > 1, "design is fixed ('kMax' = 1)", "Assumed control rate(s)" ) 
.setValueAndParameterType(simulationResults, "piControlH1", piControlH1, NA_real_) } else if (endpoint == "survival") { .assertIsIntegerVector(plannedEvents, "plannedEvents", validateType = FALSE) if (length(plannedEvents) != kMax) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'plannedEvents' (", .arrayToString(plannedEvents), ") must have length ", kMax ) } .assertIsInClosedInterval(plannedEvents, "plannedEvents", lower = 1, upper = NULL) .assertValuesAreStrictlyIncreasing(plannedEvents, "plannedEvents") .setValueAndParameterType(simulationResults, "plannedEvents", plannedEvents, NA_real_) } if (endpoint %in% c("means", "rates")) { .assertIsValidPlannedSubjects(plannedSubjects, kMax) # means + rates only } if (endpoint %in% c("means", "survival")) { thetaH1 <- .ignoreParameterIfNotUsed( "thetaH1", thetaH1, kMax > 1, "design is fixed ('kMax' = 1)", "Assumed effect" ) } if (endpoint == "means") { # if (is.na(conditionalPower) && is.null(calcSubjectsFunction) && !is.na(thetaH1)) { # warning("'thetaH1' will be ignored because neither 'conditionalPower' nor ", # "'calcSubjectsFunction' is defined", call. = FALSE) # } # if (is.na(conditionalPower) && is.null(calcSubjectsFunction) && !is.na(stDevH1)) { # warning("'stDevH1' will be ignored because neither 'conditionalPower' nor ", # "'calcSubjectsFunction' is defined", call. = FALSE) # } } if (endpoint == "survival") { # if (is.na(conditionalPower) && is.null(calcEventsFunction) && !is.na(thetaH1)) { # warning("'thetaH1' will be ignored because neither 'conditionalPower' nor ", # "'calcEventsFunction' is defined", call. 
= FALSE) # } } conditionalPower <- .ignoreParameterIfNotUsed( "conditionalPower", conditionalPower, kMax > 1, "design is fixed ('kMax' = 1)" ) if (endpoint %in% c("means", "rates")) { # means + rates only minNumberOfSubjectsPerStage <- .ignoreParameterIfNotUsed( "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, kMax > 1, "design is fixed ('kMax' = 1)" ) minNumberOfSubjectsPerStage <- .assertIsValidNumberOfSubjectsPerStage(minNumberOfSubjectsPerStage, "minNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, calcSubjectsFunction, kMax, endpoint = endpoint ) maxNumberOfSubjectsPerStage <- .ignoreParameterIfNotUsed( "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, kMax > 1, "design is fixed ('kMax' = 1)" ) maxNumberOfSubjectsPerStage <- .assertIsValidNumberOfSubjectsPerStage(maxNumberOfSubjectsPerStage, "maxNumberOfSubjectsPerStage", plannedSubjects, conditionalPower, calcSubjectsFunction, kMax, endpoint = endpoint ) if (kMax > 1) { if (!all(is.na(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage)) && any(maxNumberOfSubjectsPerStage - minNumberOfSubjectsPerStage < 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") must be not smaller than minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ")" ) } .setValueAndParameterType( simulationResults, "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_ ) .setValueAndParameterType( simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_ ) } } else if (endpoint == "survival") { minNumberOfEventsPerStage <- .ignoreParameterIfNotUsed( "minNumberOfEventsPerStage", minNumberOfEventsPerStage, kMax > 1, "design is fixed ('kMax' = 1)" ) minNumberOfEventsPerStage <- .assertIsValidNumberOfSubjectsPerStage(minNumberOfEventsPerStage, "minNumberOfEventsPerStage", plannedEvents, conditionalPower, calcEventsFunction, kMax, endpoint = endpoint ) 
maxNumberOfEventsPerStage <- .ignoreParameterIfNotUsed( "maxNumberOfEventsPerStage", maxNumberOfEventsPerStage, kMax > 1, "design is fixed ('kMax' = 1)" ) maxNumberOfEventsPerStage <- .assertIsValidNumberOfSubjectsPerStage(maxNumberOfEventsPerStage, "maxNumberOfEventsPerStage", plannedEvents, conditionalPower, calcEventsFunction, kMax, endpoint = endpoint ) if (kMax > 1) { if (!all(is.na(maxNumberOfEventsPerStage - minNumberOfEventsPerStage)) && any(maxNumberOfEventsPerStage - minNumberOfEventsPerStage < 0)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfEventsPerStage' (", .arrayToString(maxNumberOfEventsPerStage), ") must be not smaller than 'minNumberOfEventsPerStage' (", .arrayToString(minNumberOfEventsPerStage), ")" ) } .setValueAndParameterType( simulationResults, "minNumberOfEventsPerStage", minNumberOfEventsPerStage, NA_real_ ) .setValueAndParameterType( simulationResults, "maxNumberOfEventsPerStage", maxNumberOfEventsPerStage, NA_real_ ) } } if (kMax == 1 && !is.na(conditionalPower)) { warning("'conditionalPower' will be ignored for fixed sample design", call. = FALSE) } if (endpoint %in% c("means", "rates") && kMax == 1 && !is.null(calcSubjectsFunction)) { warning("'calcSubjectsFunction' will be ignored for fixed sample design", call. = FALSE) } if (endpoint == "survival" && kMax == 1 && !is.null(calcEventsFunction)) { warning("'calcEventsFunction' will be ignored for fixed sample design", call. = FALSE) } if (endpoint %in% c("means", "rates") && is.na(conditionalPower) && is.null(calcSubjectsFunction)) { if (length(minNumberOfSubjectsPerStage) != 1 || !is.na(minNumberOfSubjectsPerStage)) { warning("'minNumberOfSubjectsPerStage' (", .arrayToString(minNumberOfSubjectsPerStage), ") will be ignored because ", "neither 'conditionalPower' nor 'calcSubjectsFunction' is defined", call. 
= FALSE ) simulationResults$minNumberOfSubjectsPerStage <- NA_real_ } if (length(maxNumberOfSubjectsPerStage) != 1 || !is.na(maxNumberOfSubjectsPerStage)) { warning("'maxNumberOfSubjectsPerStage' (", .arrayToString(maxNumberOfSubjectsPerStage), ") will be ignored because ", "neither 'conditionalPower' nor 'calcSubjectsFunction' is defined", call. = FALSE ) simulationResults$maxNumberOfSubjectsPerStage <- NA_real_ } } if (endpoint == "survival" && is.na(conditionalPower) && is.null(calcEventsFunction)) { if (length(minNumberOfEventsPerStage) != 1 || !is.na(minNumberOfEventsPerStage)) { warning("'minNumberOfEventsPerStage' (", .arrayToString(minNumberOfEventsPerStage), ") ", "will be ignored because neither 'conditionalPower' nor 'calcEventsFunction' is defined", call. = FALSE ) simulationResults$minNumberOfEventsPerStage <- NA_real_ } if (length(maxNumberOfEventsPerStage) != 1 || !is.na(maxNumberOfEventsPerStage)) { warning("'maxNumberOfEventsPerStage' (", .arrayToString(maxNumberOfEventsPerStage), ") ", "will be ignored because neither 'conditionalPower' nor 'calcEventsFunction' is defined", call. 
= FALSE ) simulationResults$maxNumberOfEventsPerStage <- NA_real_ } } if (endpoint %in% c("means", "rates")) { simulationResults$.setParameterType( "calcSubjectsFunction", ifelse(kMax == 1, C_PARAM_NOT_APPLICABLE, ifelse(!is.null(calcSubjectsFunction) && kMax > 1, C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE) ) ) } else if (endpoint == "survival") { simulationResults$.setParameterType( "calcEventsFunction", ifelse(kMax == 1, C_PARAM_NOT_APPLICABLE, ifelse(!is.null(calcEventsFunction) && kMax > 1, C_PARAM_USER_DEFINED, C_PARAM_DEFAULT_VALUE) ) ) } if (endpoint == "means") { if (is.null(calcSubjectsFunction)) { calcSubjectsFunction <- .getSimulationMeansEnrichmentStageSubjects } else { .assertIsValidFunction( fun = calcSubjectsFunction, funArgName = "calcSubjectsFunction", expectedFunction = .getSimulationMeansEnrichmentStageSubjects ) } simulationResults$calcSubjectsFunction <- calcSubjectsFunction } else if (endpoint == "rates") { if (is.null(calcSubjectsFunction)) { calcSubjectsFunction <- .getSimulationRatesEnrichmentStageSubjects } else { .assertIsValidFunction( fun = calcSubjectsFunction, funArgName = "calcSubjectsFunction", expectedFunction = .getSimulationRatesEnrichmentStageSubjects ) } simulationResults$calcSubjectsFunction <- calcSubjectsFunction } else if (endpoint == "survival") { if (is.null(calcEventsFunction)) { calcEventsFunction <- .getSimulationSurvivalEnrichmentStageEvents } else { .assertIsValidFunction( fun = calcEventsFunction, funArgName = "calcEventsFunction", expectedFunction = .getSimulationSurvivalEnrichmentStageEvents ) } simulationResults$calcEventsFunction <- calcEventsFunction } if (any(is.na(allocationRatioPlanned))) { allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT } if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, design$kMax) } else if (length(allocationRatioPlanned) != design$kMax) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'allocationRatioPlanned' (", 
.arrayToString(allocationRatioPlanned), ") ", "must have length 1 or ", design$kMax, " (kMax)" ) } if (length(unique(allocationRatioPlanned)) == 1) { .setValueAndParameterType( simulationResults, "allocationRatioPlanned", allocationRatioPlanned[1], defaultValue = 1 ) } else { .setValueAndParameterType( simulationResults, "allocationRatioPlanned", allocationRatioPlanned, defaultValue = rep(1, design$kMax) ) } if (endpoint %in% c("means", "rates")) { .setValueAndParameterType(simulationResults, "plannedSubjects", plannedSubjects, NA_real_) .setValueAndParameterType(simulationResults, "minNumberOfSubjectsPerStage", minNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE ) .setValueAndParameterType(simulationResults, "maxNumberOfSubjectsPerStage", maxNumberOfSubjectsPerStage, NA_real_, notApplicableIfNA = TRUE ) } else if (endpoint == "survival") { .setValueAndParameterType(simulationResults, "plannedEvents", plannedEvents, NA_real_) .setValueAndParameterType(simulationResults, "minNumberOfEventsPerStage", minNumberOfEventsPerStage, NA_real_, notApplicableIfNA = TRUE ) .setValueAndParameterType(simulationResults, "maxNumberOfEventsPerStage", maxNumberOfEventsPerStage, NA_real_, notApplicableIfNA = TRUE ) } .setValueAndParameterType(simulationResults, "conditionalPower", conditionalPower, NA_real_, notApplicableIfNA = TRUE ) if (endpoint %in% c("means", "survival")) { .setValueAndParameterType(simulationResults, "thetaH1", thetaH1, NA_real_, notApplicableIfNA = TRUE) } if (endpoint == "means") { .setValueAndParameterType(simulationResults, "stDevH1", stDevH1, NA_real_, notApplicableIfNA = TRUE) } .setValueAndParameterType( simulationResults, "maxNumberOfIterations", as.integer(maxNumberOfIterations), C_MAX_SIMULATION_ITERATIONS_DEFAULT ) simulationResults$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED)) simulationResults$seed <- .setSeed(seed) if (is.null(adaptations) || all(is.na(adaptations))) { adaptations <- 
rep(TRUE, kMax - 1) } if (length(adaptations) != kMax - 1) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'adaptations' must have length ", (kMax - 1), " (kMax - 1)") } .setValueAndParameterType(simulationResults, "adaptations", adaptations, rep(TRUE, kMax - 1)) simulationResults$effectList <- effectList simulationResults$.setParameterType("effectList", C_PARAM_USER_DEFINED) simulationResults$populations <- as.integer(gMax) simulationResults$.setParameterType("populations", C_PARAM_DERIVED) .setValueAndParameterType( simulationResults, "stratifiedAnalysis", stratifiedAnalysis, C_STRATIFIED_ANALYSIS_DEFAULT ) if (typeOfSelection != "userDefined") { .setValueAndParameterType(simulationResults, "threshold", threshold, -Inf) .setValueAndParameterType(simulationResults, "epsilonValue", epsilonValue, NA_real_) .setValueAndParameterType(simulationResults, "rValue", rValue, NA_real_) } .setValueAndParameterType(simulationResults, "intersectionTest", intersectionTest, C_INTERSECTION_TEST_ENRICHMENT_DEFAULT) .setValueAndParameterType(simulationResults, "typeOfSelection", typeOfSelection, C_TYPE_OF_SELECTION_DEFAULT) .setValueAndParameterType(simulationResults, "successCriterion", successCriterion, C_SUCCESS_CRITERION_DEFAULT) .setValueAndParameterType(simulationResults, "effectMeasure", effectMeasure, C_EFFECT_MEASURE_DEFAULT) warning("Simulation of enrichment designs is experimental and hence not fully validated (see www.rpact.com/experimental)", call. 
= FALSE) return(simulationResults) } rpact/R/f_core_utilities.R0000644000176200001440000012644214450463134015254 0ustar liggesusers## | ## | *Core utilities* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7147 $ ## | Last changed: $Date: 2023-07-03 08:10:31 +0200 (Mo, 03 Jul 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_constants.R #' @include f_logger.R NULL .getLogicalEnvironmentVariable <- function(variableName) { result <- as.logical(Sys.getenv(variableName)) return(ifelse(is.na(result), FALSE, result)) } .getPackageName <- function(functionName) { .assertIsSingleCharacter(functionName, "functionName") tryCatch( { return(environmentName(environment(get(functionName)))) }, error = function(e) { return(NA_character_) } ) } .toCapitalized <- function(x, ignoreBlackList = FALSE) { if (is.null(x) || is.na(x) || !is.character(x)) { return(x) } if (!ignoreBlackList) { if (x %in% c("pi", "pi1", "pi2", "mu", "mu1", "mu2")) { return(x) } } s <- strsplit(x, " ")[[1]] s <- paste0(toupper(substring(s, 1, 1)), substring(s, 2)) wordsToExclude <- c("And", "The", "Of", "Or", "By") s[s %in% wordsToExclude] <- tolower(s[s %in% wordsToExclude]) s <- paste(s, collapse = " ") s <- sub("non\\-binding", "Non-Binding", s) s <- sub("binding", "Binding", s) return(s) } .formatCamelCaseSingleWord <- function(x, title = FALSE) { if (length(x) == 0 || nchar(trimws(x)) == 0) { return(x) } indices <- gregexpr("[A-Z]", x)[[1]] parts <- strsplit(x, "[A-Z]")[[1]] result <- "" for (i in 
1:length(indices)) {
        index <- indices[i]
        # lower-case the camelCase hump and (optionally) re-capitalize it
        y <- tolower(substring(x, index, index))
        if (title) {
            y <- .firstCharacterToUpperCase(y)
        }
        value <- ifelse(title, .firstCharacterToUpperCase(parts[i]), parts[i])
        result <- paste0(result, value, " ", y)
    }
    # append the remainder after the last upper-case character, if any
    if (length(parts) > length(indices)) {
        result <- paste0(result, parts[length(parts)])
    }
    return(trimws(result))
}

# Splits a camelCase string into blank-separated words by applying
# .formatCamelCaseSingleWord() to each blank-separated token of 'x';
# optionally title-cases the result via .toCapitalized().
# A trailing blank of 'x' is preserved in the result.
.formatCamelCase <- function(x, title = FALSE, ..., ignoreBlackList = FALSE) {
    words <- strsplit(x, " ")[[1]]
    parts <- character(0)
    for (word in words) {
        parts <- c(parts, .formatCamelCaseSingleWord(word, title = title))
    }
    result <- paste0(parts, collapse = " ")
    if (grepl(" $", x)) {
        result <- paste0(result, " ")
    }
    if (title) {
        result <- .toCapitalized(result, ignoreBlackList = ignoreBlackList)
    }
    # .toCapitalized() may drop the trailing blank again; restore it
    if (grepl(" $", x) && !grepl(" $", result)) {
        result <- paste0(result, " ")
    }
    return(result)
}

# Upper-cases the first character of 'x'. Optional arguments in '...'
# are pasted onto 'x' (separated by 'sep') before the conversion.
.firstCharacterToUpperCase <- function(x, ..., sep = "") {
    args <- list(...)
    if (length(args) > 0) {
        x <- paste(x, unlist(args, use.names = FALSE), collapse = sep, sep = sep)
    }
    substr(x, 1, 1) <- toupper(substr(x, 1, 1))
    return(x)
}

# TRUE if the lower-cased regular expression 'pattern' matches the
# lower-cased 'x' (grep() returns matching indices; sum > 0 on a match).
.equalsRegexpIgnoreCase <- function(x, pattern) {
    x <- tolower(x)
    pattern <- tolower(pattern)
    result <- grep(pattern, x)
    return(sum(result) > 0)
}

#'
#' @title
#' Get Optional Argument
#'
#' @description
#' Returns the value of an optional argument if it exists.
#'
#' @param optionalArgumentName the name of the optional argument.
#'
#' @details
#' Internal function.
#'
#' @return the value of the optional argument if it exists; NULL otherwise.
#'
#' @examples
#' f <- function(...) {
#'     print(.getOptionalArgument("x", ...))
#' }
#' f(x = 1)
#' f(y = 1)
#'
#' @keywords internal
#'
#' @noRd
#'
.getOptionalArgument <- function(optionalArgumentName, ..., optionalArgumentDefaultValue = NULL) {
    args <- list(...)
if (optionalArgumentName %in% names(args)) {
        return(args[[optionalArgumentName]])
    }
    return(optionalArgumentDefaultValue)
}

# TRUE if 'arg' is missing, NULL, zero-length, or a single NA.
# length() is probed inside tryCatch because 'arg' may be a promise whose
# evaluation fails; in that case a warning is logged and is.na() decides.
.isUndefinedArgument <- function(arg) {
    if (missing(arg) || is.null(arg)) {
        return(TRUE)
    }
    tryCatch(
        {
            if (length(arg) == 0) {
                return(TRUE)
            }
            if (length(arg) > 1) {
                return(FALSE)
            }
        },
        error = function(e) {
            paramName <- deparse(substitute(arg))
            .logWarn(
                "Failed to execute '.isUndefinedArgument(%s)' ('%s' is an instance of class '%s'): %s",
                paramName, paramName, .getClassName(arg), e
            )
        }
    )
    return(is.na(arg))
}

# Complement of .isUndefinedArgument(): TRUE if 'arg' holds a usable value.
# If 'argumentExistsValidationEnabled' and the deparsed argument name does not
# exist as an object (and is not a '$'/'[' expression), a dedicated
# "has not been defined anywhere" error is raised when evaluation fails.
.isDefinedArgument <- function(arg, argumentExistsValidationEnabled = TRUE) {
    paramName <- deparse(substitute(arg))
    if (argumentExistsValidationEnabled && length(grep("\\$|\\[|\\]", paramName)) == 0 && !exists(paramName)) {
        tryCatch(
            {
                if (missing(arg) || is.null(arg)) {
                    return(FALSE)
                }
            },
            error = function(e) {
                stop(
                    C_EXCEPTION_TYPE_MISSING_ARGUMENT,
                    "the object '", paramName, "' has not been defined anywhere. ",
                    "Please define it first, e.g., run '", paramName, " <- 1'"
                )
            }
        )
    }
    if (missing(arg) || is.null(arg)) {
        return(FALSE)
    }
    tryCatch(
        {
            if (length(arg) == 0) {
                return(FALSE)
            }
            if (length(arg) > 1) {
                return(TRUE)
            }
        },
        error = function(e) {
            paramName <- deparse(substitute(arg))
            .logWarn(
                "Failed to execute '.isDefinedArgument(%s)' ('%s' is an instance of class '%s'): %s",
                paramName, paramName, .getClassName(arg), e
            )
        }
    )
    return(!is.na(arg))
}

# Joins the elements of 'x' into a single string.
# mode "csv": plain separator join; "vector": additionally wrapped in "c(...)";
# "and"/"or": natural-language enumeration ("a, b and c").
.getConcatenatedValues <- function(x, separator = ", ", mode = c("csv", "vector", "and", "or")) {
    if (is.null(x) || length(x) <= 1) {
        return(x)
    }
    mode <- match.arg(mode)
    if (mode %in% c("csv", "vector")) {
        result <- paste(x, collapse = separator)
        if (mode == "vector") {
            result <- paste0("c(", result, ")")
        }
        return(result)
    }
    if (length(x) == 2) {
        return(paste(x, collapse = paste0(" ", mode, " ")))
    }
    space <- ifelse(grepl(" $", separator), "", " ")
    # NOTE: '1:length(x) - 1' parses as '(1:length(x)) - 1', i.e. 0:(n - 1);
    # index 0 is silently dropped, so this selects the first n - 1 elements
    part1 <- x[1:length(x) - 1]
    part2 <- x[length(x)]
    return(paste0(paste(part1, collapse = separator), separator, space, mode, " ", part2))
}

#'
#' @examples
#' .getConcatenatedValues(1)
#' .getConcatenatedValues(1:2)
#' .getConcatenatedValues(1:3)
#' .getConcatenatedValues(1, mode = "vector")
#' .getConcatenatedValues(1:2, mode = "vector")
#' .getConcatenatedValues(1:3, mode = "vector")
#' .getConcatenatedValues(1, mode = "and")
#' .getConcatenatedValues(1:2, mode = "and")
#' .getConcatenatedValues(1:3, mode = "and")
#' .getConcatenatedValues(1, mode = "or")
#' .getConcatenatedValues(1:2, mode = "or")
#' .getConcatenatedValues(1:3, mode = "or")
#' .getConcatenatedValues(1, mode = "or", separator = ";")
#' .getConcatenatedValues(1:2, mode = "or", separator = ";")
#' .getConcatenatedValues(1:3, mode = "or", separator = ";")
#'
#' @noRd
#'
# Formats a vector (or matrix) as a human-readable string for messages and
# print output: numeric values are rounded to 'digits', elements are
# optionally quoted ('encapsulate'), output is truncated after 'maxLength'
# elements / 'maxCharacters' characters with a trailing "...".
.arrayToString <- function(x, ...,
        separator = ", ",
        vectorLookAndFeelEnabled = FALSE,
        encapsulate = FALSE,
        digits = 3,
        maxLength = 80L,
        maxCharacters = 160L,
        mode = c("csv", "vector", "and", "or")) {
    .assertIsSingleInteger(digits, "digits", naAllowed = TRUE, validateType = FALSE)
    .assertIsInClosedInterval(digits, "digits", lower = 0, upper = NULL)
    .assertIsSingleInteger(maxLength, "maxLength", naAllowed = FALSE, validateType = FALSE)
    .assertIsInClosedInterval(maxLength, "maxLength", lower = 1, upper = NULL)
    .assertIsSingleInteger(maxCharacters, "maxCharacters", naAllowed = FALSE, validateType = FALSE)
    .assertIsInClosedInterval(maxCharacters, "maxCharacters", lower = 3, upper = NULL)
    if (missing(x) || is.null(x) || length(x) == 0) {
        return("NULL")
    }
    if (length(x) == 1 && is.na(x)) {
        return("NA")
    }
    # non-atomic input: fall back to the class name
    if (!is.numeric(x) && !is.character(x) && !is.logical(x) && !is.integer(x)) {
        return(.getClassName(x))
    }
    if (is.numeric(x) && !is.na(digits)) {
        # with digits > 0, values smaller than the rounding resolution are
        # left unrounded to avoid formatting them as "0"
        if (digits > 0) {
            indices <- which(!is.na(x) & abs(x) >= 10^-digits)
        } else {
            indices <- which(!is.na(x))
        }
        x[indices] <- as.character(round(x[indices], digits))
    }
    mode <- match.arg(mode)
    if (mode == "csv" && vectorLookAndFeelEnabled) {
        mode <- "vector"
    }
    # matrices: format each row as "(a, b, ...)" and join the rows
    if (is.matrix(x) && nrow(x) > 1 && ncol(x) > 1) {
        result <- c()
        for (i in 1:nrow(x)) {
            row <- x[i, ]
            if (encapsulate) {
                row <- paste0("'", row, "'")
            }
            result <- c(result, paste0("(", paste(row, collapse = separator), ")"))
        }
        return(.getConcatenatedValues(result, separator = separator, mode = mode))
    }
    if (encapsulate) {
        x <- paste0("'", x, "'")
    }
    if (length(x) > maxLength) {
        x <- c(x[1:maxLength], "...")
    }
    s <- .getConcatenatedValues(x, separator = separator, mode = mode)
    # character-based truncation: re-join element by element while the
    # result still fits into 'maxCharacters'
    if (nchar(s) > maxCharacters && length(x) > 1) {
        s <- x[1]
        index <- 2
        while (nchar(paste0(s, separator, x[index])) <= maxCharacters && index <= length(x)) {
            s <- paste0(s, separator, x[index])
            index <- index + 1
        }
        s <- paste0(s, separator, "...")
        if (vectorLookAndFeelEnabled && length(x) > 1) {
            s <- paste0("c(", s, ")")
        }
    }
    return(s)
}

# Formats a named list as "name1 = value1, name2 = value2, ..."; nested
# lists are rendered recursively (wrapped in "{...}" unless
# 'listLookAndFeelEnabled', which instead wraps the result in "list(...)").
.listToString <- function(a, separator = ", ", listLookAndFeelEnabled = FALSE, encapsulate = FALSE) {
    if (missing(a) || is.null(a) || length(a) == 0) {
        return("NULL")
    }
    if (length(a) == 1 && is.na(a)) {
        return("NA")
    }
    result <- ""
    for (name in names(a)) {
        value <- a[[name]]
        if (is.list(value)) {
            value <- .listToString(value,
                separator = separator,
                listLookAndFeelEnabled = listLookAndFeelEnabled,
                encapsulate = encapsulate
            )
            if (!listLookAndFeelEnabled) {
                value <- paste0("{", value, "}")
            }
        } else {
            if (length(value) > 1) {
                value <- .arrayToString(value,
                    separator = separator,
                    encapsulate = encapsulate
                )
                value <- paste0("(", value, ")")
            } else if (encapsulate) {
                value <- sQuote(value)
            }
        }
        entry <- paste(name, "=", value)
        if (nchar(result) > 0) {
            result <- paste(result, entry, sep = ", ")
        } else {
            result <- entry
        }
    }
    if (!listLookAndFeelEnabled) {
        return(result)
    }
    return(paste0("list(", result, ")"))
}

# Returns 'input' if f(input) = 'output' is zero within 'tolerance',
# NA_real_ otherwise (used as a shortcut before a full root search).
.getInputForZeroOutputInsideTolerance <- function(input, output, tolerance = .Machine$double.eps^0.25) {
    if (is.null(tolerance) || is.na(tolerance)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'tolerance' must be a valid double")
    }
    if (tolerance < 0) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'tolerance' (", tolerance, ") must be >= 0")
    }
    if (is.null(input)) {
stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'input' must be a valid double or NA")
    }
    if (is.null(output) || is.na(output)) {
        return(NA_real_)
    }
    if (abs(output) <= tolerance) {
        return(input)
    }
    return(NA_real_)
}

# Given two candidate inputs and their function outputs, returns the input
# whose output is (closest to and) within 'tolerance' of zero; NA_real_ if
# neither qualifies. NULL/NA outputs are treated as "no information".
.getInputProducingZeroOutput <- function(input1, output1, input2, output2, tolerance = .Machine$double.eps^0.25) {
    if ((is.na(output1) || is.null(output1)) && (is.na(output2) || is.null(output2))) {
        return(NA_real_)
    }
    if (is.na(output1) || is.null(output1)) {
        return(.getInputForZeroOutputInsideTolerance(input2, output2, tolerance))
    }
    if (is.na(output2) || is.null(output2)) {
        return(.getInputForZeroOutputInsideTolerance(input1, output1, tolerance))
    }
    if (abs(output1) <= abs(output2) && !is.na(input1)) {
        return(.getInputForZeroOutputInsideTolerance(input1, output1, tolerance))
    }
    return(.getInputForZeroOutputInsideTolerance(input2, output2, tolerance))
}

#'
#' @title
#' Get One Dimensional Root
#'
#' @description
#' Searches and returns the one dimensional root of a function using \code{uniroot}.
#'
#' @param acceptResultsOutOfTolerance if \code{TRUE}, results will be accepted in any case;
#' if \code{FALSE}, \code{NA_real_} will be returned in case of tolerance discrepancy
#'
#' @details
#' Internal function. Strategy: (1) check whether one of the interval bounds
#' already yields zero; (2) try \code{stats::uniroot}; (3) optionally try the
#' C++ \code{zeroin} routine; (4) fall back to a manual bisection search.
#'
#' @return the root.
#'
#' @keywords internal
#'
#' @noRd
#'
.getOneDimensionalRoot <- function(fun, ...,
        lower, upper,
        tolerance = .Machine$double.eps^0.25,
        acceptResultsOutOfTolerance = FALSE,
        suppressWarnings = TRUE,
        callingFunctionInformation = NA_character_,
        cppEnabled = FALSE) {
    .assertIsSingleNumber(lower, "lower")
    .assertIsSingleNumber(upper, "upper")
    .assertIsSingleNumber(tolerance, "tolerance")

    # shortcut: one of the bounds may already be a root within tolerance
    resultLower <- fun(lower, ...)
    resultUpper <- fun(upper, ...)
    result <- .getInputProducingZeroOutput(lower, resultLower, upper, resultUpper, tolerance)
    if (!is.na(result)) {
        return(result)
    }

    unirootResult <- NULL
    tryCatch(
        {
            unirootResult <- stats::uniroot(
                f = fun,
                lower = lower,
                upper = upper,
                tol = tolerance,
                trace = 2,
                extendInt = "no",
                ...
            )
        },
        warning = function(w) {
            .logWarn(
                .getCallingFunctionInformation(callingFunctionInformation),
                "uniroot(f, lower = %s, upper = %s, tol = %s) produced a warning: %s",
                lower, upper, tolerance, w
            )
        },
        error = function(e) {
            msg <- "Failed to run uniroot(f, lower = %s, upper = %s, tol = %s): %s"
            if (getLogLevel() == C_LOG_LEVEL_DEBUG) {
                .logError(msg, lower, upper, tolerance, e)
            } else {
                .logWarn(msg, lower, upper, tolerance, e)
            }
        }
    )
    # accept the uniroot solution if its function value is close enough to 0
    if (!is.null(unirootResult) && abs(unirootResult$f.root) <= max(tolerance * 100, 1e-07) * 1.2) {
        return(unirootResult$root)
    }

    if (cppEnabled && missing(...)) {
        # Fix: initialize before tryCatch; previously, if zeroin() threw an
        # error, 'zeroinResult' did not exist and the is.null() check below
        # failed with "object not found" instead of falling through.
        zeroinResult <- NULL
        tryCatch(
            {
                zeroinResult <- zeroin(fun, lower, upper, tolerance, 100)
            },
            warning = function(w) {
                .logWarn(
                    .getCallingFunctionInformation(callingFunctionInformation),
                    "zeroin(f, lower = %s, upper = %s, tol = %s) produced a warning: %s",
                    lower, upper, tolerance, w
                )
            },
            error = function(e) {
                msg <- "Failed to run zeroin(f, lower = %s, upper = %s, tol = %s): %s"
                if (getLogLevel() == C_LOG_LEVEL_DEBUG) {
                    .logError(msg, lower, upper, tolerance, e)
                } else {
                    .logWarn(msg, lower, upper, tolerance, e)
                }
            }
        )
        if (!is.null(zeroinResult) && !(abs(fun(zeroinResult)) > max(tolerance * 100, 1e-07))) {
            return(zeroinResult)
        }
    }

    # uniroot failed entirely: fall back to the manual bisection search
    if (is.null(unirootResult)) {
        direction <- ifelse(fun(lower) < fun(upper), 1, -1)
        if (is.na(direction)) {
            return(NA_real_)
        }
        return(.getOneDimensionalRootBisectionMethod(
            fun = fun,
            lower = lower,
            upper = upper,
            tolerance = tolerance,
            acceptResultsOutOfTolerance = acceptResultsOutOfTolerance,
            direction = direction,
            suppressWarnings = suppressWarnings,
            callingFunctionInformation = callingFunctionInformation
        ))
    }

    # uniroot converged but outside tolerance
    if (!acceptResultsOutOfTolerance) {
        if (!suppressWarnings) {
            warning(.getCallingFunctionInformation(callingFunctionInformation),
                "NA returned because root search by 'uniroot' produced a function result (",
                unirootResult$f.root,
                ") that differs from target 0 ",
                "(lower = ", lower, ", upper = ", upper, ", tolerance = ", tolerance,
                ", last function argument was ",
unirootResult$root, ")",
                call. = FALSE
            )
        }
        return(NA_real_)
    } else if (!suppressWarnings) {
        warning(.getCallingFunctionInformation(callingFunctionInformation),
            "Root search by 'uniroot' produced a function result (", unirootResult$f.root, ") ",
            "that differs from target 0 ",
            "(lower = ", lower, ", upper = ", upper, ", tolerance = ", tolerance,
            ", last function argument was ", unirootResult$root, ")",
            call. = FALSE
        )
    }
    return(unirootResult$root)
}

# Returns "" for NA, otherwise the given prefix followed by ": "
# (used to prepend the calling function's name to warnings).
.getCallingFunctionInformation <- function(x) {
    if (is.na(x)) {
        return("")
    }
    return(paste0(x, ": "))
}

#'
#' @title
#' Get One Dimensional Root Bisection Method
#'
#' @description
#' Searches and returns the one dimensional root of a function using the bisection method.
#'
#' @param acceptResultsOutOfTolerance if \code{TRUE}, results will be accepted in any case;
#' if \code{FALSE}, \code{NA_real_} will be returned in case of tolerance discrepancy
#'
#' @details
#' Internal function.
#'
#' @keywords internal
#'
#' @noRd
#'
.getOneDimensionalRootBisectionMethod <- function(fun, ...,
        lower, upper,
        tolerance = C_ANALYSIS_TOLERANCE_DEFAULT,
        acceptResultsOutOfTolerance = FALSE,
        maxSearchIterations = 50,
        direction = 0,
        suppressWarnings = TRUE,
        callingFunctionInformation = NA_character_) {
    lowerStart <- lower
    upperStart <- upper
    # direction = 0 means "derive from the bounds": +1 for increasing f
    if (direction == 0) {
        direction <- ifelse(fun(lower) < fun(upper), 1, -1)
    }
    .logTrace(
        "Start special root search: lower = %s, upper = %s, tolerance = %s, direction = %s",
        lower, upper, tolerance, direction
    )
    precision <- 1
    while (!is.na(precision) && precision > tolerance) {
        argument <- (lower + upper) / 2
        result <- fun(argument)
        .logTrace(
            "Root search step: f(%s, lower = %s, upper = %s, direction = %s) = %s",
            argument, lower, upper, direction, result
        )
        # NOTE: relies on lazy promise evaluation inside ifelse(): only the
        # selected assignment expression is evaluated, in this frame
        ifelse(result * direction < 0, lower <- argument, upper <- argument)
        maxSearchIterations <- maxSearchIterations - 1
        if (maxSearchIterations < 0) {
            if (!suppressWarnings) {
                warning(.getCallingFunctionInformation(callingFunctionInformation),
                    "Root search via 'bisection' stopped: maximum number of search iterations reached. ",
                    "Check if lower and upper search bounds were calculated correctly",
                    call. = FALSE
                )
            }
            .plotMonotoneFunctionRootSearch(fun, lowerStart, upperStart)
            return(NA_real_)
        }
        precision <- upper - lower
    }
    if (is.infinite(result) || abs(result) > max(tolerance * 100, 1e-07)) { # 0.01) { # tolerance * 20
        .plotMonotoneFunctionRootSearch(fun, lowerStart, upperStart)
        if (!acceptResultsOutOfTolerance) {
            if (!suppressWarnings) {
                warning(.getCallingFunctionInformation(callingFunctionInformation),
                    "NA returned because root search via 'bisection' produced a function result (",
                    result, ") that differs from target 0 ",
                    "(tolerance is ", tolerance, ", last function argument was ", argument, ")",
                    call. = FALSE
                )
            }
            return(NA_real_)
        } else if (!suppressWarnings) {
            warning(.getCallingFunctionInformation(callingFunctionInformation),
                "Root search via 'bisection' produced a function result (", result, ") ",
                "that differs from target 0 ",
                "(tolerance is ", tolerance, ", last function argument was ", argument, ")",
                call. = FALSE
            )
        }
    }
    return(argument)
}

# Debug aid: when the log level is TRACE, plots f over the search interval
# (21 sample points) so a failed root search can be inspected visually.
.plotMonotoneFunctionRootSearch <- function(f, lowerStart, upperStart) {
    if (getLogLevel() != C_LOG_LEVEL_TRACE) {
        return(invisible())
    }
    values <- c()
    params <- seq(from = lowerStart, to = upperStart, by = (upperStart - lowerStart) / 20)
    for (i in params) {
        values <- c(values, f(i))
    }
    graphics::plot(params, values)
}

# Inserts a line break at the space found by .getSpaceIndex();
# returns 'line' unchanged if no suitable break position exists.
.getTextLineWithLineBreak <- function(line, lineBreakIndex) {
    index <- .getSpaceIndex(line, lineBreakIndex)
    if (index == -1) {
        return(line)
    }
    a <- substr(line, 0, index - 1)
    b <- substr(line, index + 1, nchar(line))
    return(paste0(a, "\n", b))
}

# Returns the position of the first space at or after the middle of 'line',
# or -1 if the line is short enough, already contains "\n" (as literal
# regexp match), or has no such space.
.getSpaceIndex <- function(line, lineBreakIndex) {
    if (nchar(line) <= lineBreakIndex) {
        return(-1)
    }
    if (regexpr("\\n", line) > 0) {
        return(-1)
    }
    len <- nchar(line)
    lineSplit <- strsplit(line, "")[[1]]
    for (i in (len / 2):length(lineSplit)) {
        char <- lineSplit[i]
        if (char == " ") {
            return(i)
        }
    }
    return(-1)
}

# Validating comparison: both arguments must be single non-NA values.
.isFirstValueGreaterThanSecondValue <- function(firstValue, secondValue) {
    if (is.null(firstValue) || length(firstValue) != 1 || is.na(firstValue)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'firstValue' (", firstValue, ") must be a valid numeric value"
        )
    }
    if (is.null(secondValue) || length(secondValue) != 1 || is.na(secondValue)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'secondValue' (", secondValue, ") must be a valid numeric value"
        )
    }
    return(firstValue > secondValue)
}

# Validating comparison: both arguments must be single non-NA values.
.isFirstValueSmallerThanSecondValue <- function(firstValue, secondValue) {
    if (is.null(firstValue) || length(firstValue) != 1 || is.na(firstValue)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'firstValue' (", firstValue, ") must be a valid numeric value"
        )
    }
    if (is.null(secondValue) || length(secondValue) != 1 || is.na(secondValue)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'secondValue' (", secondValue, ") must be a valid numeric value"
        )
    }
    return(firstValue < secondValue)
}

# Null-safe delegate to the parameter set's .setParameterType() method.
.setParameterType <- function(parameterSet, parameterName, parameterType) {
    if (is.null(parameterSet)) {
        return(invisible())
    }
parameterSet$.setParameterType(parameterName, parameterType)
}

# Assigns 'value' to the field 'parameterName' of 'parameterSet' and
# classifies it as NOT_APPLICABLE (all NA, if 'notApplicableIfNA'),
# DEFAULT_VALUE (equal to 'defaultValue', including matching NA pattern),
# or USER_DEFINED otherwise. The field must exist on the reference class.
.setValueAndParameterType <- function(parameterSet, parameterName, value, defaultValue, notApplicableIfNA = FALSE) {
    .assertIsParameterSetClass(parameterSet, "parameterSet")
    if (is.null(parameterSet)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'parameterSet' must be not null")
    }
    if (!(parameterName %in% names(parameterSet$getRefClass()$fields()))) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", .getClassName(parameterSet),
            "' does not contain a field with name '", parameterName, "'"
        )
    }
    parameterSet[[parameterName]] <- value
    if (notApplicableIfNA && all(is.na(value))) {
        parameterSet$.setParameterType(parameterName, C_PARAM_NOT_APPLICABLE)
    } else if (!is.null(value) && length(value) == length(defaultValue) && (
            (all(is.na(value)) && all(is.na(value) == is.na(defaultValue))) ||
                (!is.na(all(value == defaultValue)) && all(value == defaultValue))
        )) {
        parameterSet$.setParameterType(parameterName, C_PARAM_DEFAULT_VALUE)
    } else {
        parameterSet$.setParameterType(parameterName, C_PARAM_USER_DEFINED)
    }
}

# TRUE if 'x' equals 'default' element-wise (same length, all equal).
# NOTE(review): NA elements make 'sum(x == default)' NA, so the result is
# NA rather than FALSE for vectors containing NA — confirm callers never
# pass NA here.
.isDefaultVector <- function(x, default) {
    if (length(x) != length(default)) {
        return(FALSE)
    }
    return(sum(x == default) == length(x))
}

# Counts the zeros immediately following the decimal separator of 'x'
# in fixed (15 decimal places) notation, e.g. 0.004 -> 2, 0.25 -> 0.
.getNumberOfZerosDirectlyAfterDecimalSeparator <- function(x) {
    zeroCounter <- 0
    startEnabled <- FALSE
    x <- round(x, 15)
    x <- sprintf("%.15f", x)
    for (i in 1:nchar(x)) {
        num <- substring(x, i, i)
        if (num == ".") {
            startEnabled <- TRUE
        } else if (startEnabled) {
            if (num == "0") {
                zeroCounter <- zeroCounter + 1
            } else {
                return(zeroCounter)
            }
        }
    }
    return(zeroCounter)
}

# For each value, returns the power of ten directly above its leading
# decimal zeros, e.g. 0.004 -> 0.01 (1 / 10^2).
.getNextHigherValue <- function(x) {
    .assertIsNumericVector(x, "x")
    values <- c()
    for (value in x) {
        value <- round(value, 15)
        values <- c(values, 1 / 10^.getNumberOfZerosDirectlyAfterDecimalSeparator(value))
    }
    return(values)
}

# Step width for expanding a two-element (min, max) varied parameter
# into a sequence of default length.
.getVariedParameterVectorByValue <- function(variedParameter) {
    return((variedParameter[2] - variedParameter[1]) / C_VARIED_PARAMETER_SEQUENCE_LENGTH_DEFAULT)
}

.getVariedParameterVector <-
function(variedParameter, variedParameterName) {
    # Expands a two-element (min, max) varied parameter into a seq() of
    # default length; anything else is returned unchanged.
    if (is.null(variedParameter) || length(variedParameter) != 2 || any(is.na(variedParameter))) {
        return(variedParameter)
    }
    minValue <- variedParameter[1]
    maxValue <- variedParameter[2]
    if (minValue == maxValue) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'", variedParameterName,
            "' with length 2 must contain minimum != maximum (",
            minValue, " == ", maxValue, ")"
        )
    }
    by <- .getVariedParameterVectorByValue(variedParameter)
    variedParameter <- seq(minValue, maxValue, by)
    return(variedParameter)
}

# Renders the seq() call for a varied parameter as R code (for getObjectRCode output).
.getVariedParameterVectorSeqCommand <- function(variedParameter) {
    return(paste0(
        "seq(", round(variedParameter[1], 4), ", ",
        round(variedParameter[2], 4), ", ",
        round(.getVariedParameterVectorByValue(variedParameter), 6), ")"
    ))
}

# Number of subjects in group 1 for allocation ratio n1/n2.
.getNumberOfSubjects1 <- function(numberOfSubjects, allocationRatioPlanned) {
    return((numberOfSubjects * allocationRatioPlanned) / (allocationRatioPlanned + 1))
}

# Number of subjects in group 2 for allocation ratio n1/n2.
.getNumberOfSubjects2 <- function(numberOfSubjects, allocationRatioPlanned) {
    return(numberOfSubjects / (allocationRatioPlanned + 1))
}

# Pads 'x' with NA_real_ up to length 'kMax'.
.fillWithNAs <- function(x, kMax) {
    if (length(x) >= kMax) {
        return(x)
    }
    x[(length(x) + 1):kMax] <- NA_real_
    return(x)
}

# Returns the first element of 'arg', or 'defaultValue' if 'arg' is empty
# or contains NA.
# NOTE(review): ifelse() truncates to the length of the condition; assumes
# 'defaultValue' is a single value — confirm against callers.
.matchArgument <- function(arg, defaultValue) {
    if (any(is.na(arg))) {
        return(defaultValue)
    }
    return(ifelse(length(arg) > 0, arg[1], defaultValue))
}

#' @title
#' Print Citation
#'
#' @description
#' How to cite \code{rpact} and \code{R} in publications.
#'
#' @param inclusiveR If \code{TRUE} (default) the information on how to cite the base R system in publications will be added.
#' @param language Language code to use for the output, default is "en".
#'
#' @details
#' This function shows how to cite \code{rpact} and \code{R} (\code{inclusiveR = TRUE}) in publications.
#'
#' @examples
#' printCitation()
#'
#' @keywords internal
#'
#' @export
#'
printCitation <- function(inclusiveR = TRUE, language = "en") {
    # Fix: remember whether LANGUAGE was set at all; the previous version
    # restored it with Sys.setenv() unconditionally, which left LANGUAGE=""
    # behind when the variable was originally unset.
    currentLanguage <- Sys.getenv("LANGUAGE", unset = NA_character_)
    tryCatch(
        {
            Sys.setenv(LANGUAGE = language)
            if (inclusiveR) {
                citR <- utils::capture.output(print(citation("base"), bibtex = FALSE))
                # drop everything after the last inner blank line (the BibTeX hint)
                indices <- which(citR == "")
                indices <- indices[indices != 1 & indices != length(citR)]
                if (length(indices) > 1) {
                    index <- indices[length(indices)]
                    citR <- citR[1:min(index, length(citR))]
                }
                cat("\n", trimws(paste(citR, collapse = "\n")), "\n", sep = "")
            }
            print(citation("rpact"), bibtex = FALSE)
        },
        finally = {
            if (is.na(currentLanguage)) {
                Sys.unsetenv("LANGUAGE")
            } else {
                Sys.setenv(LANGUAGE = currentLanguage)
            }
        }
    )
}

# Writes the character vector 'lines' to 'fileName' (one line each);
# the connection is always closed. Returns 'fileName' invisibly.
# Warns and writes nothing for empty or non-character input.
.writeLinesToFile <- function(lines, fileName) {
    if (is.null(lines) || length(lines) == 0 || !is.character(lines)) {
        warning("Empty lines. Stop to write '", fileName, "'")
        return(invisible(fileName))
    }
    fileConn <- base::file(fileName)
    tryCatch(
        {
            base::writeLines(lines, fileConn)
        },
        finally = {
            base::close(fileConn)
        }
    )
    invisible(fileName)
}

#'
#' Windows: CR (Carriage Return \r) and LF (LineFeed \n) pair
#'
#' OSX, Linux: LF (LineFeed \n)
#'
#' @noRd
#'
.readLinesFromFile <- function(inputFileName) {
    # split on any of \n, \r, or \r\n so all platforms' line endings work
    content <- .readContentFromFile(inputFileName)
    return(strsplit(content, split = "(\r?\n)|(\r\n?)")[[1]])
}

# Reads the entire file as a single string.
.readContentFromFile <- function(inputFileName) {
    return(readChar(inputFileName, file.info(inputFileName)$size))
}

# Converts the whole numbers 1..9 to their English words ("one".."nine");
# everything else is returned via as.character(). Non-scalar or
# non-numeric input is returned unchanged.
.integerToWrittenNumber <- function(x) {
    if (is.null(x) || length(x) != 1 || !is.numeric(x) || is.na(x)) {
        return(x)
    }
    temp <- c("one", "two", "three", "four", "five", "six", "seven", "eight", "nine")
    if (x >= 1 && x <= length(temp) && as.integer(x) == x) {
        return(temp[x])
    }
    return(as.character(x))
}

# Deparses a function to a character vector (one element per line),
# stripping the bytecode/environment trailer lines and replacing tabs
# by 'stringWrapPrefix'.
.getFunctionAsString <- function(fun, stringWrapPrefix = "    ", stringWrapParagraphWidth = 90) {
    .assertIsFunction(fun)
    s <- utils::capture.output(print(fun))
    s <- s[!grepl("bytecode", s)]
    s <- s[!grepl("environment", s)]
    if (is.null(stringWrapPrefix) || is.na(stringWrapPrefix) || nchar(stringWrapPrefix) == 0) {
        stringWrapPrefix <- "    "
    }
    s <- gsub("\u0009", stringWrapPrefix, s) # \t
    if (!is.null(stringWrapParagraphWidth) && !is.na(stringWrapParagraphWidth)) {
        # s <- paste0(s, collapse = "\n")
    }
    return(s)
}

# Formal argument names of 'fun'; optionally without "...".
.getFunctionArgumentNames <- function(fun, ignoreThreeDots = FALSE) {
    .assertIsFunction(fun)
    args <- methods::formalArgs(fun)
    if (ignoreThreeDots) {
        args <- args[args != "..."]
    }
    return(args)
}

# Number of significant decimal places per value (trailing zeros ignored);
# NA values are treated as 0.
.getDecimalPlaces <- function(values) {
    if (is.null(values) || length(values) == 0) {
        return(integer(0))
    }
    values[is.na(values)] <- 0
    decimalPlaces <- c()
    for (value in values) {
        decimalPlaces <- c(
            decimalPlaces,
            nchar(sub("^\\d+\\.", "", sub("0*$", "", format(round(value, 15), scientific = FALSE))))
        )
    }
    return(decimalPlaces)
}

#'
#' @title
#' Get Parameter Caption
#'
#' @description
#' Returns the parameter caption for a given object and parameter name.
#'
#' @details
#' This function identifies and returns the caption that will be used in print outputs of an rpact result object.
#'
#' @seealso
#' \code{\link[=getParameterName]{getParameterName()}} for getting the parameter name for a given caption.
#'
#' @return Returns a \code{\link[base]{character}} of specifying the corresponding caption of a given parameter name.
#' Returns \code{NULL} if the specified \code{parameterName} does not exist.
#'
#' @examples
#' getParameterCaption(getDesignInverseNormal(), "kMax")
#'
#' @keywords internal
#'
#' @export
#'
getParameterCaption <- function(obj, parameterName) {
    # caption lookup is only defined for rpact S4 result objects (class "FieldSet")
    if (is.null(obj) || length(obj) != 1 || !isS4(obj) || !inherits(obj, "FieldSet")) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'obj' (", .getClassName(obj), ") must be an rpact result object")
    }
    .assertIsSingleCharacter(parameterName, "parameterName", naAllowed = FALSE)

    # derive the design (and, for plan objects, the design plan) from the given object
    trialDesignPlan <- NULL
    if (inherits(obj, "TrialDesignPlan")) {
        trialDesignPlan <- obj
        trialDesign <- obj$.design
    } else if (inherits(obj, "TrialDesign")) {
        trialDesign <- obj
    } else {
        trialDesign <- obj[[".design"]]
    }

    # named list: parameter name -> caption used in print output
    captionsByName <- .getParameterNames(design = trialDesign, designPlan = trialDesignPlan)
    if (is.null(captionsByName) || length(captionsByName) == 0) {
        return(NULL)
    }

    return(captionsByName[[parameterName]])
}

#'
#' @title
#' Get Parameter Name
#'
#' @description
#' Returns the parameter name for a given object and parameter caption.
#'
#' @details
#' This function identifies and returns the parameter name for a given caption
#' that will be used in print outputs of an rpact result object.
#'
#' @seealso
#' \code{\link[=getParameterCaption]{getParameterCaption()}} for getting the parameter caption for a given name.
#'
#' @return Returns a \code{\link[base]{character}} specifying the corresponding name of a given parameter caption.
#' Returns \code{NULL} if the specified \code{parameterCaption} does not exist.
#' #' @examples #' getParameterName(getDesignInverseNormal(), "Maximum number of stages") #' #' @keywords internal #' #' @export #' getParameterName <- function(obj, parameterCaption) { if (is.null(obj) || length(obj) != 1 || !isS4(obj) || !inherits(obj, "FieldSet")) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'obj' (", .getClassName(obj), ") must be an rpact result object") } .assertIsSingleCharacter(parameterCaption, "parameterCaption", naAllowed = FALSE) design <- NULL designPlan <- NULL if (inherits(obj, "TrialDesignPlan")) { designPlan <- obj design <- obj$.design } else if (inherits(obj, "TrialDesign")) { design <- obj } else { design <- obj[[".design"]] } parameterNames <- .getParameterNames(design = design, designPlan = designPlan) if (is.null(parameterNames) || length(parameterNames) == 0) { return(NULL) } return(unique(names(parameterNames)[parameterNames == parameterCaption])) } .removeLastEntryFromArray <- function(x) { if (!is.array(x)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'x' (", .getClassName(x), ") must be an array") } dataDim <- dim(x) if (length(dataDim) != 3) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "function .removeLastEntryFromArray() only works for 3-dimensional arrays") } if (dataDim[3] < 2) { return(NA_real_) } dataDim[3] <- dataDim[3] - 1 subData <- x[, , 1:dataDim[3]] return(array(data = subData, dim = dataDim)) } .moveColumn <- function(data, columnName, insertPositionColumnName) { if (!is.data.frame(data)) { stop("Illegal argument: 'data' (", .getClassName(data), ") must be a data.frame") } if (is.null(insertPositionColumnName) || length(insertPositionColumnName) != 1 || is.na(insertPositionColumnName) || !is.character(insertPositionColumnName)) { stop( "Illegal argument: 'insertPositionColumnName' (", .getClassName(insertPositionColumnName), ") must be a valid character value" ) } if (is.null(columnName) || length(columnName) != 1 || is.na(columnName) || !is.character(columnName)) { stop("Illegal argument: 'columnName' (", 
.getClassName(columnName), ") must be a valid character value") } colNames <- colnames(data) if (!(columnName %in% colNames)) { stop("Illegal argument: 'columnName' (", columnName, ") does not exist in the specified data.frame 'data'") } if (!(insertPositionColumnName %in% colNames)) { stop( "Illegal argument: 'insertPositionColumnName' (", insertPositionColumnName, ") does not exist in the specified data.frame 'data'" ) } if (columnName == insertPositionColumnName) { return(data) } colNames <- colNames[colNames != columnName] insertPositioIndex <- which(colNames == insertPositionColumnName) if (insertPositioIndex != (which(colnames(data) == columnName) - 1)) { if (insertPositioIndex == length(colNames)) { data <- data[, c(colNames[1:insertPositioIndex], columnName)] } else { data <- data[, c(colNames[1:insertPositioIndex], columnName, colNames[(insertPositioIndex + 1):length(colNames)])] } } return(data) } #' @examples #' or1 <- list( #' and1 = FALSE, #' and2 = TRUE, #' and3 = list( #' or1 = list( #' and1 = TRUE, #' and2 = TRUE #' ), #' or2 = list( #' and1 = TRUE, #' and2 = TRUE, #' and3 = TRUE #' ), #' or3 = list( #' and1 = TRUE, #' and2 = TRUE, #' and3 = TRUE, #' and4 = TRUE, #' and5 = TRUE #' ) #' ) #' ) #' #' @noRd #' .isConditionTrue <- function(x, condType = c("and", "or"), xName = NA_character_, level = 0, showDebugMessages = FALSE) { if (is.logical(x)) { if (showDebugMessages) { message(rep("\t", level), x, "") } return(x) } condType <- match.arg(condType) if (is.list(x)) { listNames <- names(x) if (is.null(listNames) || any(is.na(listNames)) || any(trimws(listNames) == "")) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "list (", .arrayToString(unlist(x)), ") must be named") } results <- logical(0) for (listName in listNames) { type <- gsub("\\d*", "", listName) if (!(type %in% c("and", "or"))) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "all list names (", type, " / ", listName, ") must have the format 'and[number]' or 'or[number]', where [number] is an integer" ) } 
subList <- x[[listName]] result <- .isConditionTrue(subList, condType = type, xName = listName, level = level + 1, showDebugMessages = showDebugMessages ) results <- c(results, result) } if (condType == "and") { result <- all(results == TRUE) if (showDebugMessages) { message(rep("\t", level), result, " (before: and)") } return(result) } result <- any(results == TRUE) if (showDebugMessages) { message(rep("\t", level), result, " (before: or)") } return(result) } stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "x must be of type logical or list (is ", .getClassName(x)) } .getClassName <- function(x) { return(as.character(class(x))[1]) } .isPackageInstalled <- function(packageName) { return(nzchar(try(system.file(package = packageName), silent = TRUE))) } .getQNorm <- function(p, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE, epsilon = C_QNORM_EPSILON) { if (any(p < -1e-07 | p > 1 + 1e-07, na.rm = TRUE)) { warning("Tried to get qnorm() from ", .arrayToString(p), " which is out of interval (0, 1)") } p[p <= 0] <- epsilon p[p > 1] <- 1 result <- stats::qnorm(p, mean = mean, sd = sd, lower.tail = lower.tail, log.p = log.p) result[result < -C_QNORM_THRESHOLD] <- C_QNORM_MINIMUM result[result > C_QNORM_THRESHOLD] <- C_QNORM_MAXIMUM return(result) } .getOneMinusQNorm <- function(p, mean = 0, sd = 1, lower.tail = TRUE, log.p = FALSE, ..., epsilon = C_QNORM_EPSILON) { if (all(is.na(p))) { return(p) } if (any(p < -1e-07 | p > 1 + 1e-07, na.rm = TRUE)) { warning("Tried to get 1 - qnorm() from ", .arrayToString(p), " which is out of interval (0, 1)") } p[p <= 0] <- epsilon p[p > 1] <- 1 indices <- p < 0.5 indices[is.na(indices)] <- FALSE result <- rep(NA_real_, length(p)) if (is.matrix(p)) { result <- matrix(result, ncol = ncol(p)) } if (any(indices)) { result[indices] <- -stats::qnorm(p[indices], mean = mean, sd = sd, lower.tail = lower.tail, log.p = log.p ) } # prevent values that are close to 1 from becoming Inf, see qnorm(1) # example: 1 - 1e-17 = 1 in R, i.e., qnorm(1 - 1e-17) = 
Inf # on the other hand: qnorm(1e-323) = -38.44939 if (any(!indices)) { result[!indices] <- stats::qnorm(1 - p[!indices], mean = mean, sd = sd, lower.tail = lower.tail, log.p = log.p ) } result[result < -C_QNORM_THRESHOLD] <- C_QNORM_MINIMUM result[result > C_QNORM_THRESHOLD] <- C_QNORM_MAXIMUM return(result) } .moveValue <- function(values, value, insertPositionValue) { if (is.null(insertPositionValue) || length(insertPositionValue) != 1 || is.na(insertPositionValue)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'insertPositionValue' (", class(insertPositionValue), ") must be a valid single value" ) } if (is.null(value) || length(value) != 1 || is.na(value)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'value' (", class(value), ") must be a valid single value" ) } if (!(value %in% values)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'value' (", value, ") does not exist in the specified vector 'values'" ) } if (!(insertPositionValue %in% values)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'insertPositionValue' (", insertPositionValue, ") does not exist in the specified vector 'values'" ) } if (value == insertPositionValue) { return(values) } originalValues <- values values <- values[values != value] insertPositioIndex <- which(values == insertPositionValue) if (insertPositioIndex != (which(originalValues == value) - 1)) { if (insertPositioIndex == length(values)) { values <- c(values[1:insertPositioIndex], value) } else { values <- c(values[1:insertPositioIndex], value, values[(insertPositioIndex + 1):length(values)]) } } return(values) } .reconstructSequenceCommand <- function(values) { if (length(values) == 0 || all(is.na(values))) { return(NA_character_) } if (length(values) <= 3 || any(is.na(values))) { return(.arrayToString(values, vectorLookAndFeelEnabled = (length(values) != 1))) } minValue <- min(values) maxValue <- max(values) by <- (maxValue - minValue) / (length(values) - 1) valuesTemp <- seq(minValue, maxValue, by) if (isTRUE(all.equal(values, 
valuesTemp, tolerance = 1e-10))) { return(paste0("seq(", minValue, ", ", maxValue, ", ", by, ")")) } return(.arrayToString(values, vectorLookAndFeelEnabled = TRUE, maxLength = 10)) } .isSummaryPipe <- function(fCall) { tryCatch( { xCall <- deparse(fCall$x) return(identical(xCall[1], ".") || grepl("^summary\\(", xCall[1])) }, error = function(e) { return(FALSE) } ) } rpact/R/f_core_output_formats.R0000644000176200001440000011573414446300510016327 0ustar liggesusers## | ## | *Output formats* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7132 $ ## | Last changed: $Date: 2023-06-26 14:15:08 +0200 (Mon, 26 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_constants.R #' @include f_core_utilities.R NULL C_ROUND_FUNCTIONS <- c("ceiling", "floor", "trunc", "round", "signif") C_OUTPUT_FORMAT_ARGUMENTS <- c( "digits", "nsmall", "trimSingleZeros", "futilityProbabilityEnabled", "roundFunction" ) C_OUTPUT_FORMAT_DEFAULT_VALUES <- pairlist( "rpact.output.format.p.value" = "digits = 4, nsmall = 4", "rpact.output.format.repeated.p.value" = "digits = 4, nsmall = 4", "rpact.output.format.probability" = "digits = 3, nsmall = 3", "rpact.output.format.futility.probability" = "digits = 4, nsmall = 4, futilityProbabilityEnabled = TRUE", "rpact.output.format.sample.size" = "digits = 1, nsmall = 1", "rpact.output.format.event" = "digits = 1, nsmall = 1, trimSingleZeros = TRUE", "rpact.output.format.event.time" = "digits = 3, trimSingleZeros = TRUE", 
"rpact.output.format.conditional.power" = "digits = 4",
"rpact.output.format.critical.value" = "digits = 3, nsmall = 3",
"rpact.output.format.critical.value.fisher" = "digits = 4",
"rpact.output.format.test.statistic.fisher" = "digits = 4",
"rpact.output.format.test.statistic" = "digits = 3, nsmall = 3",
"rpact.output.format.rate" = "digits = 3, nsmall = 3",
"rpact.output.format.rate1" = "digits = 1, nsmall = 1",
"rpact.output.format.accrual.intensity" = "digits = 2, nsmall = 1",
"rpact.output.format.mean" = "digits = 4",
"rpact.output.format.ratio" = "digits = 3",
"rpact.output.format.st.dev" = "digits = 4",
"rpact.output.format.duration" = "digits = 2, nsmall = 2",
"rpact.output.format.time" = "digits = 2, nsmall = 2"
)

#
# Formats a numeric vector for printing.
#
# value: numeric vector to format (missing -> "NA"; NULL/empty returned unchanged).
# digits: number of significant digits passed to format().
# nsmall: minimum number of decimal places (NA is treated as 0).
# futilityProbabilityEnabled: if TRUE, non-negative values below 1e-09 are rendered as "0".
# roundFunction: optional name of a base rounding function applied before formatting
#     ("ceiling", "floor", "trunc", "round", "signif").
# scientific: passed through to format().
# trimEndingZerosAfterDecimalPoint: if TRUE, a trailing ".0...0" is removed from the result.
#
.getFormattedValue <- function(value, ..., digits, nsmall = NA_integer_,
        futilityProbabilityEnabled = FALSE, roundFunction = NA_character_,
        scientific = NA, trimEndingZerosAfterDecimalPoint = FALSE) {
    if (missing(value)) {
        return("NA")
    }
    if (is.null(value) || length(value) == 0) {
        return(value)
    }
    if (!is.numeric(value)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'value' must be a numeric vector")
    }

    if (futilityProbabilityEnabled) {
        value[value >= 0 & value < 1e-09] <- 0 # only futility probabilities
    }

    if (!is.na(roundFunction)) {
        if (roundFunction == "ceiling") {
            value <- ceiling(value * 10^digits) / 10^digits
        } else if (roundFunction == "floor") {
            value <- floor(value * 10^digits) / 10^digits
        } else if (roundFunction == "trunc") {
            value <- trunc(value)
        } else if (roundFunction == "round") {
            # bug fix: was 'roundFunction == "round "' (trailing space); option
            # validation only accepts "round", so this branch was unreachable
            value <- round(value, digits = digits)
        } else if (roundFunction == "signif") {
            # bug fix: was 'roundFunction == "signif "' (trailing space), unreachable as well
            value <- signif(value, digits = digits)
        }
    }

    if (is.na(nsmall)) {
        nsmall <- 0L
    }

    formattedValue <- format(value,
        digits = digits, nsmall = nsmall, scientific = scientific,
        justify = "left", trim = TRUE
    )

    # if format() produced scientific notation, re-format the values one by one
    if ((is.na(scientific) || scientific) && any(grepl("e", formattedValue))) {
        formattedValueTemp <- c()
        for (valueTemp in value) {
            # NOTE(review): this branch looks unreachable because the enclosing
            # condition requires 'scientific' to be NA or TRUE - confirm intent
            if (!is.na(scientific) && !scientific && digits > 0 && nsmall == 0) {
                maxValue <- 1 / 10^digits
                if (valueTemp < maxValue) {
                    valueTemp <- paste0("<", maxValue)
                }
            } else {
                valueTemp <- format(valueTemp,
                    digits = digits, nsmall = nsmall, scientific = scientific,
                    justify = "left", trim = TRUE
                )
            }
            formattedValueTemp <- c(formattedValueTemp, valueTemp)
        }
        formattedValue <- formattedValueTemp
    }

    if (futilityProbabilityEnabled) {
        formattedValue[value == 0] <- "0"
    }

    if (trimEndingZerosAfterDecimalPoint) {
        formattedValue <- gsub("\\.0+$", "", formattedValue)
    }

    return(formattedValue)
}

# Sets numeric values that are effectively zero (|value| < 1e-08) exactly to 0.
.getZeroCorrectedValue <- function(value) {
    if (is.numeric(value)) {
        # bug fix: exclude NA entries from the logical index; a logical index
        # containing NA raises "NAs are not allowed in subscripted assignments"
        value[!is.na(value) & abs(value) < 1e-08] <- 0
    }
    return(value)
}

# Determines the number of decimal places used for p-value formatting:
# minimum 4, derived from the formatted width of the non-scientific values >= 1e-4.
.getPValueDecimalPlaces <- function(value) {
    value <- stats::na.omit(value)
    if (length(value) == 0) {
        return(4)
    }
    fv <- .getFormattedValue(value[value >= 1e-4], digits = 4, nsmall = 4)
    fv <- fv[!((1:length(fv)) %in% grep("e", fv))]
    numberOfCharacters <- ifelse(length(fv) > 0, nchar(fv[1]), 6)
    numberOfCharacters <- ifelse(numberOfCharacters < 6, 6, numberOfCharacters)
    decimalPlaces <- numberOfCharacters - 2
    return(decimalPlaces)
}

# Validates a single output format option value, e.g. "digits = 4, nsmall = 4";
# stops with an informative error if any argument-value pair is malformed.
.assertIsValitOutputFormatOptionValue <- function(optionKey, optionValue) {
    if (is.null(optionValue) || length(optionValue) == 0 || nchar(trimws(optionValue)) == 0) {
        return(invisible())
    }
    parts <- base::strsplit(optionValue, " *, *", fixed = FALSE)[[1]]
    if (length(parts) == 0) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "the value (", optionValue, ") of output format option '", optionKey, "' is invalid"
        )
    }
    for (part in parts) {
        if (!grepl(" *= *", part)) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'", optionKey, "' (", part, ") must contain a valid argument-value-pair: \"argument = value\""
            )
        }
        keyValuePair <- base::strsplit(part, " *= *", fixed = FALSE)[[1]]
        if (length(keyValuePair) != 2) {
            stop(
                C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                "'", optionKey, "' contains an invalid argument-value-pair: ", part
            )
        }
        key <- trimws(keyValuePair[1])
        if (nchar(key) == 0) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'",
optionKey, "' contains an invalid argument") } if (!(key %in% C_OUTPUT_FORMAT_ARGUMENTS)) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", optionKey, "' contains an invalid argument: ", key) } value <- trimws(keyValuePair[2]) if (nchar(value) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'", optionKey, "' contains an invalid value") } if (key %in% c("digits", "nsmall")) { if (grepl("\\D", value)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the value (", value, ") of '", optionKey, "' must be an integer value" ) } } else if (key %in% c("roundFunction")) { if (!(value %in% C_ROUND_FUNCTIONS)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the value (", value, ") of '", optionKey, "' must be one of these character values: ", .arrayToString(C_ROUND_FUNCTIONS, encapsulate = TRUE) ) } } else if (key %in% c("trimSingleZeros", "futilityProbabilityEnabled")) { if (!grepl("TRUE|FALSE", toupper(value))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "the value (", value, ") of '", optionKey, "' must be a logical value" ) } } } } .assertIsValitOutputFormatOptionValue("rpact.output.format.sample.size", "roundFunction = ceiling") .getOutputFormatOptions <- function(optionKey) { str <- getOption(optionKey) if (is.null(str) || length(str) == 0 || nchar(trimws(str)) == 0) { return(NULL) } parts <- base::strsplit(str, " *, *", fixed = FALSE)[[1]] if (length(parts) == 0) { return(NULL) } result <- list() for (part in parts) { .assertIsValitOutputFormatOptionValue(optionKey, optionValue = part) keyValuePair <- base::strsplit(part, " *= *", fixed = FALSE)[[1]] key <- trimws(keyValuePair[1]) value <- trimws(keyValuePair[2]) if (key %in% c("digits", "nsmall")) { value <- as.integer(value) } else if (key %in% c("trimSingleZeros", "futilityProbabilityEnabled")) { value <- as.logical(value) } result[[key]] <- value } return(result) } .getOptionBasedFormattedValue <- function(optionKey, value, digits, nsmall = NA_integer_, trimSingleZeros = FALSE, futilityProbabilityEnabled = FALSE, roundFunction 
= NA_character_) {
    # Looks up user-defined output format options for 'optionKey'; if any are set,
    # formats 'value' with them, otherwise returns NULL so the caller can apply
    # its hard-coded defaults.
    outputFormatOptions <- .getOutputFormatOptions(optionKey)
    if (is.null(outputFormatOptions) || length(outputFormatOptions) == 0) {
        return(NULL)
    }
    if (!is.null(outputFormatOptions[["digits"]])) {
        digits <- outputFormatOptions[["digits"]]
    }
    if (!is.null(outputFormatOptions[["nsmall"]])) {
        nsmall <- outputFormatOptions[["nsmall"]]
    }
    if (!is.null(outputFormatOptions[["trimSingleZeros"]])) {
        trimSingleZeros <- outputFormatOptions[["trimSingleZeros"]]
    }
    if (!is.null(outputFormatOptions[["futilityProbabilityEnabled"]])) {
        futilityProbabilityEnabled <- outputFormatOptions[["futilityProbabilityEnabled"]]
    }
    if (!is.null(outputFormatOptions[["roundFunction"]])) {
        roundFunction <- outputFormatOptions[["roundFunction"]]
    }
    if (trimSingleZeros) {
        value <- .getZeroCorrectedValue(value)
    }
    return(.getFormattedValue(value,
        digits = digits, nsmall = nsmall,
        futilityProbabilityEnabled = futilityProbabilityEnabled,
        roundFunction = roundFunction
    ))
}

#
# @title
# Format P Values
#
# @description
# Formats the output of p-values.
#
# @details
# Digits = 4, nsmall = 4.
# Replaces p-values in scientific format (e.g., 1e-07) by a non-scientific format (e.g., <0.00001).
#
# @param value a vector of p-values.
#
.formatPValues <- function(value) {
    if (sum(is.na(value)) == length(value)) {
        return(value)
    }

    x <- .getOptionBasedFormattedValue("rpact.output.format.p.value",
        value = value, digits = 4, nsmall = 4
    )
    if (!is.null(x)) {
        return(x)
    }

    decimalPlaces <- .getPValueDecimalPlaces(value)
    if (is.na(decimalPlaces) || is.nan(decimalPlaces)) {
        decimalPlaces <- 4
    } else if (decimalPlaces > 4) {
        decimalPlaces <- decimalPlaces - 1
    }

    threshold <- 10^-decimalPlaces
    text <- paste0("<0.", strrep("0", decimalPlaces - 1), "1")

    # bug fix: exclude NA entries from the logical index; the previous
    # 'indices <- (value < threshold)' produced NA indices for partially-NA
    # input and 'value[indices] <- threshold' then failed with
    # "NAs are not allowed in subscripted assignments"
    indices <- !is.na(value) & value < threshold
    value[indices] <- threshold
    formattedValue <- .getFormattedValue(value, digits = 4, nsmall = 4)
    formattedValue[indices] <- text
    return(formattedValue)
}

#
# @title
# Format Repeated P Values
#
# @description
# Formats the output of repeated p-values.
#
# @details
# If p-value > 0.4999 then ">0.5" will be returned.
#
# @param value a vector of p-values.
#
.formatRepeatedPValues <- function(value) {
    x <- .getOptionBasedFormattedValue("rpact.output.format.repeated.p.value",
        value = value, digits = 4, nsmall = 4
    )
    if (!is.null(x)) {
        return(x)
    }

    pValues <- .formatPValues(value)
    # bug fix: NA-safe index (see .formatPValues above)
    pValues[!is.na(value) & value > 0.4999] <- ">0.5"
    return(pValues)
}

#
# @title
# Format Probabilities
#
# @description
# Formats the output of probabilities.
#
# @details
# Digits = 4, nsmall = 4
#
.formatProbabilities <- function(value) {
    x <- .getOptionBasedFormattedValue("rpact.output.format.probability",
        value = value, digits = 4, nsmall = 4
    )
    if (!is.null(x)) {
        return(x)
    }

    # bug fix: NA-safe index (see .formatPValues above)
    # NOTE(review): the fallback uses digits = 4 while the registered default
    # option is "digits = 3, nsmall = 3" - confirm which is intended
    value[!is.na(value) & abs(value) < 1e-08] <- 0
    return(.getFormattedValue(value, digits = 4, nsmall = 4))
}

#
# @title
# Format Sample Sizes
#
# @description
# Formats the output of sample sizes.
#
# @details
# Digits = 1, nsmall = 1
#
.formatSampleSizes <- function(value) {
    x <- .getOptionBasedFormattedValue("rpact.output.format.sample.size",
        value = value, digits = 1, nsmall = 1, trimSingleZeros = TRUE
    )
    if (!is.null(x)) {
        return(x)
    }

    return(.getFormattedValue(.getZeroCorrectedValue(value),
        digits = 1, nsmall = 1, trimEndingZerosAfterDecimalPoint = TRUE
    ))
}

#
# @title
# Format Events
#
# @description
# Formats the output of events.
#
# @details
# Digits = 1, nsmall = 1, trimSingleZeros = TRUE
#
.formatEvents <- function(value) {
    # user-defined output format takes precedence over the hard-coded default
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.event",
        value = value, digits = 1, nsmall = 1, trimSingleZeros = TRUE
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(.getZeroCorrectedValue(value),
            digits = 1, nsmall = 1, trimEndingZerosAfterDecimalPoint = TRUE
        )
    }
    return(formatted)
}

#
# @title
# Format Conditional Power
#
# @description
# Formats the output of conditional power.
#
# @details
# Digits = 4
#
.formatConditionalPower <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.conditional.power",
        value = value, digits = 4
    )
    if (!is.null(formatted)) {
        return(formatted)
    }

    roundedValue <- round(value, digits = 4)
    formatted <- .getFormattedValue(roundedValue, digits = 4)
    # render exact zeros as plain "0"
    formatted[roundedValue == 0] <- "0"
    return(formatted)
}

#
# @title
# Format Futility Probabilities
#
# @description
# Formats the output of futility probabilities.
#
# @details
# Digits = 4, nsmall = 4
#
.formatFutilityProbabilities <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.futility.probability",
        value = value, digits = 4, nsmall = 4, futilityProbabilityEnabled = TRUE
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value,
            digits = 4, nsmall = 4, futilityProbabilityEnabled = TRUE
        )
    }
    return(formatted)
}

#
# @title
# Format Group Sequential Critical Values
#
# @description
# Formats the output of group sequential critical values.
#
# @details
# Digits = 3, nsmall = 3
#
.formatCriticalValues <- function(value) {
    # display the default futility bound placeholder as -Inf
    value[value == C_FUTILITY_BOUNDS_DEFAULT] <- -Inf
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.critical.value",
        value = value, digits = 3, nsmall = 3
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 3, nsmall = 3)
    }
    return(formatted)
}

#
# @title
# Format Fisher Critical Values
#
# @description
# Formats the output of Fisher's combination critical values.
#
# @details
# Digits = 4
#
.formatCriticalValuesFisher <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.critical.value.fisher",
        value = value, digits = 4
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 4)
    }
    return(formatted)
}

#
# @title
# Format Fisher Test Statistics
#
# @description
# Formats the output of Fisher's combination test statistics.
#
# @details
# Digits = 4
#
.formatTestStatisticsFisher <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.test.statistic.fisher",
        value = value, digits = 4
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 4) # , scientific = FALSE
    }
    return(formatted)
}

#
# @title
# Format Test Statistics
#
# @description
# Formats the output of test statistics (e.g., inverse normal).
#
# @details
# Digits = 3, nsmall = 3
#
.formatTestStatistics <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.test.statistic",
        value = value, digits = 3, nsmall = 3
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 3, nsmall = 3) # , scientific = FALSE
    }
    return(formatted)
}

#
# @title
# Format Rates
#
# @description
# Formats the output of rates.
#
# @details
# Digits = 3, nsmall = 3
#
.formatRates <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.rate",
        value = value, digits = 3, nsmall = 3
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 3, nsmall = 3)
    }
    return(formatted)
}

#
# @title
# Format Rates Dynamic
#
# @description
# Formats the output of rates.
#
# @details
# Digits = 3, nsmall = 3 if value < 1; digits = 1, nsmall = 1 otherwise
#
.formatRatesDynamic <- function(value) {
    # use the coarser "rate1" format only when all values are known and >= 1
    useRate1Format <- !any(is.na(value)) && all(value >= 1)
    if (useRate1Format) {
        formatted <- .getOptionBasedFormattedValue("rpact.output.format.rate1",
            value = value, digits = 1, nsmall = 1
        )
        if (is.null(formatted)) {
            formatted <- .getFormattedValue(value, digits = 1, nsmall = 1)
        }
    } else {
        formatted <- .getOptionBasedFormattedValue("rpact.output.format.rate",
            value = value, digits = 3, nsmall = 3
        )
        if (is.null(formatted)) {
            formatted <- .getFormattedValue(value, digits = 3, nsmall = 3)
        }
    }
    return(formatted)
}

#
# @title
# Format Accrual Intensities
#
# @description
# Formats the output of accrual intensities.
#
# @details
# Digits = 2, nsmall = 1
#
.formatAccrualIntensities <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.accrual.intensity",
        value = value, digits = 2, nsmall = 1
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 2, nsmall = 1)
    }
    return(formatted)
}

#
# @title
# Format Means
#
# @description
# Formats the output of means.
#
# @details
# Digits = 4
#
.formatMeans <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.mean",
        value = value, digits = 4
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 4)
    }
    return(formatted)
}

#
# @title
# Format Ratios
#
# @description
# Formats the output of ratios.
#
# @details
# Digits = 3
#
.formatRatios <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.ratio",
        value = value, digits = 3
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 3)
    }
    return(formatted)
}

#
# @title
# Format StDevs
#
# @description
# Formats the output of standard deviations.
#
# @details
# Digits = 4
#
.formatStDevs <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.st.dev",
        value = value, digits = 4
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 4)
    }
    return(formatted)
}

#
# @title
# Format Durations
#
# @description
# Formats the output of study durations.
#
# @details
# Digits = 2, nsmall = 2
#
.formatDurations <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.duration",
        value = value, digits = 2, nsmall = 2
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 2, nsmall = 2)
    }
    return(formatted)
}

#
# @title
# Format Time
#
# @description
# Formats the output of time values, e.g. months.
#
# @details
# Digits = 2, nsmall = 2
#
.formatTime <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.time",
        value = value, digits = 2, nsmall = 2
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(value, digits = 2, nsmall = 2)
    }
    return(formatted)
}

#
# @title
# Format Event Time
#
# @description
# Formats the output of event time values.
#
# @details
# Digits = 3, trimSingleZeros = TRUE
#
.formatEventTime <- function(value) {
    formatted <- .getOptionBasedFormattedValue("rpact.output.format.event.time",
        value = value, digits = 3, trimSingleZeros = TRUE
    )
    if (is.null(formatted)) {
        formatted <- .getFormattedValue(.getZeroCorrectedValue(value), digits = 3)
    }
    return(formatted)
}

# Formats a value without any rounding, only suppressing scientific notation.
.formatHowItIs <- function(value) {
    return(format(value, scientific = FALSE))
}

# Builds a padded label of the form " <prefix> <name><postfix>...... :" with a
# total name width of 'n' characters (1 <= n <= 300), used to align print output.
.getFormattedVariableName <- function(name, n, prefix = "", postfix = "") {
    if (!is.character(name)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'name' must be of type 'character' (is '", .getClassName(name), "')"
        )
    }
    if (!is.numeric(n)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'n' must be of type 'numeric' (is '", .getClassName(n), "')")
    }
    if (n < 1 || n > 300) {
        stop(C_EXCEPTION_TYPE_ARGUMENT_OUT_OF_BOUNDS, "'n' (", n, ") is out of bounds [1; 300]")
    }

    label <- name
    if (nchar(prefix) > 0) {
        label <- paste(prefix, label)
    }
    if (nchar(postfix) > 0) {
        label <- paste0(label, postfix)
    }

    # left-justify and pad with spaces to width 'n' (no-op if already wider)
    label <- formatC(label, width = n, flag = "-")
    return(paste0(" ", label, " :"))
}

#' @title
#' Set Output Format
#'
#' @description
#' With this function the format of the standard outputs of all \code{rpact}
#' objects can be changed and set user defined respectively.
#'
#' @param parameterName The name of the parameter whose output format shall be edited.
#' Leave the default \code{NA_character_} if
#' the output format of all parameters shall be edited.
#' @param digits How many significant digits are to be used for a numeric value.
#' The default, \code{NULL}, uses getOption("digits").
#' Allowed values are \code{0 <= digits <= 20}.
#' @param nsmall The minimum number of digits to the right of the decimal point in
#' formatting real numbers in non-scientific formats.
#' Allowed values are \code{0 <= nsmall <= 20}.
#' @param trimSingleZeros If \code{TRUE} zero values will be trimmed in the output, e.g.,
#' "0.00" will displayed as "0"
#' @param futilityProbabilityEnabled If \code{TRUE} very small value (< 1e-09) will
#' be displayed as "0", default is \code{FALSE}.
#' @param file An optional file name of an existing text file that contains output format definitions
#' (see Details for more information).
#' @param resetToDefault If \code{TRUE} all output formats will be reset to default value.
#' Note that other settings will be executed afterwards if specified, default is \code{FALSE}.
#' @param roundFunction A character value that specifies the R base round function
#' to use, default is \code{NA_character_}.
#' Allowed values are "ceiling", "floor", "trunc", "round", "signif", and \code{NA_character_}.
#' @inheritParams param_three_dots
#'
#' @details
#' Output formats can be written to a text file (see \code{\link[=getOutputFormat]{getOutputFormat()}}).
#' To load your personal output formats read a formerly saved file at the beginning of your
#' work with \code{rpact}, e.g. execute \code{setOutputFormat(file = "my_rpact_output_formats.txt")}.
#'
#' Note that the \code{parameterName} need not match exactly, e.g., for p-values the
#' following parameter names will be recognized amongst others:
#' \enumerate{
#'   \item \code{p value}
#'   \item \code{p.values}
#'   \item \code{p-value}
#'   \item \code{pValue}
#'   \item \code{rpact.output.format.p.value}
#' }
#'
#' @seealso \code{\link[base]{format}} for details on the
#' function used internally to format the values.
#'
#' @template examples_set_output_format
#'
#' @family output formats
#'
#' @export
#'
setOutputFormat <- function(parameterName = NA_character_, ...,
        digits = NA_integer_,
        nsmall = NA_integer_,
        trimSingleZeros = NA,
        futilityProbabilityEnabled = NA,
        file = NA_character_,
        resetToDefault = FALSE,
        roundFunction = NA_character_) {
    .assertIsCharacter(parameterName, "parameterName", naAllowed = TRUE)
    .assertIsSingleInteger(digits, "digits", naAllowed = TRUE, validateType = FALSE)
    .assertIsInClosedInterval(digits, "digits", lower = 0, upper = 20, naAllowed = TRUE)
    .assertIsSingleInteger(nsmall, "nsmall", naAllowed = TRUE, validateType = FALSE)
    .assertIsInClosedInterval(nsmall, "nsmall", lower = 0, upper = 20, naAllowed = TRUE)
    .assertIsSingleLogical(trimSingleZeros, "trimSingleZeros", naAllowed = TRUE)
    .assertIsSingleLogical(futilityProbabilityEnabled, "futilityProbabilityEnabled", naAllowed = TRUE)
    .assertIsSingleCharacter(file, "file", naAllowed = TRUE)
    .assertIsSingleLogical(resetToDefault, "resetToDefault")
    .assertIsSingleCharacter(roundFunction, "roundFunction", naAllowed = TRUE)
    .warnInCaseOfUnknownArguments(functionName = "setOutputFormat", ...)

    # Reset first so that any further settings specified below override the defaults.
    if (resetToDefault) {
        .resetAllOutputFormats()
    }

    # Optionally read "key : value" definitions from a text file
    # (the format written by getOutputFormat(file = ...)).
    if (!is.na(file)) {
        if (!file.exists(file)) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'file' (", file, ") does not exist")
        }

        args <- list()
        outputFormatLines <- .readLinesFromFile(file)
        counter <- 0 # number of non-empty, non-comment definition lines seen
        for (line in outputFormatLines) {
            if (!grepl("^ *#", line)) { # skip comment lines starting with '#'
                keyValuePair <- base::strsplit(line, " *: *", fixed = FALSE)[[1]]
                if (length(keyValuePair) == 2) {
                    key <- .getOutputFormatKey(keyValuePair[1], silent = TRUE)
                    if (!is.null(key)) {
                        value <- trimws(keyValuePair[2])
                        .assertIsValitOutputFormatOptionValue(optionKey = key, optionValue = value)
                        # A valid value must mention at least one known format setting.
                        if (grepl("digits|nsmall|trimSingleZeros|futilityProbabilityEnabled", value)) {
                            args[[key]] <- value
                        } else {
                            warning('Line "', line, '" contains an invalid value: ', value)
                        }
                    } else {
                        warning('Line "', line, '" contains an invalid key: ', keyValuePair[1])
                    }
                } else if (nchar(trimws(line)) > 0) {
                    warning('Line "', line, '" does not contain a valid key-value-pair')
                }
                if (nchar(trimws(line)) > 0) {
                    counter <- counter + 1
                }
            }
        }
        if (length(args) > 0) {
            base::options(args)
            cat(length(args), " (of ", counter, " defined) output format",
                ifelse(length(args) == 1, "", "s"),
                " successfully set via file\n",
                sep = ""
            )
        }
    }

    # Apply the explicitly given settings per parameter name.
    if (!all(is.na(parameterName))) {
        for (param in parameterName) {
            # Accept either an object field name or a (fuzzy) output format name.
            key <- .getOutputFormatKeyByFieldName(param)
            if (is.null(key)) {
                key <- .getOutputFormatKey(param)
            }

            cmds <- c()
            if (!is.na(digits)) {
                cmds <- c(cmds, paste0("digits = ", digits))
            }
            if (!is.na(nsmall)) {
                cmds <- c(cmds, paste0("nsmall = ", nsmall))
            }
            if (!is.na(trimSingleZeros)) {
                cmds <- c(cmds, paste0("trimSingleZeros = ", trimSingleZeros))
            }
            if (!is.na(futilityProbabilityEnabled)) {
                cmds <- c(cmds, paste0("futilityProbabilityEnabled = ", futilityProbabilityEnabled))
            }
            if (!is.na(roundFunction)) {
                cmds <- c(cmds, paste0("roundFunction = ", roundFunction))
            }

            cmd <- NULL
            resetPrefix <- ""
            if (length(cmds) > 0) {
                cmd <- paste0(cmds, collapse = ", ")
            } else {
                # No settings specified: fall back to the default format for this key.
                cmd <- C_OUTPUT_FORMAT_DEFAULT_VALUES[[key]]
                resetPrefix <- "re"
            }

            args <- list()
            args[[key]] <- cmd
            base::options(args)
            cat("Output format successfully ", resetPrefix, 'set: "', key, '" = "', cmd, '"\n', sep = "")

            fields <- .getOutputFormatParameterNames(key)
            if (!is.null(fields) && length(fields) > 0) {
                if (length(fields) == 1) {
                    cat("This output format affects the following parameter:", fields, "\n")
                } else {
                    cat("This output format affects ", length(fields),
                        " parameters: ", .arrayToString(fields), "\n",
                        sep = ""
                    )
                }
            } else {
                warning("The output format ", key, " affects no parameters", call. = FALSE)
            }
        }
    }
}

# Resolves a user-specified parameter name to the canonical option key
# "rpact.output.format.*". Fully qualified keys are validated directly;
# everything else is matched case-insensitively via regular expressions
# derived from the known keys (allowing '.', '-', spaces, plurals, etc.).
# Returns NULL (if silent = TRUE) or stops when no key matches.
.getOutputFormatKey <- function(parameterName, silent = FALSE) {
    .assertIsSingleCharacter(parameterName, "parameterName")
    if (grepl("^rpact\\.output\\.format\\.[a-z1\\.]*", parameterName)) {
        value <- C_OUTPUT_FORMAT_DEFAULT_VALUES[[parameterName]]
        if (is.null(value)) {
            if (silent) {
                return(NULL)
            }
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'parameterName' (", parameterName, ") does not exist")
        }
        return(parameterName)
    }

    x <- tolower(parameterName)
    keys <- names(C_OUTPUT_FORMAT_DEFAULT_VALUES)
    for (key in keys) {
        # Build a tolerant regex from the key suffix, e.g.
        # "st.dev" matches "standard deviation", "rate1" matches "rates 1".
        keyRegex <- sub("^rpact\\.output\\.format\\.", "", key)
        keyRegex <- gsub("\\.asn$", ".(asn|average.sample.number)", keyRegex)
        keyRegex <- gsub("^simulation\\.result$", "simulation.(results?)?", keyRegex)
        keyRegex <- gsub("^st\\.", "st(andard)?.", keyRegex)
        keyRegex <- gsub("\\.dev$", ".dev(iation)?", keyRegex)
        keyRegex <- gsub("\\.", " ?(\\.|-)? ?", keyRegex)
        keyRegex <- gsub("1", "s? ?(\\.|-)? ?1", keyRegex)
        keyRegex <- sub("y$", "(y|ies)", keyRegex)
        if (grepl("(e|t|c|n|o)$", keyRegex)) {
            keyRegex <- paste0(keyRegex, "s?")
        }
        keyRegex <- paste0("^", keyRegex, "$")
        if (grepl(keyRegex, x)) {
            return(key)
        }
    }

    if (silent) {
        return(NULL)
    }

    stop(
        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
        "output format key for 'parameterName' (", parameterName, ") could not be found"
    )
}

# Writes the given named list of output formats as "key : value" lines
# (plus a small comment header) to 'file'.
.writeOutputFormatsToFile <- function(outputFormatList, file) {
    outputFormatLines <- c()
    outputFormatLines <- c(outputFormatLines, "##")
    outputFormatLines <- c(outputFormatLines, "## rpact output formats")
    outputFormatLines <- c(outputFormatLines, "## www.rpact.com")
    outputFormatLines <- c(outputFormatLines, paste0("## creation date: ", format(Sys.time(), "%d %b %Y, %X")))
    outputFormatLines <- c(outputFormatLines, "##")
    for (key in names(outputFormatList)) {
        outputFormatLines <- c(outputFormatLines, paste(key, ":", outputFormatList[[key]]))
    }
    .writeLinesToFile(outputFormatLines, file)
    # Bug fix: the pluralization formerly referenced the undefined variable
    # 'args' (a local of setOutputFormat), which raised an error here.
    cat(length(outputFormatList), " output format",
        ifelse(length(outputFormatList) == 1, "", "s"),
        " successfully written to file\n",
        sep = ""
    )
}

#' @title
#' Get Output Format
#'
#' @description
#' With this function the format of the standard outputs of all \code{rpact}
#' objects can be shown and written to a file.
#'
#' @param parameterName The name of the parameter whose output format shall be returned.
#' Leave the default \code{NA_character_} if
#' the output format of all parameters shall be returned.
#' @param file An optional file name where to write the output formats
#' (see Details for more information).
#' @param default If \code{TRUE} the default output format of the specified parameter(s)
#' will be returned, default is \code{FALSE}.
#' @param fields If \code{TRUE} the names of all affected object fields will be displayed, default is \code{TRUE}.
#' @inheritParams param_three_dots
#'
#' @details
#' Output formats can be written to a text file by specifying a \code{file}.
#' See \code{\link[=setOutputFormat]{setOutputFormat()}} to learn how to read a formerly saved file.
#'
#' Note that the \code{parameterName} need not match exactly, e.g., for p-values the
#' following parameter names will be recognized amongst others:
#' \enumerate{
#'   \item \code{p value}
#'   \item \code{p.values}
#'   \item \code{p-value}
#'   \item \code{pValue}
#'   \item \code{rpact.output.format.p.value}
#' }
#'
#' @return A named list of output formats.
#'
#' @template examples_set_output_format
#'
#' @family output formats
#'
#' @export
#'
getOutputFormat <- function(parameterName = NA_character_, ...,
        file = NA_character_, default = FALSE, fields = TRUE) {
    # Single (or unspecified) parameter name: delegate directly.
    if (all(is.na(parameterName)) || length(parameterName) <= 1) {
        return(.getOutputFormat(
            parameterName = parameterName,
            file = file, default = default, fields = fields, ...
        ))
    }

    .assertIsSingleCharacter(file, "file", naAllowed = TRUE)
    .assertIsSingleLogical(fields, "fields")

    # Multiple parameter names: collect each format; if a file is requested,
    # the formats are gathered without field info and written once at the end.
    results <- c()
    currentOutputFormats <- c()
    for (p in parameterName) {
        results <- c(results, .getOutputFormat(
            parameterName = p,
            file = NA_character_, default = default, fields = fields, ...
        ))
        if (!is.na(file)) {
            currentOutputFormats <- c(
                currentOutputFormats,
                .getOutputFormat(
                    parameterName = p,
                    file = NA_character_, default = default, fields = FALSE, ...
                )
            )
        }
    }
    if (!is.na(file)) {
        .writeOutputFormatsToFile(currentOutputFormats, file)
    }
    return(results)
}

# Workhorse behind getOutputFormat() for a single parameter name.
# With parameterName = NA all known formats are returned (defaults or the
# currently active options); otherwise the name is resolved to one key.
.getOutputFormat <- function(parameterName = NA_character_, ...,
        file = NA_character_, default = FALSE, fields = TRUE) {
    .assertIsSingleCharacter(parameterName, "parameterName", naAllowed = TRUE)
    .assertIsSingleCharacter(file, "file", naAllowed = TRUE)
    .assertIsSingleLogical(default, "default")
    .assertIsSingleLogical(fields, "fields")
    .warnInCaseOfUnknownArguments(functionName = "getOutputFormat", ...)

    currentOutputFormats <- pairlist()
    if (is.na(parameterName)) {
        if (default) {
            currentOutputFormats <- C_OUTPUT_FORMAT_DEFAULT_VALUES
        } else {
            # Current value of each option, falling back to the default.
            for (key in names(C_OUTPUT_FORMAT_DEFAULT_VALUES)) {
                currentOutputFormats[[key]] <- getOption(key,
                    default = C_OUTPUT_FORMAT_DEFAULT_VALUES[[key]]
                )
            }
        }
        if (!is.na(file)) {
            .writeOutputFormatsToFile(currentOutputFormats, file)
            return(invisible(.addFieldsToOutputFormatList(currentOutputFormats, fields)))
        }
        return(.addFieldsToOutputFormatList(currentOutputFormats, fields))
    }

    key <- .getOutputFormatKey(parameterName)
    if (default) {
        value <- C_OUTPUT_FORMAT_DEFAULT_VALUES[[key]]
    } else {
        value <- getOption(key, default = C_OUTPUT_FORMAT_DEFAULT_VALUES[[key]])
    }
    currentOutputFormats[[key]] <- value
    if (!is.na(file)) {
        .writeOutputFormatsToFile(currentOutputFormats, file)
    }
    return(.addFieldsToOutputFormatList(currentOutputFormats, fields))
}

# Optionally enriches each format entry with the object field names it affects.
.addFieldsToOutputFormatList <- function(outputFormatList, fields = TRUE) {
    if (!fields) {
        return(outputFormatList)
    }
    results <- list()
    for (key in names(outputFormatList)) {
        results[[key]] <- list(
            format = outputFormatList[[key]],
            fields = .getOutputFormatParameterNames(key)
        )
    }
    return(results)
}

# Returns all parameter (field) names whose formatting function belongs to 'key'.
# The "rate" key additionally includes the dynamically formatted "rate1" fields.
.getOutputFormatParameterNames <- function(key) {
    functionName <- .getOutputFormatFunctionName(key)
    if (is.null(functionName)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'key' (", key, ") does not exist")
    }
    parameterNames <- c()
    for (parameterName in names(C_PARAMETER_FORMAT_FUNCTIONS)) {
        if (functionName == C_PARAMETER_FORMAT_FUNCTIONS[[parameterName]]) {
            parameterNames <- c(parameterNames, parameterName)
        }
    }
    if (key == "rpact.output.format.rate") {
        return(c(parameterNames, .getOutputFormatParameterNames("rpact.output.format.rate1")))
    }
    return(parameterNames)
}

# Maps an output format option key to the name of the internal formatting
# function; returns NULL for unknown keys.
.getOutputFormatFunctionName <- function(key) {
    if (key == "rpact.output.format.p.value") {
        return(".formatPValues")
    }
    if (key == "rpact.output.format.repeated.p.value") {
        return(".formatRepeatedPValues")
    }
    if (key == "rpact.output.format.probability") {
        return(".formatProbabilities")
    }
    if (key == "rpact.output.format.futility.probability") {
        return(".formatFutilityProbabilities")
    }
    if (key == "rpact.output.format.sample.size") {
        return(".formatSampleSizes")
    }
    if (key == "rpact.output.format.event") {
        return(".formatEvents")
    }
    if (key == "rpact.output.format.event.time") {
        return(".formatEventTime")
    }
    if (key == "rpact.output.format.conditional.power") {
        return(".formatConditionalPower")
    }
    if (key == "rpact.output.format.critical.value") {
        return(".formatCriticalValues")
    }
    if (key == "rpact.output.format.critical.value.fisher") {
        return(".formatCriticalValuesFisher")
    }
    if (key == "rpact.output.format.test.statistic.fisher") {
        return(".formatTestStatisticsFisher")
    }
    if (key == "rpact.output.format.test.statistic") {
        return(".formatTestStatistics")
    }
    if (key == "rpact.output.format.rate") {
        return(".formatRates")
    }
    if (key == "rpact.output.format.rate1") {
        return(".formatRatesDynamic")
    }
    if (key == "rpact.output.format.accrual.intensity") {
        return(".formatAccrualIntensities")
    }
    if (key == "rpact.output.format.mean") {
        return(".formatMeans")
    }
    if (key == "rpact.output.format.ratio") {
        return(".formatRatios")
    }
    if (key == "rpact.output.format.st.dev") {
        return(".formatStDevs")
    }
    if (key == "rpact.output.format.duration") {
        return(".formatDurations")
    }
    if (key == "rpact.output.format.time") {
        return(".formatTime")
    }
    return(NULL)
}

# Reverse lookup: object field name -> output format option key (or NULL).
.getOutputFormatKeyByFieldName <- function(fieldName) {
    if (is.null(fieldName) || length(fieldName) != 1 || is.na(fieldName)) {
        return(NULL)
    }
    if (!(fieldName %in% names(C_PARAMETER_FORMAT_FUNCTIONS))) {
        return(NULL)
    }
    functionName <- C_PARAMETER_FORMAT_FUNCTIONS[[fieldName]]
    if (is.null(functionName)) {
        return(NULL)
    }
    return(.getOutputFormatKeyByFunctionName(functionName))
}

# Reverse lookup: formatting function name -> output format option key (or NULL).
.getOutputFormatKeyByFunctionName <- function(functionName) {
    for (key in names(C_OUTPUT_FORMAT_DEFAULT_VALUES)) {
        if (.getOutputFormatFunctionName(key) == functionName) {
            return(key)
        }
    }
    return(NULL)
}

# Restores the package default for every output format option.
.resetAllOutputFormats <- function() {
    base::options(C_OUTPUT_FORMAT_DEFAULT_VALUES)
    cat(length(C_OUTPUT_FORMAT_DEFAULT_VALUES), "output formats were successfully reset\n")
}
rpact/R/f_quality_assurance.R0000644000176200001440000006644314445307576016000 0ustar liggesusers## |
## | *Quality Assurance*
## |
## | This file is part of the R package rpact:
## | Confirmatory Adaptive Clinical Trial Design and Analysis
## |
## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD
## | Licensed under "GNU Lesser General Public License" version 3
## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3
## |
## | RPACT company website: https://www.rpact.com
## | rpact package website: https://www.rpact.org
## |
## | Contact us for information about our services: info@rpact.com
## |
## | File version: $Revision: 7126 $
## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_logger.R
NULL

# See testthat::skip_on_cran()
# Skips the current testthat test unless the complete unit test set is enabled.
.skipTestIfDisabled <- function() {
    if (!isTRUE(.isCompleteUnitTestSetEnabled()) &&
            base::requireNamespace("testthat", quietly = TRUE)) {
        testthat::skip("Test is disabled")
    }
}

# Skips the current test on machines that are neither x86-64 nor running R >= 4.
.skipTestIfNotX64 <- function() {
    if (!.isMachine64Bit() && !.isMinimumRVersion4() && base::requireNamespace("testthat", quietly = TRUE)) {
        testthat::skip("The test is only intended for R version 4.x or 64-bit computers (x86-64)")
    }
}

# Skips the current test when the native pipe operator (R >= 4.1.0) is missing.
.skipTestIfPipeOperatorNotAvailable <- function() {
    if (!.isPipeOperatorAvailable()) {
        testthat::skip("The test is disabled because it works only for R version >= 4.1.0 (pipe operator is available)")
    }
}

.isMachine64Bit <- function() {
    return(Sys.info()[["machine"]] == "x86-64")
}

.isMinimumRVersion4 <- function() {
    return(R.Version()$major >= 4)
}

# NOTE(review): the minor version is compared as a string ("1.0"), which
# relies on lexicographic ordering of R's minor version numbers.
.isPipeOperatorAvailable <- function() {
    rVersion <- R.Version()
    return(rVersion$major >= 4 && rVersion$minor >= "1.0")
}

# Extracts the testthat summary line from a testthat.Rout file content string.
# Supports both the old format "[ OK: n | ... FAILED: n ]" and the new
# format "[ FAIL n | WARN n | SKIP n | PASS n ]"; returns a fixed fallback
# line when neither format is found.
.getTestthatResultLine <- function(fileContent) {
    if (grepl("\\[ OK:", fileContent)) {
        indexStart <- regexpr("\\[ OK: \\d", fileContent)[[1]]
        indexEnd <- regexpr("FAILED: \\d{1,5} \\]", fileContent)
        indexEnd <- indexEnd[[1]] + attr(indexEnd, "match.length") - 1
        resultPart <- substr(fileContent, indexStart, indexEnd)
        return(resultPart)
    }

    indexStart <- regexpr("\\[ FAIL \\d", fileContent)[[1]]
    if (indexStart == -1) {
        return("[ FAIL 0 | WARN 0 | SKIP 0 | PASS 14868 ]")
    }

    indexEnd <- regexpr("PASS \\d{1,5} \\]", fileContent)
    indexEnd <- indexEnd[[1]] + attr(indexEnd, "match.length") - 1
    resultPart <- substr(fileContent, indexStart, indexEnd)
    return(resultPart)
}

# Returns the number of failed tests as a character string parsed from the
# testthat summary line (old "FAILED:" or new "FAIL" format).
.getTestthatResultNumberOfFailures <- function(fileContent) {
    if (grepl("FAILED:", fileContent)) {
        line <- .getTestthatResultLine(fileContent)
        index <- regexpr("FAILED: \\d{1,5} \\]", line)
        indexStart <- index[[1]] + 8
        indexEnd <- index[[1]] + attr(index, "match.length") - 3
        return(substr(line, indexStart, indexEnd))
    }

    line <- .getTestthatResultLine(fileContent)
    index <- regexpr("FAIL \\d{1,5} ", line)
    indexStart <- index[[1]] + 5
    indexEnd <- index[[1]] + attr(index, "match.length") - 2
    return(substr(line, indexStart, indexEnd))
}

# Returns the number of skipped tests as a character string parsed from the
# testthat summary line (old "SKIPPED:" or new "SKIP" format).
.getTestthatResultNumberOfSkippedTests <- function(fileContent) {
    if (grepl("SKIPPED:", fileContent)) {
        line <- .getTestthatResultLine(fileContent)
        index <- regexpr("SKIPPED: \\d{1,5} {1,1}", line)
        indexStart <- index[[1]] + 9
        indexEnd <- index[[1]] + attr(index, "match.length") - 2
        return(substr(line, indexStart, indexEnd))
    }

    line <- .getTestthatResultLine(fileContent)
    index <- regexpr("SKIP \\d{1,5} {1,1}", line)
    indexStart <- index[[1]] + 5
    indexEnd <- index[[1]] + attr(index, "match.length") - 2
    return(substr(line, indexStart, indexEnd))
}

# testFileTargetDirectory <- "D:/R/_temp/test_debug"
# Downloads (or unpacks) the full unit test set into 'testFileTargetDirectory',
# dispatching on 'connectionType': "http", "ftp", or "pkg" (local/remote
# package source tarball; for "pkg" the tarball path is passed via 'extra').
.downloadUnitTests <- function(testFileTargetDirectory, ...,
        token,
        secret,
        method = "auto",
        mode = "wb",
        cacheOK = TRUE,
        extra = getOption("download.file.extra"),
        cleanOldFiles = TRUE,
        connectionType = c("http", "ftp", "pkg")) {
    .assertIsSingleCharacter(testFileTargetDirectory, "testFileTargetDirectory")
    .assertIsSingleCharacter(token, "token")
    .assertIsSingleCharacter(secret, "secret")
    connectionType <- match.arg(connectionType)

    # The target directory itself must not be the 'testthat' directory;
    # a 'testthat' sub-directory is created below.
    if (grepl("testthat(/|\\\\)?$", testFileTargetDirectory)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'testFileTargetDirectory' (", testFileTargetDirectory, ") must not end with 'testthat'"
        )
    }

    if (cleanOldFiles) {
        unlink(testFileTargetDirectory, recursive = TRUE)
    }

    dir.create(testFileTargetDirectory, recursive = TRUE)
    testthatSubDirectory <- file.path(testFileTargetDirectory, "testthat")
    if (!dir.exists(testthatSubDirectory)) {
        dir.create(testthatSubDirectory, recursive = TRUE)
    }

    if (connectionType == "ftp") {
        suppressWarnings(.downloadUnitTestsViaFtp(
            testFileTargetDirectory = testFileTargetDirectory,
            testthatSubDirectory = testthatSubDirectory,
            token = token, secret = secret, method = method,
            mode = mode, cacheOK = cacheOK, extra = extra
        ))
    } else if (connectionType == "http") {
        suppressWarnings(.downloadUnitTestsViaHttp(
            testFileTargetDirectory = testFileTargetDirectory,
            testthatSubDirectory = testthatSubDirectory,
            token = token, secret = secret
        ))
    } else if (connectionType == "pkg") {
        .prepareUnitTestFiles(extra, testFileTargetDirectory, token, secret)
    }
}

# Extracts the test files from an rpact source tarball ('packageSource' may be
# a local path or an http(s) URL) into 'testFileTargetDirectory'.
.prepareUnitTestFiles <- function(packageSource, testFileTargetDirectory, token, secret) {
    if (is.null(packageSource)) {
        return(invisible())
    }

    .assertIsValidCipher("token", token)
    .assertIsValidCipher("secret", secret)
    .assertIsSingleCharacter(packageSource, "packageSource")
    if (!file.exists(packageSource)) {
        warning(sQuote("packageSource"), " (", packageSource, ") does not exist")
    }
    if (!grepl("\\.tar\\.gz$", packageSource)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "file ", sQuote(packageSource), " must have a .tar.gz extension")
    }

    # Download remote tarballs to a temp file first.
    unlinkFile <- FALSE
    if (grepl("^http", packageSource)) {
        tempFile <- tempfile(fileext = ".tar.gz")
        if (utils::download.file(packageSource, tempFile) != 0) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote(packageSource), " seems to be an invalid URL")
        }
        packageSource <- tempFile
        unlinkFile <- TRUE
    }

    testthatTempDirectory <- NULL
    tryCatch(
        {
            contentLines <- utils::untar(packageSource, list = TRUE)
            if (!("rpact/DESCRIPTION" %in% contentLines) || !("rpact/tests/testthat/" %in% contentLines)) {
                stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "file ", sQuote(packageSource), " is not an rpact package source file")
            }

            testthatTempDirectory <- file.path(testFileTargetDirectory, "temp")
            utils::untar(packageSource, files = c(
                "rpact/tests/testthat.R",
                "rpact/tests/testthat/"
            ), exdir = testthatTempDirectory)
            testthatTempSubDirectory <- file.path(testthatTempDirectory, "rpact", "tests")
            testFiles <- list.files(testthatTempSubDirectory, pattern = "\\.R$", recursive = TRUE)
            for (testFile in testFiles) {
                file.copy(file.path(testthatTempSubDirectory, testFile), file.path(testFileTargetDirectory, testFile))
            }
            message(length(testFiles), " extracted from ", sQuote(packageSource), " and copied to ", sQuote(testFileTargetDirectory))
        },
        finally = {
            if (!is.null(testthatTempDirectory)) {
                unlink(testthatTempDirectory, recursive = TRUE)
            }
            if (unlinkFile) {
                unlink(packageSource)
            }
        }
    )
}

# Downloads the unit test files over HTTP from unit.tests.rpact.com, using an
# index.txt listing; authentication is embedded in the URL via token/secret.
.downloadUnitTestsViaHttp <- function(testFileTargetDirectory, ...,
        testthatSubDirectory, token, secret) {
    indexFile <- file.path(testFileTargetDirectory, "index.html")
    currentFile <- NA_character_ # tracked for precise 404 error messages
    tryCatch(
        {
            version <- utils::packageVersion("rpact")
            baseUrl <- paste0("http://", token, ":", secret, "@unit.tests.rpact.com/", version, "/tests/")
            if (!dir.exists(testFileTargetDirectory)) {
                dir.create(testFileTargetDirectory)
            }
            if (!dir.exists(testthatSubDirectory)) {
                dir.create(testthatSubDirectory)
            }

            # Prefer the locally installed testthat.R; download it otherwise.
            testthatBaseFile <- system.file("tests", "testthat.R", package = "rpact")
            if (file.exists(testthatBaseFile)) {
                file.copy(testthatBaseFile, file.path(testFileTargetDirectory, "testthat.R"))
            } else {
                currentFile <- "testthat.R"
                result <- download.file(
                    url = paste0(baseUrl, "testthat.R"),
                    destfile = file.path(testFileTargetDirectory, "testthat.R"),
                    method = "auto", mode = "wb"
                )
                if (result != 0) {
                    warning("'testthat.R' download result in ", result)
                }
            }

            currentFile <- "index.txt"
            result <- download.file(
                url = paste0(baseUrl, "testthat/index.txt"),
                destfile = indexFile,
                quiet = TRUE, method = "auto", mode = "wb"
            )
            if (result != 0) {
                warning("Unit test index file download result in ", result)
            }

            # Parse the file names of all .R test files out of the index.
            lines <- .readLinesFromFile(indexFile)
            lines <- lines[grepl("\\.R", lines)]
            testFiles <- gsub("\\.R<.*", ".R", lines)
            testFiles <- gsub(".*>", "", testFiles)
            testFiles <- gsub(" *$", "", testFiles)
            if (length(testFiles) == 0) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "online source does not contain any unit test files"
                )
            }

            startTime <- Sys.time()
            message("Start to download ", length(testFiles), " unit test files (http). Please wait...")
            for (testFile in testFiles) {
                currentFile <- testFile
                result <- download.file(
                    url = paste0(baseUrl, "testthat/", testFile),
                    destfile = file.path(testthatSubDirectory, testFile),
                    quiet = TRUE, method = "auto", mode = "wb"
                )
            }
            message(
                length(testFiles), " unit test files downloaded successfully (needed ",
                .getRuntimeString(startTime, runtimeUnits = "secs"), ")"
            )
        },
        warning = function(w) {
            if (grepl("404 Not Found", w$message)) {
                stop(
                    C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                    "failed to download unit test files (http): file ", sQuote(currentFile), " not found"
                )
            }
        },
        error = function(e) {
            if (grepl(C_EXCEPTION_TYPE_RUNTIME_ISSUE, e$message)) {
                stop(e$message)
            }
            .logDebug(e$message)
            stop(
                C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                "failed to download unit test files (http): illegal 'token' / 'secret' or rpact version ", version, " unknown"
            )
        },
        finally = {
            # Always remove the temporary index file.
            if (file.exists(indexFile)) {
                tryCatch(
                    {
                        file.remove(indexFile)
                    },
                    error = function(e) {
                        warning("Failed to remove unit test index file: ", e$message, call. = FALSE)
                    }
                )
            }
        }
    )
}

# Downloads the unit test files over FTP from ftp.rpact.com; the file list is
# parsed from the server's HTML directory index.
.downloadUnitTestsViaFtp <- function(testFileTargetDirectory, ...,
        testthatSubDirectory,
        token,
        secret,
        method = "auto",
        mode = "wb",
        cacheOK = TRUE,
        extra = getOption("download.file.extra")) {
    indexFile <- file.path(testFileTargetDirectory, "index.html")
    tryCatch(
        {
            version <- utils::packageVersion("rpact")
            baseUrl <- paste0("ftp://", token, ":", secret, "@ftp.rpact.com/", version, "/tests/")

            # Prefer the locally installed testthat.R; download it otherwise.
            testthatBaseFile <- system.file("tests", "testthat.R", package = "rpact")
            if (file.exists(testthatBaseFile)) {
                file.copy(testthatBaseFile, file.path(testFileTargetDirectory, "testthat.R"))
            } else {
                result <- download.file(
                    url = paste0(baseUrl, "testthat.R"),
                    destfile = file.path(testFileTargetDirectory, "testthat.R"),
                    method = method, quiet = TRUE, mode = mode,
                    cacheOK = cacheOK, extra = extra, headers = NULL
                )
                if (result != 0) {
                    warning("'testthat.R' download result in ", result)
                }
            }

            result <- download.file(
                url = paste0(baseUrl, "testthat/"),
                destfile = indexFile,
                method = method, quiet = TRUE, mode = mode,
                cacheOK = cacheOK, extra = extra, headers = NULL
            )
            if (result != 0) {
                warning("Unit test index file download result in ", result)
            }

            # Parse the file names of all .R test files out of the index.
            lines <- .readLinesFromFile(indexFile)
            lines <- lines[grepl("\\.R", lines)]
            testFiles <- gsub("\\.R<.*", ".R", lines)
            testFiles <- gsub(".*>", "", testFiles)
            if (length(testFiles) == 0) {
                stop(
                    C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                    "online source does not contain any unit test files"
                )
            }

            startTime <- Sys.time()
            message("Start to download ", length(testFiles), " unit test files (ftp). Please wait...")
            for (testFile in testFiles) {
                result <- download.file(
                    url = paste0(baseUrl, "testthat/", testFile),
                    destfile = file.path(testthatSubDirectory, testFile),
                    method = method, quiet = TRUE, mode = mode,
                    cacheOK = cacheOK, extra = extra, headers = NULL
                )
            }
            message(
                length(testFiles), " unit test files downloaded successfully (needed ",
                .getRuntimeString(startTime, runtimeUnits = "secs"), ")"
            )
        },
        error = function(e) {
            # NOTE(review): 'version' is assigned inside the tryCatch body; if
            # packageVersion() itself fails, this handler references an
            # undefined 'version' — verify against caller expectations.
            .logDebug(e$message)
            stop(
                C_EXCEPTION_TYPE_RUNTIME_ISSUE,
                "failed to download unit test files (ftp): illegal 'token' / 'secret' or rpact version ", version, " unknown"
            )
        },
        finally = {
            # Always remove the temporary index file.
            if (file.exists(indexFile)) {
                tryCatch(
                    {
                        file.remove(indexFile)
                    },
                    error = function(e) {
                        warning("Failed to remove unit test index file: ", e$message, call. = FALSE)
                    }
                )
            }
        }
    )
}

# Reads a named entry from the 'connection' list argument of testPackage(),
# falling back to the documented default when the entry is absent.
.getConnectionArgument <- function(connection, name = c(
            "token", "secret", "method", "mode", "cacheEnabled",
            "extra", "cleanOldFiles", "connectionType"
        )) {
    if (is.null(connection) || !is.list(connection)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'connection' must be a list (is ", .getClassName(connection), ")")
    }
    name <- match.arg(name)
    defaultValues <- list(
        "token" = NULL,
        "secret" = NULL,
        "method" = "auto",
        "mode" = "wb",
        "cacheEnabled" = TRUE,
        "extra" = getOption("download.file.extra"),
        "cleanOldFiles" = TRUE,
        "connectionType" = "http"
    )
    value <- connection[[name]]
    if (is.null(value)) {
        return(defaultValues[[name]])
    }
    return(value)
}

#' @title
#' Test Package
#'
#' @description
#' This function allows the installed package \code{rpact} to be tested.
#'
#' @param outDir The output directory where all test results shall be saved.
#' By default the current working directory is used.
#' @param completeUnitTestSetEnabled If \code{TRUE} (default) all existing unit tests will
#' be executed; a subset of all unit tests will be used otherwise.
#' @param types The type(s) of tests to be done. Can be one or more of
#' \code{c("tests", "examples", "vignettes")}, default is "tests" only.
#' @param connection A \code{list} where owners of the rpact validation documentation
#' can enter a \code{token} and a \code{secret} to get full access to all unit tests, e.g.,
#' to fulfill regulatory requirements (see \href{https://www.rpact.com}{www.rpact.com} for more information).
#' @inheritParams param_three_dots
#'
#' @details
#' This function creates the subdirectory \code{rpact-tests} in the specified output directory
#' and copies all unit test files of the package to this newly created directory.
#' Then the function runs all tests (or a subset of all tests if
#' \code{completeUnitTestSetEnabled} is \code{FALSE}) using
#' \code{\link[tools]{testInstalledPackage}}.
#' The test results will be saved to the text file \code{testthat.Rout} that can be found
#' in the subdirectory \code{rpact-tests}.
#'
#' @return The value of \code{completeUnitTestSetEnabled} will be returned invisible.
#'
#' @examples
#' \dontrun{
#' testPackage()
#' }
#'
#' @export
#'
testPackage <- function(outDir = ".", ...,
        completeUnitTestSetEnabled = TRUE,
        types = "tests",
        connection = list(token = NULL, secret = NULL)) {
    .assertTestthatIsInstalled()
    .assertMnormtIsInstalled()
    .assertIsSingleCharacter(outDir, "outDir", naAllowed = FALSE)
    if (!dir.exists(outDir)) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "test output directory '", outDir, "' does not exist"
        )
    }

    startTime <- Sys.time()

    # Force English messages so that expected error texts match.
    Sys.setenv("LANGUAGE" = "EN")
    on.exit(Sys.unsetenv("LANGUAGE"))

    # Remember and restore the complete-test-set flag.
    temp <- .isCompleteUnitTestSetEnabled()
    on.exit(Sys.setenv("RPACT_COMPLETE_UNIT_TEST_SET_ENABLED" = temp), add = TRUE)
    Sys.setenv("RPACT_COMPLETE_UNIT_TEST_SET_ENABLED" = completeUnitTestSetEnabled)

    debug <- .getOptionalArgument("debug", ...)
    if (!is.null(debug) && length(debug) == 1 && isTRUE(as.logical(debug))) {
        setLogLevel(C_LOG_LEVEL_DEBUG)
    } else {
        setLogLevel(C_LOG_LEVEL_DISABLED)
    }
    on.exit(resetLogLevel(), add = TRUE)

    # Full access to all unit tests requires both 'token' and 'secret'.
    token <- .getConnectionArgument(connection, "token")
    secret <- .getConnectionArgument(connection, "secret")
    fullTestEnabled <- (!is.null(token) && !is.null(secret) &&
        length(token) == 1 && length(secret) == 1 &&
        !is.na(token) && !is.na(secret))

    if (completeUnitTestSetEnabled && fullTestEnabled) {
        cat("Run all tests. Please wait...\n")
        cat("Have a break - it takes about 15 minutes.\n")
        # Fixed typos in the user-facing message ("Exceution"/"startet").
        cat("Execution of all available unit tests started at ",
            format(startTime, "%H:%M (%d-%B-%Y)"), "\n",
            sep = ""
        )
    } else if (!fullTestEnabled) {
        cat("Run a small subset of all tests. Please wait...\n")
        cat("This is just a quick test (see comments below).\n")
        cat("The entire test will take only some seconds.\n")
    } else {
        cat("Run a subset of all tests. Please wait...\n")
        cat("This is just a quick test, i.e., all time consuming tests will be skipped.\n")
        cat("The entire test will take about a minute.\n")
    }

    if (outDir == ".") {
        outDir <- getwd()
    }

    # Remove results of a previous run so stale files cannot be reported.
    oldResultFiles <- c(
        file.path(outDir, "rpact-tests", "testthat.Rout"),
        file.path(outDir, "rpact-tests", "testthat.Rout.fail")
    )
    for (oldResultFile in oldResultFiles) {
        if (file.exists(oldResultFile)) {
            file.remove(oldResultFile)
        }
    }

    pkgName <- "rpact"
    if (!fullTestEnabled) {
        # Subset only: run the tests shipped with the installed package.
        tools::testInstalledPackage(pkg = pkgName, outDir = outDir, types = types)
    } else {
        # Full set: download all unit tests, then run them.
        testFileTargetDirectory <- file.path(outDir, paste0(pkgName, "-tests"))
        .downloadUnitTests(
            testFileTargetDirectory = testFileTargetDirectory,
            token = token,
            secret = secret,
            method = .getConnectionArgument(connection, "method"),
            mode = .getConnectionArgument(connection, "mode"),
            cacheOK = .getConnectionArgument(connection, "cacheEnabled"),
            extra = .getConnectionArgument(connection, "extra"),
            cleanOldFiles = .getConnectionArgument(connection, "cleanOldFiles"),
            connectionType = .getConnectionArgument(connection, "connectionType")
        )
        .testInstalledPackage(
            testFileDirectory = testFileTargetDirectory,
            pkgName = pkgName, outDir = testFileTargetDirectory, Ropts = ""
        )
    }

    outDir <- file.path(outDir, paste0(pkgName, "-tests"))

    endTime <- Sys.time()

    if (completeUnitTestSetEnabled) {
        # Fixed typo in the user-facing message ("exceution").
        cat("Test execution ended at ",
            format(endTime, "%H:%M (%d-%B-%Y)"), "\n",
            sep = ""
        )
    }
    cat("Total runtime for testing: ",
        .getRuntimeString(startTime, endTime = endTime, runtimeUnits = "auto"),
        ".\n",
        sep = ""
    )

    inputFileName <- file.path(outDir, "testthat.Rout")
    if (file.exists(inputFileName)) {
        fileContent <- base::readChar(inputFileName, file.info(inputFileName)$size)
        if (completeUnitTestSetEnabled && fullTestEnabled) {
            cat("All unit tests were completed successfully, i.e., the installation \n",
                "qualification was successful.\n",
                sep = ""
            )
        } else {
            cat("Unit tests were completed successfully.\n", sep = "")
        }
        cat("Results:\n")
        cat(.getTestthatResultLine(fileContent), "\n")
        cat("\n")
        cat("Test results were written to directory \n",
            "'", outDir, "' (see file 'testthat.Rout')\n",
            sep = ""
        )
        # The helper returns a character string; convert before the numeric
        # comparison (the old character comparison only worked by accident).
        skipped <- as.integer(.getTestthatResultNumberOfSkippedTests(fileContent))
        if (!is.na(skipped) && skipped > 0) {
            cat("-------------------------------------------------------------------------\n")
            cat("Note that ", skipped, " tests were skipped; ",
                "a possible reason may be that expected \n",
                "error messages could not be tested ",
                "because of local translation.\n",
                sep = ""
            )
        }
        cat("-------------------------------------------------------------------------\n")
        cat("Please visit www.rpact.com to learn how to use rpact on FDA/GxP-compliant \n",
            "validated corporate computer systems and how to get a copy of the formal \n",
            "validation documentation that is customized and licensed for exclusive use \n",
            "by your company/organization, e.g., to fulfill regulatory requirements.\n",
            sep = ""
        )
    } else {
        inputFileName <- file.path(outDir, "testthat.Rout.fail")
        if (file.exists(inputFileName)) {
            fileContent <- base::readChar(inputFileName, file.info(inputFileName)$size)
            if (completeUnitTestSetEnabled) {
                cat(.getTestthatResultNumberOfFailures(fileContent),
                    " unit tests failed, i.e., the installation qualification was not successful.\n",
                    sep = ""
                )
            } else {
                cat(.getTestthatResultNumberOfFailures(fileContent), " unit tests failed :(\n", sep = "")
            }
            cat("Results:\n")
            cat(.getTestthatResultLine(fileContent), "\n")
            cat("Test results were written to directory '", outDir, "' (see file 'testthat.Rout.fail')\n", sep = "")
        }
    }

    if (!fullTestEnabled) {
        cat("-------------------------------------------------------------------------\n")
        cat("Note that only a small subset of all available unit tests were executed.\n")
        cat("You need a personal 'token' and 'secret' to perform all unit tests.\n")
        cat("You can find these data in the appendix of the validation documentation \n")
        cat("licensed for your company/organization.\n")
    } else if (!completeUnitTestSetEnabled) {
        cat("Note that only a small subset of all available unit tests were executed.\n")
        cat("Use testPackage(completeUnitTestSetEnabled = TRUE) to perform all unit tests.\n")
    }

    invisible(.isCompleteUnitTestSetEnabled())
}

# Runs every .R file in 'testFileDirectory' in a fresh "R CMD BATCH --vanilla"
# process (mirroring tools::testInstalledPackage); returns invisibly 1L on the
# first failing file (its output is renamed to *.fail), 0L otherwise.
.testInstalledPackage <- function(testFileDirectory, ..., pkgName = "rpact", outDir = ".", Ropts = "") {
    .assertIsSingleCharacter(testFileDirectory, "testFileDirectory", naAllowed = FALSE)
    if (!dir.exists(testFileDirectory)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'testFileDirectory' (", testFileDirectory, ") does not exist")
    }

    workingDirectoryBefore <- setwd(outDir)
    on.exit(setwd(workingDirectoryBefore))

    setwd(testFileDirectory)
    message(gettextf("Running specific tests for package %s", sQuote(pkgName)), domain = NA)
    testFiles <- dir(".", pattern = "\\.R$")
    for (testFile in testFiles) {
        message(gettextf(" Running %s", sQuote(testFile)), domain = NA)
        outfile <- paste0(testFile, "out")
        cmd <- paste(
            shQuote(file.path(R.home("bin"), "R")),
            "CMD BATCH --vanilla --no-timing", Ropts,
            shQuote(testFile), shQuote(outfile)
        )
        # LANGUAGE=C keeps messages in English for reproducible comparisons.
        cmd <- if (.Platform$OS.type == "windows") paste(cmd, "LANGUAGE=C") else paste("LANGUAGE=C", cmd)
        res <- system(cmd)
        if (res) {
            file.rename(outfile, paste(outfile, "fail", sep = "."))
            return(invisible(1L))
        }
        savefile <- paste(outfile, "save", sep = ".")
        if (file.exists(savefile)) {
            message(
                gettextf(
                    " comparing %s to %s ...",
                    sQuote(outfile), sQuote(savefile)
                ),
                appendLF = FALSE, domain = NA
            )
            res <- Rdiff(outfile, savefile)
            if (!res) message(" OK")
        }
    }
    setwd(workingDirectoryBefore)
    return(invisible(0L))
}

# Reads the RPACT_COMPLETE_UNIT_TEST_SET_ENABLED environment flag; an unset
# or non-logical value initializes the variable to FALSE.
.isCompleteUnitTestSetEnabled <- function() {
    completeUnitTestSetEnabled <- as.logical(Sys.getenv("RPACT_COMPLETE_UNIT_TEST_SET_ENABLED"))
    if (is.na(completeUnitTestSetEnabled)) {
        completeUnitTestSetEnabled <- FALSE
        Sys.setenv("RPACT_COMPLETE_UNIT_TEST_SET_ENABLED" = completeUnitTestSetEnabled)
    }
    return(isTRUE(completeUnitTestSetEnabled))
}

#'
#' @title
#' Test Plan Section
#'
#' @param section The section title or description.
#'
#' @description
#' The section title or description will be used in the formal validation documentation.
#'
#' @title
#' Test Plan Section
#'
#' @param section The section title or description.
#'
#' @description
#' The section title or description will be used in the formal validation documentation.
#' For more information visit \url{https://www.rpact.com}
#'
#' @export
#'
#' @keywords internal
#'
test_plan_section <- function(section) {
    # prints a section marker that structures the formal validation report output
    cat("\n\n--- ", section, " ---\n", sep = "")
}

#' @include f_simulation_enrichment.R
NULL

.getSimulationMeansEnrichmentStageSubjects <- function(..., stage,
        conditionalPower,
        conditionalCriticalValue,
        plannedSubjects,
        allocationRatioPlanned,
        selectedPopulations,
        thetaH1,
        overallEffects,
        stDevH1,
        minNumberOfSubjectsPerStage,
        maxNumberOfSubjectsPerStage) {
    # Calculates the number of subjects to be recruited at the given stage of an
    # enrichment design with continuous endpoint.
    #
    # If 'conditionalPower' is specified, the stage-wise sample size is
    # recalculated such that the conditional power is reached for the
    # standardized effect (smallest observed effect over the selected
    # populations, or 'thetaH1', each scaled by 'stDevH1'), bounded below and
    # above by 'minNumberOfSubjectsPerStage' / 'maxNumberOfSubjectsPerStage'.
    # Otherwise the planned stage-wise increment of 'plannedSubjects' is returned.
    stage <- stage - 1 # to be consistent with non-enrichment situation
    gMax <- nrow(overallEffects)
    if (!is.na(conditionalPower)) {
        if (any(selectedPopulations[1:gMax, stage + 1], na.rm = TRUE)) {
            if (is.na(thetaH1)) {
                # smallest observed standardized effect among the selected
                # populations, bounded away from zero (division below)
                thetaStandardized <- max(min(overallEffects[
                    selectedPopulations[1:gMax, stage + 1], stage
                ] / stDevH1, na.rm = TRUE), 1e-07)
            } else {
                # bug fix: the assignment was nested inside max(), i.e.
                # 'max(thetaStandardized <- thetaH1 / stDevH1, 1e-07)', so the
                # result of max() was discarded and the 1e-07 lower bound
                # (applied in the branch above) was silently lost
                thetaStandardized <- max(thetaH1 / stDevH1, 1e-07)
            }
            if (conditionalCriticalValue[stage] > 8) {
                # the conditional power cannot be reached for any reasonable
                # sample size; recruit the allowed maximum
                newSubjects <- maxNumberOfSubjectsPerStage[stage + 1]
            } else {
                newSubjects <- (1 + allocationRatioPlanned[stage])^2 /
                    allocationRatioPlanned[stage] *
                    (max(0, conditionalCriticalValue[stage] +
                        .getQNorm(conditionalPower)))^2 / thetaStandardized^2
                newSubjects <- min(
                    max(minNumberOfSubjectsPerStage[stage + 1], newSubjects),
                    maxNumberOfSubjectsPerStage[stage + 1]
                )
            }
        } else {
            # no population was selected: nothing to recruit
            newSubjects <- 0
        }
    } else {
        newSubjects <- plannedSubjects[stage + 1] - plannedSubjects[stage]
    }
    return(newSubjects)
}
.getSimulatedStageMeansEnrichment <- function(..., design, subsets, prevalences,
        effects, stDevs, stratifiedAnalysis, plannedSubjects, typeOfSelection,
        effectMeasure, adaptations, epsilonValue, rValue, threshold,
        allocationRatioPlanned, minNumberOfSubjectsPerStage,
        maxNumberOfSubjectsPerStage, conditionalPower, thetaH1, stDevH1,
        calcSubjectsFunction, calcSubjectsFunctionIsUserDefined,
        selectPopulationsFunction) {
    # Simulates one trial run of an enrichment design with continuous endpoint:
    # stage-wise subject allocation over the disjoint subsets, simulated
    # stage-wise effects, test statistics per (sub-)population, population
    # selection, and sample size recalculation.
    #
    # Fix: 'na.rm = TRUE' added to the 'populationSubjectsPerStage' sums in the
    # gMax == 3 and gMax == 4 branches that were missing it, for consistency
    # with the analogous computations of the other populations in the same
    # branches (marked below).
    kMax <- length(plannedSubjects)
    pMax <- length(effects)
    gMax <- log(length(effects), 2) + 1 # number of (sub-)populations incl. full population

    subjectsPerStage <- matrix(NA_real_, nrow = pMax, ncol = kMax)
    simEffects <- matrix(NA_real_, nrow = pMax, ncol = kMax)
    populationSubjectsPerStage <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallEffects <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    testStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    overallTestStatistics <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    separatePValues <- matrix(NA_real_, nrow = gMax, ncol = kMax)
    conditionalCriticalValue <- rep(NA_real_, kMax - 1)
    conditionalPowerPerStage <- rep(NA_real_, kMax)
    selectedPopulations <- matrix(FALSE, nrow = gMax, ncol = kMax)
    selectedSubsets <- matrix(FALSE, nrow = pMax, ncol = kMax)
    selectedPopulations[, 1] <- TRUE
    selectedSubsets[, 1] <- TRUE
    adjustedPValues <- rep(NA_real_, kMax)

    if (.isTrialDesignFisher(design)) {
        weights <- .getWeightsFisher(design)
    } else if (.isTrialDesignInverseNormal(design)) {
        weights <- .getWeightsInverseNormal(design)
    }

    for (k in 1:kMax) {
        const <- allocationRatioPlanned[k] / (1 + allocationRatioPlanned[k])^2
        selectedSubsets[, k] <- .createSelectedSubsets(k, selectedPopulations)
        if (k == 1) {
            # subjectsPerStage[, k] <- stats::rmultinom(1, plannedSubjects[k], prevalences)
            subjectsPerStage[, k] <- plannedSubjects[k] * prevalences
        } else {
            prevSelected <- prevalences / sum(prevalences[selectedSubsets[, k]])
            prevSelected[!selectedSubsets[, k]] <- 0
            if (sum(prevSelected, na.rm = TRUE) > 0) {
                # subjectsPerStage[, k] <- stats::rmultinom(1, plannedSubjects[k] - plannedSubjects[k - 1], prevSelected)
                subjectsPerStage[, k] <- (plannedSubjects[k] - plannedSubjects[k - 1]) * prevSelected
            } else {
                break
            }
        }

        selsubs <- !is.na(subjectsPerStage[, k]) & subjectsPerStage[, k] > 0
        simEffects[selsubs, k] <- stats::rnorm(
            rep(1, sum(selsubs)), effects[selsubs],
            stDevs[selsubs] / sqrt(subjectsPerStage[selsubs, k] * const)
        )

        if (gMax == 1) {
            testStatistics[1, k] <- simEffects[1, k] / stDevs[1] * sqrt(subjectsPerStage[1, k] * const)
            populationSubjectsPerStage[1, k] <- subjectsPerStage[1, k]
            overallEffects[1, k] <- sum(subjectsPerStage[1, 1:k] * simEffects[1, 1:k]) /
                sum(subjectsPerStage[1, 1:k])
            overallTestStatistics[1, k] <- overallEffects[1, k] /
                (stDevs[1] / sqrt(sum(subjectsPerStage[1, 1:k]) * const))
        } else if (gMax == 2) {
            # Population S1
            testStatistics[1, k] <- simEffects[1, k] / stDevs[1] * sqrt(subjectsPerStage[1, k] * const)
            populationSubjectsPerStage[1, k] <- subjectsPerStage[1, k]
            overallEffects[1, k] <- sum(subjectsPerStage[1, 1:k] * simEffects[1, 1:k]) /
                sum(subjectsPerStage[1, 1:k])
            overallTestStatistics[1, k] <- overallEffects[1, k] /
                (stDevs[1] / sqrt(sum(subjectsPerStage[1, 1:k]) * const))
            # Full population
            testStatistics[2, k] <- sum(subjectsPerStage[1:2, k] * simEffects[1:2, k], na.rm = TRUE) *
                sqrt(const) / sqrt(sum(subjectsPerStage[1:2, k] * stDevs[1:2]^2, na.rm = TRUE))
            populationSubjectsPerStage[2, k] <- sum(subjectsPerStage[1:2, k], na.rm = TRUE)
            overallEffects[2, k] <- sum(subjectsPerStage[1:2, 1:k] * simEffects[1:2, 1:k], na.rm = TRUE) /
                sum(subjectsPerStage[1:2, 1:k], na.rm = TRUE)
            sd <- sqrt(sum(subjectsPerStage[1:2, 1:k] * stDevs[1:2]^2, na.rm = TRUE) /
                sum(subjectsPerStage[1:2, 1:k], na.rm = TRUE))
            overallTestStatistics[2, k] <- overallEffects[2, k] / sd *
                sqrt(sum(subjectsPerStage[1:2, 1:k], na.rm = TRUE) * const)
        } else if (gMax == 3) {
            # Population S1
            testStatistics[1, k] <- sum(subjectsPerStage[c(1, 3), k] * simEffects[c(1, 3), k], na.rm = TRUE) *
                sqrt(const) / sqrt(sum(subjectsPerStage[c(1, 3), k] * stDevs[c(1, 3)]^2, na.rm = TRUE))
            populationSubjectsPerStage[1, k] <- sum(subjectsPerStage[c(1, 3), k], na.rm = TRUE)
            overallEffects[1, k] <- sum(subjectsPerStage[c(1, 3), 1:k] * simEffects[c(1, 3), 1:k], na.rm = TRUE) /
                sum(subjectsPerStage[c(1, 3), 1:k], na.rm = TRUE)
            sd <- sqrt(sum(subjectsPerStage[c(1, 3), 1:k] * stDevs[c(1, 3)]^2, na.rm = TRUE) /
                sum(subjectsPerStage[c(1, 3), 1:k], na.rm = TRUE))
            overallTestStatistics[1, k] <- overallEffects[1, k] / sd *
                sqrt(sum(subjectsPerStage[c(1, 3), 1:k], na.rm = TRUE) * const)
            # Population S2
            testStatistics[2, k] <- sum(subjectsPerStage[c(2, 3), k] * simEffects[c(2, 3), k], na.rm = TRUE) *
                sqrt(const) / sqrt(sum(subjectsPerStage[c(2, 3), k] * stDevs[c(2, 3)]^2, na.rm = TRUE))
            # na.rm = TRUE added for consistency with population S1 above
            populationSubjectsPerStage[2, k] <- sum(subjectsPerStage[c(2, 3), k], na.rm = TRUE)
            overallEffects[2, k] <- sum(subjectsPerStage[c(2, 3), 1:k] * simEffects[c(2, 3), 1:k], na.rm = TRUE) /
                sum(subjectsPerStage[c(2, 3), 1:k], na.rm = TRUE)
            sd <- sqrt(sum(subjectsPerStage[c(2, 3), 1:k] * stDevs[c(2, 3)]^2, na.rm = TRUE) /
                sum(subjectsPerStage[c(2, 3), 1:k], na.rm = TRUE))
            overallTestStatistics[2, k] <- overallEffects[2, k] / sd *
                sqrt(sum(subjectsPerStage[c(2, 3), 1:k], na.rm = TRUE) * const)
            # Full population
            testStatistics[3, k] <- sum(subjectsPerStage[1:4, k] * simEffects[1:4, k], na.rm = TRUE) *
                sqrt(const) / sqrt(sum(subjectsPerStage[1:4, k] * stDevs[1:4]^2, na.rm = TRUE))
            # na.rm = TRUE added for consistency with population S1 above
            populationSubjectsPerStage[3, k] <- sum(subjectsPerStage[1:4, k], na.rm = TRUE)
            overallEffects[3, k] <- sum(subjectsPerStage[1:4, 1:k] * simEffects[1:4, 1:k], na.rm = TRUE) /
                sum(subjectsPerStage[1:4, 1:k], na.rm = TRUE)
            sd <- sqrt(sum(subjectsPerStage[1:4, 1:k] * stDevs[1:4]^2, na.rm = TRUE) /
                sum(subjectsPerStage[1:4, 1:k], na.rm = TRUE))
            overallTestStatistics[3, k] <- overallEffects[3, k] / sd *
                sqrt(sum(subjectsPerStage[1:4, 1:k], na.rm = TRUE) * const)
        } else if (gMax == 4) {
            # Population S1
            testStatistics[1, k] <- sum(subjectsPerStage[c(1, 4, 5, 7), k] * simEffects[c(1, 4, 5, 7), k], na.rm = TRUE) *
                sqrt(const) / sqrt(sum(subjectsPerStage[c(1, 4, 5, 7), k] * stDevs[c(1, 4, 5, 7)]^2, na.rm = TRUE))
            populationSubjectsPerStage[1, k] <- sum(subjectsPerStage[c(1, 4, 5, 7), k], na.rm = TRUE)
            overallEffects[1, k] <- sum(subjectsPerStage[c(1, 4, 5, 7), 1:k] * simEffects[c(1, 4, 5, 7), 1:k], na.rm = TRUE) /
                sum(subjectsPerStage[c(1, 4, 5, 7), 1:k], na.rm = TRUE)
            sd <- sqrt(sum(subjectsPerStage[c(1, 4, 5, 7), 1:k] * stDevs[c(1, 4, 5, 7)]^2, na.rm = TRUE) /
                sum(subjectsPerStage[c(1, 4, 5, 7), 1:k], na.rm = TRUE))
            overallTestStatistics[1, k] <- overallEffects[1, k] / sd *
                sqrt(sum(subjectsPerStage[c(1, 4, 5, 7), 1:k], na.rm = TRUE) * const)
            # Population S2
            testStatistics[2, k] <- sum(subjectsPerStage[c(2, 4, 6, 7), k] * simEffects[c(2, 4, 6, 7), k], na.rm = TRUE) *
                sqrt(const) / sqrt(sum(subjectsPerStage[c(2, 4, 6, 7), k] * stDevs[c(2, 4, 6, 7)]^2, na.rm = TRUE))
            # na.rm = TRUE added for consistency with population S1 above
            populationSubjectsPerStage[2, k] <- sum(subjectsPerStage[c(2, 4, 6, 7), k], na.rm = TRUE)
            overallEffects[2, k] <- sum(subjectsPerStage[c(2, 4, 6, 7), 1:k] * simEffects[c(2, 4, 6, 7), 1:k], na.rm = TRUE) /
                sum(subjectsPerStage[c(2, 4, 6, 7), 1:k], na.rm = TRUE)
            sd <- sqrt(sum(subjectsPerStage[c(2, 4, 6, 7), 1:k] * stDevs[c(2, 4, 6, 7)]^2, na.rm = TRUE) /
                sum(subjectsPerStage[c(2, 4, 6, 7), 1:k], na.rm = TRUE))
            overallTestStatistics[2, k] <- overallEffects[2, k] / sd *
                sqrt(sum(subjectsPerStage[c(2, 4, 6, 7), 1:k], na.rm = TRUE) * const)
            # Population S3
            testStatistics[3, k] <- sum(subjectsPerStage[c(3, 5, 6, 7), k] * simEffects[c(3, 5, 6, 7), k], na.rm = TRUE) *
                sqrt(const) / sqrt(sum(subjectsPerStage[c(3, 5, 6, 7), k] * stDevs[c(3, 5, 6, 7)]^2, na.rm = TRUE))
            # na.rm = TRUE added for consistency with population S1 above
            populationSubjectsPerStage[3, k] <- sum(subjectsPerStage[c(3, 5, 6, 7), k], na.rm = TRUE)
            overallEffects[3, k] <- sum(subjectsPerStage[c(3, 5, 6, 7), 1:k] * simEffects[c(3, 5, 6, 7), 1:k], na.rm = TRUE) /
                sum(subjectsPerStage[c(3, 5, 6, 7), 1:k], na.rm = TRUE)
            sd <- sqrt(sum(subjectsPerStage[c(3, 5, 6, 7), 1:k] * stDevs[c(3, 5, 6, 7)]^2, na.rm = TRUE) /
                sum(subjectsPerStage[c(3, 5, 6, 7), 1:k], na.rm = TRUE))
            overallTestStatistics[3, k] <- overallEffects[3, k] / sd *
                sqrt(sum(subjectsPerStage[c(3, 5, 6, 7), 1:k], na.rm = TRUE) * const)
            # Full population
            testStatistics[4, k] <- sum(subjectsPerStage[1:8, k] * simEffects[1:8, k], na.rm = TRUE) *
                sqrt(const) / sqrt(sum(subjectsPerStage[1:8, k] * stDevs[1:8]^2, na.rm = TRUE))
            # na.rm = TRUE added for consistency with population S1 above
            populationSubjectsPerStage[4, k] <- sum(subjectsPerStage[1:8, k], na.rm = TRUE)
            overallEffects[4, k] <- sum(subjectsPerStage[1:8, 1:k] * simEffects[1:8, 1:k], na.rm = TRUE) /
                sum(subjectsPerStage[1:8, 1:k], na.rm = TRUE)
            sd <- sqrt(sum(subjectsPerStage[1:8, 1:k] * stDevs[1:8]^2, na.rm = TRUE) /
                sum(subjectsPerStage[1:8, 1:k], na.rm = TRUE))
            overallTestStatistics[4, k] <- overallEffects[4, k] / sd *
                sqrt(sum(subjectsPerStage[1:8, 1:k], na.rm = TRUE) * const)
        }

        # de-select everything that was not carried forward at this stage
        testStatistics[!selectedPopulations[, k], k] <- NA_real_
        overallEffects[!selectedPopulations[, k], k] <- NA_real_
        overallTestStatistics[!selectedPopulations[, k], k] <- NA_real_
        separatePValues[, k] <- 1 - stats::pnorm(testStatistics[, k])

        if (k < kMax) {
            if (colSums(selectedPopulations)[k] == 0) {
                break
            }

            # Bonferroni adjustment
            adjustedPValues[k] <- min(min(separatePValues[, k], na.rm = TRUE) *
                colSums(selectedPopulations)[k], 1 - 1e-7)

            # conditional critical value to reject the null hypotheses at the next stage of the trial
            if (.isTrialDesignFisher(design)) {
                conditionalCriticalValue[k] <- .getOneMinusQNorm(min((design$criticalValues[k + 1] /
                    prod(adjustedPValues[1:k]^weights[1:k]))^(1 / weights[k + 1]), 1 - 1e-7))
            } else {
                conditionalCriticalValue[k] <- (design$criticalValues[k + 1] *
                    sqrt(design$informationRates[k + 1]) -
                    .getOneMinusQNorm(adjustedPValues[1:k]) %*% weights[1:k]) /
                    sqrt(design$informationRates[k + 1] - design$informationRates[k])
            }

            if (adaptations[k]) {
                if (effectMeasure == "testStatistic") {
                    selectedPopulations[, k + 1] <- (selectedPopulations[, k] & .selectPopulations(
                        k, overallTestStatistics[, k], typeOfSelection,
                        epsilonValue, rValue, threshold, selectPopulationsFunction
                    ))
                } else if (effectMeasure == "effectEstimate") {
                    selectedPopulations[, k + 1] <- (selectedPopulations[, k] & .selectPopulations(
                        k, overallEffects[, k], typeOfSelection,
                        epsilonValue, rValue, threshold, selectPopulationsFunction
                    ))
                }
                newSubjects <- calcSubjectsFunction(
                    stage = k + 1, # to be consistent with non-enrichment situation, cf. line 36
                    conditionalPower = conditionalPower,
                    conditionalCriticalValue = conditionalCriticalValue,
                    plannedSubjects = plannedSubjects,
                    allocationRatioPlanned = allocationRatioPlanned,
                    selectedPopulations = selectedPopulations,
                    thetaH1 = thetaH1,
                    stDevH1 = stDevH1,
                    overallEffects = overallEffects,
                    minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage,
                    maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage
                )
                if (is.null(newSubjects) || length(newSubjects) != 1 ||
                        !is.numeric(newSubjects) || is.na(newSubjects) || newSubjects < 0) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "'calcSubjectsFunction' returned an illegal or undefined result (", newSubjects, "); ",
                        "the output must be a single numeric value >= 0"
                    )
                }
                if (!is.na(conditionalPower) || calcSubjectsFunctionIsUserDefined) {
                    plannedSubjects[(k + 1):kMax] <- plannedSubjects[k] +
                        cumsum(rep(newSubjects, kMax - k))
                }
            } else {
                selectedPopulations[, k + 1] <- selectedPopulations[, k]
            }

            if (is.na(thetaH1)) {
                thetaStandardized <- min(overallEffects[selectedPopulations[1:gMax, k], k] / stDevH1, na.rm = TRUE)
            } else {
                thetaStandardized <- thetaH1 / stDevH1
            }
            conditionalPowerPerStage[k] <- 1 - stats::pnorm(conditionalCriticalValue[k] -
                thetaStandardized * sqrt(plannedSubjects[k + 1] - plannedSubjects[k]) *
                sqrt(allocationRatioPlanned[k]) / (1 + allocationRatioPlanned[k]))
        }
    }

    return(list(
        subjectsPerStage = subjectsPerStage,
        populationSubjectsPerStage = populationSubjectsPerStage,
        allocationRatioPlanned = allocationRatioPlanned,
        overallEffects = overallEffects,
        testStatistics = testStatistics,
        overallTestStatistics = overallTestStatistics,
        separatePValues = separatePValues,
        conditionalCriticalValue = conditionalCriticalValue,
        conditionalPowerPerStage = conditionalPowerPerStage,
        selectedPopulations = selectedPopulations
    ))
}
#' subjects, parameter configuration, and population
#' selection rule in the enrichment situation.
#' An allocation ratio can be specified referring to the ratio of the number of subjects in the active
#' treatment groups as compared to the control group.
#'
#' The definition of \code{thetaH1} and/or \code{stDevH1} only makes sense if \code{kMax} > 1
#' and if \code{conditionalPower}, \code{minNumberOfSubjectsPerStage}, and
#' \code{maxNumberOfSubjectsPerStage} (or \code{calcSubjectsFunction}) are defined.
#'
#' \code{calcSubjectsFunction}\cr
#' This function returns the number of subjects at a given conditional power and conditional
#' critical value for the specified testing situation. The function might depend on the variables
#' \code{stage},
#' \code{selectedPopulations},
#' \code{plannedSubjects},
#' \code{allocationRatioPlanned},
#' \code{minNumberOfSubjectsPerStage},
#' \code{maxNumberOfSubjectsPerStage},
#' \code{conditionalPower},
#' \code{conditionalCriticalValue},
#' \code{overallEffects}, and
#' \code{stDevH1}.
#' The function has to contain the three-dots argument '...' (see examples).
#' #' @template return_object_simulation_results #' @template how_to_get_help_for_generics #' #' @template examples_get_simulation_enrichment_means #' #' @export #' getSimulationEnrichmentMeans <- function(design = NULL, ..., effectList = NULL, intersectionTest = c("Simes", "SpiessensDebois", "Bonferroni", "Sidak"), # C_INTERSECTION_TEST_ENRICHMENT_DEFAULT stratifiedAnalysis = TRUE, # C_STRATIFIED_ANALYSIS_DEFAULT, adaptations = NA, typeOfSelection = c("best", "rBest", "epsilon", "all", "userDefined"), # C_TYPE_OF_SELECTION_DEFAULT effectMeasure = c("effectEstimate", "testStatistic"), # C_EFFECT_MEASURE_DEFAULT successCriterion = c("all", "atLeastOne"), # C_SUCCESS_CRITERION_DEFAULT epsilonValue = NA_real_, rValue = NA_real_, threshold = -Inf, plannedSubjects = NA_integer_, allocationRatioPlanned = NA_real_, minNumberOfSubjectsPerStage = NA_real_, maxNumberOfSubjectsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, stDevH1 = NA_real_, maxNumberOfIterations = 1000L, # C_MAX_SIMULATION_ITERATIONS_DEFAULT seed = NA_real_, calcSubjectsFunction = NULL, selectPopulationsFunction = NULL, showStatistics = FALSE) { if (is.null(design)) { design <- .getDefaultDesign(..., type = "simulation") .warnInCaseOfUnknownArguments( functionName = "getSimulationEnrichmentMeans", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "showStatistics"), ... ) } else { .assertIsTrialDesignInverseNormalOrFisher(design) .warnInCaseOfUnknownArguments(functionName = "getSimulationEnrichmentMeans", ignore = "showStatistics", ...) .warnInCaseOfTwoSidedPowerArgument(...) 
} .assertIsOneSidedDesign(design, designType = "enrichment", engineType = "simulation") calcSubjectsFunctionIsUserDefined <- !is.null(calcSubjectsFunction) simulationResults <- .createSimulationResultsEnrichmentObject( design = design, effectList = effectList, intersectionTest = intersectionTest, stratifiedAnalysis = stratifiedAnalysis, adaptations = adaptations, typeOfSelection = typeOfSelection, effectMeasure = effectMeasure, successCriterion = successCriterion, epsilonValue = epsilonValue, rValue = rValue, threshold = threshold, plannedSubjects = plannedSubjects, # means + rates only allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, # means + rates only maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, # means + rates only conditionalPower = conditionalPower, thetaH1 = thetaH1, # means + survival only stDevH1 = stDevH1, # means only maxNumberOfIterations = maxNumberOfIterations, seed = seed, calcSubjectsFunction = calcSubjectsFunction, # means + rates only selectPopulationsFunction = selectPopulationsFunction, showStatistics = showStatistics, endpoint = "means" ) design <- simulationResults$.design successCriterion <- simulationResults$successCriterion effectMeasure <- simulationResults$effectMeasure adaptations <- simulationResults$adaptations gMax <- simulationResults$populations kMax <- simulationResults$.design$kMax intersectionTest <- simulationResults$intersectionTest typeOfSelection <- simulationResults$typeOfSelection effectList <- simulationResults$effectList thetaH1 <- simulationResults$thetaH1 # means + survival only stDevH1 <- simulationResults$stDevH1 # means only conditionalPower <- simulationResults$conditionalPower minNumberOfSubjectsPerStage <- simulationResults$minNumberOfSubjectsPerStage maxNumberOfSubjectsPerStage <- simulationResults$maxNumberOfSubjectsPerStage allocationRatioPlanned <- simulationResults$allocationRatioPlanned calcSubjectsFunction <- 
simulationResults$calcSubjectsFunction if (length(allocationRatioPlanned) == 1) { allocationRatioPlanned <- rep(allocationRatioPlanned, kMax) } indices <- .getIndicesOfClosedHypothesesSystemForSimulation(gMax = gMax) cols <- nrow(effectList$effects) simulatedSelections <- array(0, dim = c(kMax, cols, gMax)) simulatedRejections <- array(0, dim = c(kMax, cols, gMax)) simulatedNumberOfPopulations <- matrix(0, nrow = kMax, ncol = cols) simulatedSubjectsPerStage <- array(0, dim = c(kMax, cols, 2^(gMax - 1))) simulatedSuccessStopping <- matrix(0, nrow = kMax, ncol = cols) simulatedFutilityStopping <- matrix(0, nrow = kMax - 1, ncol = cols) simulatedConditionalPower <- matrix(0, nrow = kMax, ncol = cols) simulatedRejectAtLeastOne <- rep(0, cols) expectedNumberOfSubjects <- rep(0, cols) iterations <- matrix(0, nrow = kMax, ncol = cols) len <- maxNumberOfIterations * kMax * gMax * cols dataIterationNumber <- rep(NA_real_, len) dataStageNumber <- rep(NA_real_, len) dataPopulationNumber <- rep(NA_real_, len) dataEffect <- rep(NA_real_, len) dataSubjectsPopulation <- rep(NA_real_, len) dataSubjectsActivePopulation <- rep(NA_real_, len) dataNumberOfSubjects <- rep(NA_real_, len) dataNumberOfCumulatedSubjects <- rep(NA_real_, len) dataRejectPerStage <- rep(NA, len) dataFutilityStop <- rep(NA_real_, len) dataSuccessStop <- rep(NA, len) dataFutilityStop <- rep(NA, len) dataTestStatistics <- rep(NA_real_, len) dataConditionalCriticalValue <- rep(NA_real_, len) dataConditionalPowerAchieved <- rep(NA_real_, len) dataEffectEstimate <- rep(NA_real_, len) dataPValuesSeparate <- rep(NA_real_, len) stDevs <- effectList$stDevs if (length(stDevs) == 1) { stDevs <- rep(stDevs, ncol(effectList$effects)) } if (is.na(stDevH1)) { stDevH1 <- max(stDevs, na.rm = TRUE) } index <- 1 for (i in 1:cols) { for (j in 1:maxNumberOfIterations) { stageResults <- .getSimulatedStageMeansEnrichment( design = design, subsets = effectList$subsets, prevalences = effectList$prevalences, effects = 
effectList$effects[i, ], stDevs = stDevs, stratifiedAnalysis = stratifiedAnalysis, plannedSubjects = plannedSubjects, typeOfSelection = typeOfSelection, effectMeasure = effectMeasure, adaptations = adaptations, epsilonValue = epsilonValue, rValue = rValue, threshold = threshold, allocationRatioPlanned = allocationRatioPlanned, minNumberOfSubjectsPerStage = minNumberOfSubjectsPerStage, maxNumberOfSubjectsPerStage = maxNumberOfSubjectsPerStage, conditionalPower = conditionalPower, thetaH1 = thetaH1, stDevH1 = stDevH1, calcSubjectsFunction = calcSubjectsFunction, calcSubjectsFunctionIsUserDefined = calcSubjectsFunctionIsUserDefined, selectPopulationsFunction = selectPopulationsFunction ) closedTest <- .performClosedCombinationTestForSimulationEnrichment( stageResults = stageResults, design = design, indices = indices, intersectionTest = intersectionTest, successCriterion = successCriterion ) rejectAtSomeStage <- FALSE rejectedPopulationsBefore <- rep(FALSE, gMax) for (k in 1:kMax) { simulatedRejections[k, i, ] <- simulatedRejections[k, i, ] + (closedTest$rejected[, k] & closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore) simulatedSelections[k, i, ] <- simulatedSelections[k, i, ] + closedTest$selectedPopulations[, k] simulatedNumberOfPopulations[k, i] <- simulatedNumberOfPopulations[k, i] + sum(closedTest$selectedPopulations[, k]) if (!any(is.na(closedTest$successStop))) { simulatedSuccessStopping[k, i] <- simulatedSuccessStopping[k, i] + closedTest$successStop[k] } if ((kMax > 1) && (k < kMax)) { if (!any(is.na(closedTest$futilityStop))) { simulatedFutilityStopping[k, i] <- simulatedFutilityStopping[k, i] + (closedTest$futilityStop[k] && !closedTest$successStop[k]) } if (!closedTest$successStop[k] && !closedTest$futilityStop[k]) { simulatedConditionalPower[k + 1, i] <- simulatedConditionalPower[k + 1, i] + stageResults$conditionalPowerPerStage[k] } } iterations[k, i] <- iterations[k, i] + 1 for (p in 1:2^(gMax - 1)) { if 
(!is.na(stageResults$subjectsPerStage[p, k])) { simulatedSubjectsPerStage[k, i, p] <- simulatedSubjectsPerStage[k, i, p] + stageResults$subjectsPerStage[p, k] } } for (g in 1:gMax) { dataIterationNumber[index] <- j dataStageNumber[index] <- k dataPopulationNumber[index] <- g dataEffect[index] <- i dataSubjectsPopulation[index] <- round(stageResults$populationSubjectsPerStage[g, k], 1) dataSubjectsActivePopulation[index] <- round(stageResults$populationSubjectsPerStage[g, k], 1) dataNumberOfSubjects[index] <- round(sum(stageResults$populationSubjectsPerStage[, k], na.rm = TRUE), 1) dataNumberOfCumulatedSubjects[index] <- round(sum( stageResults$populationSubjectsPerStage[, 1:k], na.rm = TRUE ), 1) dataRejectPerStage[index] <- closedTest$rejected[g, k] dataTestStatistics[index] <- stageResults$testStatistics[g, k] dataSuccessStop[index] <- closedTest$successStop[k] if (k < kMax) { dataFutilityStop[index] <- closedTest$futilityStop[k] dataConditionalCriticalValue[index] <- stageResults$conditionalCriticalValue[k] dataConditionalPowerAchieved[index + 1] <- stageResults$conditionalPowerPerStage[k] } dataEffectEstimate[index] <- stageResults$overallEffects[g, k] dataPValuesSeparate[index] <- closedTest$separatePValues[g, k] index <- index + 1 } if (!rejectAtSomeStage && any(closedTest$rejected[, k] & closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore)) { simulatedRejectAtLeastOne[i] <- simulatedRejectAtLeastOne[i] + 1 rejectAtSomeStage <- TRUE } if ((k < kMax) && (closedTest$successStop[k] || closedTest$futilityStop[k])) { # rejected hypotheses remain rejected also in case of early stopping simulatedRejections[(k + 1):kMax, i, ] <- simulatedRejections[(k + 1):kMax, i, ] + matrix( (closedTest$rejected[, k] & closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore), kMax - k, gMax, byrow = TRUE ) break } rejectedPopulationsBefore <- closedTest$rejected[, k] & closedTest$selectedPopulations[1:gMax, k] | rejectedPopulationsBefore } } 
simulatedSubjectsPerStage[is.na(simulatedSubjectsPerStage)] <- 0 simulatedSubjectsPerStage[, i, ] <- simulatedSubjectsPerStage[, i, ] / iterations[, i] if (kMax > 1) { simulatedRejections[2:kMax, i, ] <- simulatedRejections[2:kMax, i, ] - simulatedRejections[1:(kMax - 1), i, ] stopping <- cumsum(simulatedSuccessStopping[1:(kMax - 1), i] + simulatedFutilityStopping[, i]) / maxNumberOfIterations expectedNumberOfSubjects[i] <- sum(simulatedSubjectsPerStage[1, i, ] + t(1 - stopping) %*% simulatedSubjectsPerStage[2:kMax, i, ]) } else { expectedNumberOfSubjects[i] <- sum(simulatedSubjectsPerStage[1, i, ]) } } simulatedConditionalPower[1, ] <- NA_real_ if (kMax > 1) { simulatedConditionalPower[2:kMax, ] <- simulatedConditionalPower[2:kMax, ] / iterations[2:kMax, ] } simulationResults$numberOfPopulations <- simulatedNumberOfPopulations / iterations simulationResults$rejectAtLeastOne <- simulatedRejectAtLeastOne / maxNumberOfIterations simulationResults$selectedPopulations <- simulatedSelections / maxNumberOfIterations simulationResults$rejectedPopulationsPerStage <- simulatedRejections / maxNumberOfIterations simulationResults$successPerStage <- simulatedSuccessStopping / maxNumberOfIterations simulationResults$futilityPerStage <- simulatedFutilityStopping / maxNumberOfIterations simulationResults$futilityStop <- base::colSums(simulatedFutilityStopping / maxNumberOfIterations) if (kMax > 1) { simulationResults$earlyStop <- simulationResults$futilityPerStage + simulationResults$successPerStage[1:(kMax - 1), ] simulationResults$conditionalPowerAchieved <- simulatedConditionalPower } simulationResults$sampleSizes <- simulatedSubjectsPerStage simulationResults$expectedNumberOfSubjects <- expectedNumberOfSubjects simulationResults$iterations <- iterations if (!all(is.na(simulationResults$conditionalPowerAchieved))) { simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED) } if (any(simulationResults$rejectedPopulationsPerStage < 0)) { 
stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "internal error, simulation not possible due to numerical overflow") } data <- data.frame( iterationNumber = dataIterationNumber, stageNumber = dataStageNumber, populationNumber = dataPopulationNumber, effect = dataEffect, numberOfSubjects = dataNumberOfSubjects, numberOfCumulatedSubjects = dataNumberOfCumulatedSubjects, subjectsPopulation = dataSubjectsPopulation, effectEstimate = dataEffectEstimate, testStatistic = dataTestStatistics, pValue = dataPValuesSeparate, conditionalCriticalValue = round(dataConditionalCriticalValue, 6), conditionalPowerAchieved = round(dataConditionalPowerAchieved, 6), rejectPerStage = dataRejectPerStage, successStop = dataSuccessStop, futilityPerStage = dataFutilityStop ) data <- data[!is.na(data$effectEstimate), ] simulationResults$.data <- data return(simulationResults) } rpact/R/class_performance_score.R0000644000176200001440000000551514450551044016600 0ustar liggesusers## | ## | *Performance score classes* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7148 $ ## | Last changed: $Date: 2023-07-03 15:50:22 +0200 (Mo, 03 Jul 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' #' @name PerformanceScore #' #' @title #' Performance Score #' #' @description #' Contains the conditional performance score, its sub-scores and components according to #' Herrmann et al. (2020) for a given simulation result. #' #' @details #' Use \link{getPerformanceScore} to calculate the performance score. 
#'
#' @include f_core_constants.R
#' @include f_core_assertions.R
#' @include f_core_plot.R
#' @include class_core_parameter_set.R
#' @include class_simulation_results.R
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
PerformanceScore <- setRefClass("PerformanceScore",
    contains = "ParameterSet",
    fields = list(
        # the SimulationResults object the score was calculated for
        .simulationResults = "ANY",
        .plotSettings = "PlotSettings",
        .alternative = "numeric",
        # location/variation components and sub-score for the sample size
        locationSampleSize = "numeric",
        variationSampleSize = "numeric",
        subscoreSampleSize = "numeric",
        # location/variation components and sub-score for the conditional power
        locationConditionalPower = "numeric",
        variationConditionalPower = "numeric",
        subscoreConditionalPower = "numeric",
        # overall conditional performance score
        performanceScore = "numeric"
    ),
    methods = list(
        initialize = function(simulationResults, ...) {
            # stores the simulation results and initializes plot settings and
            # the shared parameter name / format lookup tables
            callSuper(.simulationResults = simulationResults, ...)
            .plotSettings <<- PlotSettings()
            .parameterNames <<- C_PARAMETER_NAMES
            .parameterFormatFunctions <<- C_PARAMETER_FORMAT_FUNCTIONS
        },
        .show = function(showType = 1, digits = NA_integer_, consoleOutputEnabled = TRUE) {
            "Method for automatically printing performance score objects"
            .resetCat()
            # printing is delegated to the underlying simulation results object,
            # which renders the score alongside the simulation output
            if (!is.null(.simulationResults)) {
                .simulationResults$.show(
                    showType = showType,
                    digits = digits,
                    showStatistics = FALSE,
                    consoleOutputEnabled = consoleOutputEnabled,
                    performanceScore = .self
                )
            }
        },
        show = function(showType = 1, digits = NA_integer_) {
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        }
    )
)
## | services: info@rpact.com
## |
## | File version: $Revision: 7139 $
## | Last changed: $Date: 2023-06-28 08:15:31 +0200 (Mi, 28 Jun 2023) $
## | Last changed by: $Author: pahlke $
## |

#' @include f_core_utilities.R
#' @include f_logger.R
#' @include f_object_r_code.R
NULL

# Resolves the 'design' and 'dataInput' arguments of the public analysis functions.
# Callers may pass a design plus a dataset, only a dataset (optionally carrying a
# design in its '.design' field), or the dataset in place of the design argument.
# Returns a list with the resolved design and a deep copy of the dataset.
.getDesignAndDataInput <- function(..., design, dataInput) {
    designProvided <- !missing(design)
    dataInputProvided <- !missing(dataInput)
    if (!designProvided && !dataInputProvided) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, sQuote("dataInput"), " must be specified")
    }

    # take the design attached to the dataset if available,
    # otherwise fall back to the default analysis design
    getAttachedOrDefaultDesign <- function(ds) {
        attachedDesign <- ds$.design
        if (!is.null(attachedDesign) && inherits(attachedDesign, "TrialDesign")) {
            return(attachedDesign)
        }
        return(.getDefaultDesign(..., type = "analysis"))
    }

    if (!dataInputProvided && designProvided && inherits(design, "Dataset")) {
        # the dataset was passed via the first (design) argument
        dataInput <- design
        design <- getAttachedOrDefaultDesign(dataInput)
    } else if (dataInputProvided && !designProvided) {
        design <- getAttachedOrDefaultDesign(dataInput)
    } else {
        .warnInCaseOfTwoSidedPowerArgument(...)
    }

    .assertIsTrialDesign(design)
    .assertIsDataset(dataInput)

    return(list(
        design = design,
        dataInput = dataInput$copy(shallow = FALSE)
    ))
}

#' @title
#' Get Analysis Results
#'
#' @description
#' Calculates and returns the analysis results for the specified design and data.
#'
#' @inheritParams param_design
#' @inheritParams param_dataInput
#' @inheritParams param_directionUpper
#' @inheritParams param_thetaH0
#' @inheritParams param_nPlanned
#' @inheritParams param_allocationRatioPlanned
#' @inheritParams param_stage
#' @inheritParams param_maxInformation
#' @inheritParams param_informationEpsilon
#' @param ...
Further arguments to be passed to methods (cf., separate functions in "See Also" below), e.g., #' \describe{ #' \item{\code{thetaH1} and \code{stDevH1} (or \code{assumedStDev} / \code{assumedStDevs}), #' \code{pi1}, \code{pi2}, or \code{piTreatments}, \code{piControl(s)}}{ #' The assumed effect size, standard deviation or rates to calculate the conditional power if \code{nPlanned} #' is specified. For survival designs, \code{thetaH1} refers to the hazard ratio. #' For one-armed trials with binary outcome, only \code{pi1} can be specified, for two-armed trials with binary outcome, #' \code{pi1} and \code{pi2} can be specified referring to the assumed treatment and control rate, respectively. #' In multi-armed or enrichment designs, you can #' specify a value or a vector with elements referring to the treatment arms or the sub-populations, #' respectively. For testing rates, the parameters to be specified are \code{piTreatments} and \code{piControl} #' (multi-arm designs) and \code{piTreatments} and \code{piControls} (enrichment designs).\cr #' If not specified, the conditional power is calculated under the assumption of observed effect sizes, #' standard deviations, rates, or hazard ratios.} #' \item{\code{iterations}}{Iterations for simulating the power for Fisher's combination test. #' If the power for more than one remaining stages is to be determined for #' Fisher's combination test, it is estimated via simulation with specified \cr #' \code{iterations}, the default is \code{1000}.} #' \item{\code{seed}}{Seed for simulating the conditional power for Fisher's combination test. #' See above, default is a random seed.} #' \item{\code{normalApproximation}}{The type of computation of the p-values. Default is \code{FALSE} for #' testing means (i.e., the t test is used) and \code{TRUE} for testing rates and the hazard ratio. 
#' For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test #' (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values. #' In the survival setting, \code{normalApproximation = FALSE} has no effect.} #' \item{\code{equalVariances}}{The type of t test. For testing means in two treatment groups, either #' the t test assuming that the variances are equal or the t test without assuming this, #' i.e., the test of Welch-Satterthwaite is calculated, default is \code{TRUE}.} #' \item{\code{intersectionTest}}{Defines the multiple test for the intersection #' hypotheses in the closed system of hypotheses when testing multiple hypotheses. #' Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, #' \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}. #' Four options are available in population enrichment designs: \code{"SpiessensDebois"} (one subset only), #' \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.} #' \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple treatment arms (> 2) #' or population enrichment designs for testing means. For multiple arms, three options are available: #' \code{"overallPooled"}, \code{"pairwisePooled"}, and \code{"notPooled"}, default is \code{"overallPooled"}. #' For enrichment designs, the options are: \code{"pooled"}, \code{"pooledFromFull"} (one subset only), #' and \code{"notPooled"}, default is \code{"pooled"}.} #' \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. #' For testing means and rates, also a non-stratified analysis based on overall data can be performed. 
#' For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.} #' } #' #' @details #' Given a design and a dataset, at given stage the function calculates the test results #' (effect sizes, stage-wise test statistics and p-values, overall p-values and test statistics, #' conditional rejection probability (CRP), conditional power, Repeated Confidence Intervals (RCIs), #' repeated overall p-values, and final stage p-values, median unbiased effect estimates, #' and final confidence intervals. #' #' For designs with more than two treatments arms (multi-arm designs) or enrichment designs #' a closed combination test is performed. #' That is, additionally the statistics to be used in a closed testing procedure are provided. #' #' The conditional power is calculated if the planned sample size for the subsequent stages (\code{nPlanned}) #' is specified. The conditional power is calculated either under the assumption of the observed effect or #' under the assumption of an assumed effect, that has to be specified (see above).\cr #' For testing rates in a two-armed trial, pi1 and pi2 typically refer to the rates in the treatment #' and the control group, respectively. This is not mandatory, however, and so pi1 and pi2 can be interchanged. #' In many-to-one multi-armed trials, piTreatments and piControl refer to the rates in the treatment arms and #' the one control arm, and so they cannot be interchanged. piTreatments and piControls in enrichment designs #' can principally be interchanged, but we use the plural form to indicate that the rates can be differently #' specified for the sub-populations. #' #' Median unbiased effect estimates and confidence intervals are calculated if #' a group sequential design or an inverse normal combination test design was chosen, i.e., it is not applicable #' for Fisher's p-value combination test design. 
#' For the inverse normal combination test design with more than two stages, a warning informs that the validity
#' of the confidence interval is theoretically shown only if no sample size change was performed.
#'
#' A final stage p-value for Fisher's combination test is calculated only if a two-stage design was chosen.
#' For Fisher's combination test, the conditional power for more than one remaining stages is estimated via simulation.
#'
#' Final stage p-values, median unbiased effect estimates, and final confidence intervals are not calculated
#' for multi-arm and enrichment designs.
#'
#' @return Returns an \code{\link{AnalysisResults}} object.
#' The following generics (R generic functions) are available for this result object:
#' \itemize{
#'   \item \code{\link[=names.AnalysisResults]{names}} to obtain the field names,
#'   \item \code{\link[=print.ParameterSet]{print()}} to print the object,
#'   \item \code{\link[=summary.AnalysisResults]{summary()}} to display a summary of the object,
#'   \item \code{\link[=plot.AnalysisResults]{plot()}} to plot the object,
#'   \item \code{\link[=as.data.frame.AnalysisResults]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}},
#'   \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}.
#' }
#' @template how_to_get_help_for_generics
#'
#' @template details_analysis_base_mnormt_dependency
#'
#' @seealso
#' \code{\link[=getObservedInformationRates]{getObservedInformationRates()}}
#'
#' @family analysis functions
#'
#' @template examples_get_analysis_results
#'
#' @export
#'
getAnalysisResults <- function(design, dataInput, ...,
        directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT
        thetaH0 = NA_real_,
        nPlanned = NA_real_,
        allocationRatioPlanned = 1, # C_ALLOCATION_RATIO_DEFAULT
        stage = NA_integer_,
        maxInformation = NULL,
        informationEpsilon = NULL) {
    # resolve design/dataInput (either argument may carry the other) and work on a data copy
    designAndDataInput <- .getDesignAndDataInput(design = design, dataInput = dataInput, ...)
    design <- designAndDataInput$design
    dataInput <- designAndDataInput$dataInput

    repeatedPValues <- NULL
    informationRatesRecalculated <- FALSE

    # Automatic recalculation of information rates via 'maxInformation' is only supported
    # for alpha spending group sequential designs without beta spending and a single hypothesis
    if (.isAlphaSpendingDesign(design) &&
            (design$typeBetaSpending == "none") &&
            .isTrialDesignGroupSequential(design) &&
            !.isMultiArmDataset(dataInput)) {
        observedInformationRates <- NULL
        absoluteInformations <- NULL
        status <- NULL
        if (!is.null(maxInformation) && !is.na(maxInformation)) {
            showObservedInformationRatesMessage <- .getOptionalArgument(
                "showObservedInformationRatesMessage",
                optionalArgumentDefaultValue = TRUE, ...
            )
            observedInformation <- getObservedInformationRates(
                dataInput,
                maxInformation = maxInformation,
                informationEpsilon = informationEpsilon,
                stage = stage,
                showObservedInformationRatesMessage = showObservedInformationRatesMessage
            )
            observedInformationRates <- observedInformation$informationRates
            absoluteInformations <- observedInformation$absoluteInformations
            status <- observedInformation$status
        } else if (!is.null(informationEpsilon) && !is.na(informationEpsilon)) {
            warning("'informationEpsilon' (", .arrayToString(informationEpsilon),
                ") will be ignored because 'maxInformation' is undefined",
                call. = FALSE
            )
        }

        if (!is.null(observedInformationRates)) {
            stageFromData <- dataInput$getNumberOfStages()
            # under-/over-running: the observed information deviates from the planned maximum,
            # so the alpha actually spent at earlier looks has to be reconstructed
            if (!is.null(status) && status %in% c("under-running", "over-running") &&
                    length(observedInformationRates) > 1) {
                if (stageFromData == 1) {
                    stop(
                        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
                        "Recalculation of the information rates not possible at stage 1"
                    )
                }
                if (!(getLogLevel() %in% c(C_LOG_LEVEL_DISABLED, C_LOG_LEVEL_PROGRESS))) {
                    message(
                        "Calculate alpha values that have actually been spent ",
                        "at earlier interim analyses at stage ", (stageFromData - 1)
                    )
                }
                .assertIsSingleInteger(stage, "stage", naAllowed = TRUE, validateType = FALSE)
                observedInformationRatesBefore <- getObservedInformationRates(
                    dataInput,
                    maxInformation = maxInformation,
                    informationEpsilon = informationEpsilon,
                    stage = ifelse(!is.na(stage), stage - 1, stageFromData - 1),
                    showObservedInformationRatesMessage = FALSE
                )$informationRates
                # pad with the planned information rates until the vector has kMax entries
                if (length(observedInformationRatesBefore) < length(design$informationRates)) {
                    for (i in (length(observedInformationRatesBefore) + 1):length(design$informationRates)) {
                        if (observedInformationRatesBefore[length(observedInformationRatesBefore)] < 1) {
                            observedInformationRatesBefore <-
                                c(observedInformationRatesBefore, design$informationRates[i])
                        }
                    }
                }
                # rebuild the design object with the information rates observed before the final look
                designBefore <- eval(parse(text = getObjectRCode(design,
                    newArgumentValues = list(
                        informationRates = observedInformationRatesBefore
                    ),
                    stringWrapParagraphWidth = NULL
                )))
                if (is.na(stage) || stage == stageFromData) {
                    # recursive call at the previous stage to obtain the repeated p-values
                    repeatedPValues <- getAnalysisResults(
                        design = designBefore,
                        dataInput = dataInput,
                        directionUpper = directionUpper,
                        thetaH0 = thetaH0,
                        nPlanned = nPlanned,
                        allocationRatioPlanned = allocationRatioPlanned,
                        stage = stageFromData - 1,
                        maxInformation = maxInformation,
                        informationEpsilon = informationEpsilon,
                        showObservedInformationRatesMessage = FALSE
                    )$repeatedPValues
                }
                userAlphaSpending <- designBefore$alphaSpent
                message(
                    "Use alpha values that have actually been spent at earlier stages ",
                    "and spend all remaining alpha at the final analysis, ",
                    "i.e., userAlphaSpending = (",
                    .arrayToString(userAlphaSpending, digits = 6), ") "
                )
                observedInformationRates <- getObservedInformationRates(
                    dataInput,
                    maxInformation = absoluteInformations[stageFromData],
                    informationEpsilon = informationEpsilon,
                    stage = stage,
                    showObservedInformationRatesMessage = FALSE
                )$informationRates
                design <- eval(parse(text = getObjectRCode(design,
                    newArgumentValues = list(
                        informationRates = observedInformationRates,
                        userAlphaSpending = userAlphaSpending,
                        typeOfDesign = C_TYPE_OF_DESIGN_AS_USER
                    ),
                    stringWrapParagraphWidth = NULL
                )))
                # NOTE(review): the option name contains a typo ("analyis" instead of "analysis");
                # it must stay as is unless every read of this option elsewhere is renamed too
                options("rpact.analyis.repeated.p.values.warnings.enabled" = "FALSE")
                warning("Repeated p-values not available for automatic recalculation of boundaries at final stage",
                    call. = FALSE
                )
            } else {
                design <- eval(parse(text = getObjectRCode(design,
                    newArgumentValues = list(informationRates = observedInformationRates),
                    stringWrapParagraphWidth = NULL
                )))
            }
            informationRatesRecalculated <- TRUE
        }
    } else {
        if (!is.null(maxInformation) && !is.na(maxInformation)) {
            warning("'maxInformation' (", .arrayToString(maxInformation),
                ") will be ignored because it is only applicable for alpha spending", "\n",
                "group sequential designs with no or fixed futility bounds and a single hypothesis",
                call. = FALSE
            )
        }
        if (!is.null(informationEpsilon) && !is.na(informationEpsilon)) {
            warning("'informationEpsilon' (", .arrayToString(informationEpsilon),
                ") will be ignored because it is only applicable for alpha spending", "\n",
                "group sequential designs with no or fixed futility bounds and a single hypothesis",
                call. = FALSE
            )
        }
    }

    result <- NULL
    if (.isEnrichmentDataset(dataInput)) {
        result <- .getAnalysisResultsEnrichment(
            design = design, dataInput = dataInput,
            directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, stage = stage, ...
        )
    } else if (.isMultiArmDataset(dataInput)) {
        result <- .getAnalysisResultsMultiArm(
            design = design, dataInput = dataInput,
            directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, stage = stage, ...
        )
    } else {
        stage <- .getStageFromOptionalArguments(...,
            dataInput = dataInput,
            design = design,
            stage = stage,
            showWarnings = TRUE
        )
        .assertIsValidDirectionUpper(directionUpper, sided = design$sided)
        .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage)
        on.exit(dataInput$.trim())
        .assertIsValidThetaH0DataInput(thetaH0, dataInput)
        if (is.null(maxInformation) || is.na(maxInformation)) {
            .assertAreSuitableInformationRates(design, dataInput, stage = stage)
        }
        .assertIsValidNPlanned(nPlanned, design$kMax, stage, required = FALSE)
        .assertIsValidAllocationRatioPlanned(allocationRatioPlanned,
            numberOfGroups = dataInput$getNumberOfGroups()
        )
        # dispatch on the endpoint type; thetaH0 defaults depend on the endpoint
        if (dataInput$isDatasetMeans()) {
            if (is.na(thetaH0)) {
                thetaH0 <- C_THETA_H0_MEANS_DEFAULT
            }
            result <- .getAnalysisResultsMeans(
                design = design, dataInput = dataInput,
                directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned,
                allocationRatioPlanned = allocationRatioPlanned, stage = stage, ...
            )
        } else if (dataInput$isDatasetRates()) {
            if (is.na(thetaH0)) {
                thetaH0 <- C_THETA_H0_RATES_DEFAULT
            }
            result <- .getAnalysisResultsRates(
                design = design, dataInput = dataInput,
                directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned,
                allocationRatioPlanned = allocationRatioPlanned, stage = stage, ...
            )
        } else if (dataInput$isDatasetSurvival()) {
            if (is.na(thetaH0)) {
                thetaH0 <- C_THETA_H0_SURVIVAL_DEFAULT
            }
            result <- .getAnalysisResultsSurvival(
                design = design, dataInput = dataInput,
                directionUpper = directionUpper, thetaH0 = thetaH0, nPlanned = nPlanned,
                allocationRatioPlanned = allocationRatioPlanned, stage = stage, ...
            )
        }
        if (is.null(result)) {
            stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not implemented yet")
        }
        if (informationRatesRecalculated) {
            result$maxInformation <- as.integer(maxInformation)
            result$.setParameterType("maxInformation", C_PARAM_USER_DEFINED)
            if (!is.null(informationEpsilon) && !is.na(informationEpsilon)) {
                result$informationEpsilon <- informationEpsilon
                result$.setParameterType("informationEpsilon", C_PARAM_USER_DEFINED)
            }
        }
    }

    if (!is.null(result) && !is.null(repeatedPValues)) {
        result$repeatedPValues <- repeatedPValues
    }

    # blank out repeated CI / p-value entries at stages where no alpha was spent
    if (design$kMax > 1 && .isTrialDesignInverseNormalOrGroupSequential(design) &&
            design$typeOfDesign %in% c(C_TYPE_OF_DESIGN_AS_USER, C_TYPE_OF_DESIGN_NO_EARLY_EFFICACY)) {
        indices <- design$userAlphaSpending == 0
        if (.isEnrichmentDataset(dataInput) || .isMultiArmDataset(dataInput)) {
            result$repeatedConfidenceIntervalLowerBounds[, indices] <- NA_real_
            result$repeatedConfidenceIntervalUpperBounds[, indices] <- NA_real_
            result$repeatedPValues[, indices] <- NA_real_
        } else {
            result$repeatedConfidenceIntervalLowerBounds[indices] <- NA_real_
            result$repeatedConfidenceIntervalUpperBounds[indices] <- NA_real_
            result$repeatedPValues[indices] <- NA_real_
        }
    }

    # re-enable the warnings possibly disabled above (option name typo kept intentionally)
    options("rpact.analyis.repeated.p.values.warnings.enabled" = "TRUE")
    return(result)
}

#' @title
#' Get Stage Results
#'
#' @description
#' Returns summary statistics and p-values for a given data set and a given design.
#'
#' @inheritParams param_design
#' @inheritParams param_dataInput
#' @inheritParams param_stage
#' @param ... Further (optional) arguments to be passed:
#' \describe{
#'   \item{\code{thetaH0}}{The null hypothesis value,
#'   default is \code{0} for the normal and the binary case (testing means and rates, respectively),
#'   it is \code{1} for the survival case (testing the hazard ratio).\cr\cr
#'   For non-inferiority designs, \code{thetaH0} is the non-inferiority bound.
#' That is, in case of (one-sided) testing of #' \itemize{ #' \item \emph{means}: a value \code{!= 0} #' (or a value \code{!= 1} for testing the mean ratio) can be specified. #' \item \emph{rates}: a value \code{!= 0} #' (or a value \code{!= 1} for testing the risk ratio \code{pi1 / pi2}) can be specified. #' \item \emph{survival data}: a bound for testing H0: #' \code{hazard ratio = thetaH0 != 1} can be specified. #' } #' For testing a rate in one sample, a value \code{thetaH0} in (0, 1) has to be specified for #' defining the null hypothesis H0: \code{pi = thetaH0}.} #' \item{\code{normalApproximation}}{The #' type of computation of the p-values. Default is \code{FALSE} for #' testing means (i.e., the t test is used) and \code{TRUE} for testing rates and the hazard ratio. #' For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test #' (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values. #' In the survival setting, \code{normalApproximation = FALSE} has no effect.} #' \item{\code{equalVariances}}{The type of t test. For testing means in two treatment groups, either #' the t test assuming that the variances are equal or the t test without assuming this, #' i.e., the test of Welch-Satterthwaite is calculated, default is \code{TRUE}.} #' \item{\code{directionUpper}}{The direction of one-sided testing. #' Default is \code{TRUE} which means that larger values of the #' test statistics yield smaller p-values.} #' \item{\code{intersectionTest}}{Defines the multiple test for the intersection #' hypotheses in the closed system of hypotheses when testing multiple hypotheses. #' Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"}, #' \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}. 
#' Four options are available in population enrichment designs: \code{"SpiessensDebois"} (one subset only), #' \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.} #' \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple treatment arms (> 2) #' or population enrichment designs for testing means. For multiple arms, three options are available: #' \code{"overallPooled"}, \code{"pairwisePooled"}, and \code{"notPooled"}, default is \code{"overallPooled"}. #' For enrichment designs, the options are: \code{"pooled"}, \code{"pooledFromFull"} (one subset only), #' and \code{"notPooled"}, default is \code{"pooled"}.} #' \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen. #' For testing means and rates, also a non-stratified analysis based on overall data can be performed. #' For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.} #' } #' #' @details #' Calculates and returns the stage results of the specified design and data input at the specified stage. #' #' @return Returns a \code{\link{StageResults}} object. #' \itemize{ #' \item \code{\link[=names.StageResults]{names}} to obtain the field names, #' \item \code{\link[=print.FieldSet]{print()}} to print the object, #' \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object, #' \item \code{\link[=plot.StageResults]{plot()}} to plot the object, #' \item \code{\link[=as.data.frame.StageResults]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}}, #' \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}. 
#' }
#' @template how_to_get_help_for_generics
#'
#' @family analysis functions
#'
#' @template examples_get_stage_results
#'
#' @export
#'
getStageResults <- function(design, dataInput, ..., stage = NA_integer_) {
    # resolve design/dataInput (either argument may carry the other) and work on a data copy
    designAndDataInput <- .getDesignAndDataInput(design = design, dataInput = dataInput, ...)
    design <- designAndDataInput$design
    dataInput <- designAndDataInput$dataInput

    if (.isEnrichmentDataset(dataInput)) {
        return(.getStageResultsEnrichment(
            design = design, dataInput = dataInput, stage = stage, ...
        ))
    } else if (.isMultiArmDataset(dataInput)) {
        return(.getStageResultsMultiArm(
            design = design, dataInput = dataInput, stage = stage, ...
        ))
    } else {
        stage <- .getStageFromOptionalArguments(...,
            dataInput = dataInput, design = design, stage = stage
        )
        .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage)
        on.exit(dataInput$.trim())
        # dispatch on the endpoint type of the dataset
        if (dataInput$isDatasetMeans()) {
            return(.getStageResultsMeans(
                design = design, dataInput = dataInput,
                stage = stage, userFunctionCallEnabled = TRUE, ...
            ))
        }
        if (dataInput$isDatasetRates()) {
            return(.getStageResultsRates(
                design = design, dataInput = dataInput,
                stage = stage, userFunctionCallEnabled = TRUE, ...
            ))
        }
        if (dataInput$isDatasetSurvival()) {
            return(.getStageResultsSurvival(
                design = design, dataInput = dataInput,
                stage = stage, userFunctionCallEnabled = TRUE, ...
            ))
        }
    }

    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not supported")
}

# Determines the analysis stage: an explicitly specified 'stage' argument wins
# (validated against kMax and, if 'showWarnings', against the number of stages in
# the data); otherwise the number of stages in 'dataInput', capped at kMax, is used.
.getStageFromOptionalArguments <- function(..., dataInput, design, showWarnings = FALSE) {
    .assertIsTrialDesign(design)

    stage <- .getOptionalArgument("stage", ...)
    if (!is.null(stage) && !is.na(stage)) {
        .assertIsValidStage(stage, design$kMax)
        if (showWarnings) {
            .assertIsDataset(dataInput)
            if (stage > dataInput$getNumberOfStages()) {
                warning("'stage' (", stage, ") will be ignored because 'dataInput' ",
                    "has only ", dataInput$getNumberOfStages(), " stages defined",
                    call. = FALSE
                )
            }
        }
        return(as.integer(stage))
    }

    # no explicit stage given: derive it from the data
    .assertIsDataset(dataInput)
    stage <- dataInput$getNumberOfStages()
    stage <- min(stage, design$kMax)
    stage <- as.integer(stage)
    .assertIsValidStage(stage, design$kMax)
    return(stage)
}

#'
#' @title
#' Get Test Actions
#'
#' @description
#' Returns test actions.
#'
#' @inheritParams param_stageResults
#' @param ... Only available for backward compatibility.
#'
#' @details
#' Returns the test actions of the specified design and stage results at the specified stage.
#'
#' @return Returns a \code{\link[base]{character}} vector of length \code{kMax}
#' containing the test actions of each stage.
#'
#' @family analysis functions
#'
#' @template examples_get_test_actions
#'
#' @export
#'
getTestActions <- function(stageResults, ...) {
    .warnInCaseOfUnknownArguments(functionName = "getTestActions", ...)
    stageResults <- .getStageResultsObject(stageResults, functionName = "getTestActions", ...)
    .assertIsStageResultsNonMultiHypotheses(stageResults)
    design <- stageResults$.design
    testActions <- rep(NA_character_, design$kMax)
    if (.isTrialDesignInverseNormal(design)) {
        # inverse normal: compare the combination test statistic with the critical values
        for (k in 1:stageResults$stage) {
            if (design$sided == 1) {
                if (k < design$kMax) {
                    if (stageResults$combInverseNormal[k] > design$criticalValues[k]) {
                        testActions[k] <- "reject and stop"
                    } else if (stageResults$combInverseNormal[k] < design$futilityBounds[k]) {
                        testActions[k] <- "accept and stop"
                    } else {
                        testActions[k] <- "continue"
                    }
                } else {
                    if (stageResults$combInverseNormal[k] > design$criticalValues[k]) {
                        testActions[k] <- "reject"
                    } else {
                        testActions[k] <- "accept"
                    }
                }
            }
            if (design$sided == 2) {
                # two-sided: no futility stop, decision based on the absolute statistic
                if (k < design$kMax) {
                    if (abs(stageResults$combInverseNormal[k]) > design$criticalValues[k]) {
                        testActions[k] <- "reject and stop"
                    } else {
                        testActions[k] <- "continue"
                    }
                } else {
                    if (abs(stageResults$combInverseNormal[k]) > design$criticalValues[k]) {
                        testActions[k] <- "reject"
                    } else {
                        testActions[k] <- "accept"
                    }
                }
            }
        }
    } else if (.isTrialDesignGroupSequential(design)) {
        # group sequential: transform the overall p-values to the z-scale first
        for (k in 1:stageResults$stage) {
            if (design$sided == 1) {
                if (k < design$kMax) {
                    if (.getOneMinusQNorm(stageResults$overallPValues[k]) > design$criticalValues[k]) {
                        testActions[k] <- "reject and stop"
                    } else if (.getOneMinusQNorm(stageResults$overallPValues[k]) < design$futilityBounds[k]) {
                        testActions[k] <- "accept and stop"
                    } else {
                        testActions[k] <- "continue"
                    }
                } else {
                    if (.getOneMinusQNorm(stageResults$overallPValues[k]) > design$criticalValues[k]) {
                        testActions[k] <- "reject"
                    } else {
                        testActions[k] <- "accept"
                    }
                }
            }
            if (design$sided == 2) {
                if (k < design$kMax) {
                    if (abs(.getOneMinusQNorm(stageResults$overallPValues[k])) > design$criticalValues[k]) {
                        testActions[k] <- "reject and stop"
                    } else {
                        testActions[k] <- "continue"
                    }
                } else {
                    if (abs(.getOneMinusQNorm(stageResults$overallPValues[k])) > design$criticalValues[k]) {
                        testActions[k] <- "reject"
                    } else {
                        testActions[k] <- "accept"
                    }
                }
            }
        }
    } else if (.isTrialDesignFisher(design)) {
        # Fisher combination: small values of the product statistic lead to rejection;
        # futility at interim stages is based on the stage-wise p-value vs. alpha0Vec
        for (k in 1:stageResults$stage) {
            if (design$sided == 1) {
                if (k < design$kMax) {
                    if (stageResults$combFisher[k] < design$criticalValues[k]) {
                        testActions[k] <- "reject and stop"
                    } else if (stageResults$pValues[k] > design$alpha0Vec[k]) {
                        testActions[k] <- "accept and stop"
                    } else {
                        testActions[k] <- "continue"
                    }
                } else {
                    if (stageResults$combFisher[k] < design$criticalValues[k]) {
                        testActions[k] <- "reject"
                    } else {
                        testActions[k] <- "accept"
                    }
                }
            }
            if (design$sided == 2) {
                if (k < design$kMax) {
                    if (min(stageResults$combFisher[k], 1 - stageResults$combFisher[k]) < design$criticalValues[k]) {
                        testActions[k] <- "reject and stop"
                    } else {
                        testActions[k] <- "continue"
                    }
                } else {
                    if (min(stageResults$combFisher[k], 1 - stageResults$combFisher[k]) < design$criticalValues[k]) {
                        testActions[k] <- "reject"
                    } else {
                        testActions[k] <- "accept"
                    }
                }
            }
        }
    }
    return(testActions)
}

#'
#' @title
#' Get Repeated Confidence Intervals
#'
#' @description
#' Calculates and returns
#' the lower and upper limit of the repeated confidence intervals of the trial.
#'
#' @inheritParams param_design
#' @inheritParams param_dataInput
#' @inheritParams param_directionUpper
#' @inheritParams param_tolerance
#' @inheritParams param_stage
#' @param ... Further arguments to be passed to methods (cf., separate functions in "See Also" below), e.g.,
#' \describe{
#'   \item{\code{normalApproximation}}{The type of computation of the p-values. Default is \code{FALSE} for
#'   testing means (i.e., the t test is used) and \code{TRUE} for testing rates and the hazard ratio.
#'   For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test
#'   (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values.
#'   In the survival setting, \code{normalApproximation = FALSE} has no effect.}
#'   \item{\code{equalVariances}}{The type of t test. For testing means in two treatment groups, either
#'   the t test assuming that the variances are equal or the t test without assuming this,
#'   i.e., the test of Welch-Satterthwaite is calculated, default is \code{TRUE}.}
#'   \item{\code{intersectionTest}}{Defines the multiple test for the intersection
#'   hypotheses in the closed system of hypotheses when testing multiple hypotheses.
#'   Five options are available in multi-arm designs: \code{"Dunnett"}, \code{"Bonferroni"}, \code{"Simes"},
#'   \code{"Sidak"}, and \code{"Hierarchical"}, default is \code{"Dunnett"}.
#'   Four options are available in population enrichment designs: \code{"SpiessensDebois"} (one subset only),
#'   \code{"Bonferroni"}, \code{"Simes"}, and \code{"Sidak"}, default is \code{"Simes"}.}
#'   \item{\code{varianceOption}}{Defines the way to calculate the variance in multiple treatment arms (> 2)
#'   or population enrichment designs for testing means. For multiple arms, three options are available:
#'   \code{"overallPooled"}, \code{"pairwisePooled"}, and \code{"notPooled"}, default is \code{"overallPooled"}.
#'   For enrichment designs, the options are: \code{"pooled"}, \code{"pooledFromFull"} (one subset only),
#'   and \code{"notPooled"}, default is \code{"pooled"}.}
#'   \item{\code{stratifiedAnalysis}}{For enrichment designs, typically a stratified analysis should be chosen.
#'   For testing means and rates, also a non-stratified analysis based on overall data can be performed.
#'   For survival data, only a stratified analysis is possible (see Brannath et al., 2009), default is \code{TRUE}.}
#' }
#'
#' @details
#' The repeated confidence interval at a given stage of the trial contains the
#' parameter values that are not rejected using the specified sequential design.
#' It can be calculated at each stage of the trial and can thus be used as a monitoring tool.
#'
#' The repeated confidence intervals are provided up to the specified stage.
#'
#' @return Returns a \code{\link[base]{matrix}} with \code{2} rows
#' and \code{kMax} columns containing the lower RCI limits in the first row and
#' the upper RCI limits in the second row, where each column represents a stage.
#'
#' @family analysis functions
#'
#' @template examples_get_repeated_confidence_intervals
#'
#' @export
#'
getRepeatedConfidenceIntervals <- function(design, dataInput, ...,
        directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT
        tolerance = 1e-06, # C_ANALYSIS_TOLERANCE_DEFAULT
        stage = NA_integer_) {
    .assertIsValidTolerance(tolerance)
    # resolve design/dataInput (either argument may carry the other) and work on a data copy
    designAndDataInput <- .getDesignAndDataInput(design = design, dataInput = dataInput, ...)
    design <- designAndDataInput$design
    dataInput <- designAndDataInput$dataInput

    if (.isEnrichmentDataset(dataInput)) {
        return(.getRepeatedConfidenceIntervalsEnrichment(
            design = design, dataInput = dataInput, stage = stage, ...
        ))
    }
    if (.isMultiArmDataset(dataInput)) {
        return(.getRepeatedConfidenceIntervalsMultiArm(
            design = design, dataInput = dataInput, stage = stage, ...
        ))
    }

    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design, stage = stage)
    .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage)
    on.exit(dataInput$.trim())

    # dispatch on the endpoint type of the dataset
    if (dataInput$isDatasetMeans()) {
        return(.getRepeatedConfidenceIntervalsMeans(
            design = design, dataInput = dataInput,
            directionUpper = directionUpper,
            tolerance = tolerance, stage = stage, ...
        ))
    }
    if (dataInput$isDatasetRates()) {
        return(.getRepeatedConfidenceIntervalsRates(
            design = design, dataInput = dataInput,
            directionUpper = directionUpper,
            tolerance = tolerance, stage = stage, ...
        ))
    }
    if (dataInput$isDatasetSurvival()) {
        return(.getRepeatedConfidenceIntervalsSurvival(
            design = design, dataInput = dataInput,
            directionUpper = directionUpper,
            tolerance = tolerance, stage = stage, ...
        ))
    }

    stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not implemented yet")
}

# Normalizes the 'stageResults' argument of the public accessor functions.
# Accepts a StageResults object directly, a TrialDesign followed by the stage results
# (deprecated legacy call style), or a Dataset (stage results are computed on the fly);
# as a last resort the first StageResults object found in '...' is returned.
.getStageResultsObject <- function(stageResults, ..., functionName) {
    if (missing(stageResults)) {
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'stageResults' must be defined")
    }

    .stopInCaseOfIllegalStageDefinition(stageResults, ...)

    args <- list(...)
    if (.isTrialDesign(stageResults)) {
        # legacy call style: design passed first, stage results in '...'
        if (length(args) == 0) {
            stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'stageResults' must be defined")
        }
        stageResults <- args[[1]]
        .logDebug(
            "The separate specification of the design in ", functionName, "() is deprecated ",
            "because the 'stageResults' object contains the design already"
        )
    }
    if (.isDataset(stageResults)) {
        stageResults <- getStageResults(dataInput = stageResults, ...)
    }
    if (!.isStageResults(stageResults)) {
        for (arg in args) {
            if (.isStageResults(arg)) {
                return(arg)
            }
        }
        stop(C_EXCEPTION_TYPE_MISSING_ARGUMENT, "'stageResults' must be defined")
    }
    return(stageResults)
}

#'
#' @title
#' Get Conditional Power
#'
#' @description
#' Calculates and returns the conditional power.
#'
#' @inheritParams param_stageResults
#' @inheritParams param_nPlanned
#' @inheritParams param_allocationRatioPlanned
#' @param ... Further (optional) arguments to be passed:
#' \describe{
#' \item{\code{thetaH1} and \code{stDevH1} (or \code{assumedStDev} / \code{assumedStDevs}),
#' \code{pi1}, \code{pi2}, or \code{piTreatments}, \code{piControl(s)}}{
#' The assumed effect size, standard deviation or rates to calculate the conditional power if \code{nPlanned}
#' is specified. For survival designs, \code{thetaH1} refers to the hazard ratio.
#' For one-armed trials with binary outcome, only \code{pi1} can be specified, for two-armed trials with binary outcome,
#' \code{pi1} and \code{pi2} can be specified referring to the assumed treatment and control rate, respectively.
#' In multi-armed or enrichment designs, you can
#' specify a value or a vector with elements referring to the treatment arms or the sub-populations,
#' respectively. For testing rates, the parameters to be specified are \code{piTreatments} and \code{piControl} (multi-arm
#' designs) and \code{piTreatments} and \code{piControls} (enrichment designs).\cr
#' If not specified, the conditional power is calculated under the assumption of observed effect sizes,
#' standard deviations, rates, or hazard ratios.}
#' \item{\code{iterations}}{Iterations for simulating the power for Fisher's combination test.
#' If the power for more than one remaining stages is to be determined for
#' Fisher's combination test, it is estimated via simulation with specified \cr
#' \code{iterations}, the default is \code{1000}.}
#' \item{\code{seed}}{Seed for simulating the conditional power for Fisher's combination test.
#' See above, default is a random seed.}
#' }
#'
#' @details
#' The conditional power is calculated if the planned sample size for the subsequent stages is specified.\cr
#' For testing rates in a two-armed trial, pi1 and pi2 typically refer to the rates in the treatment
#' and the control group, respectively. This is not mandatory, however, and so pi1 and pi2 can be interchanged.
#' In many-to-one multi-armed trials, piTreatments and piControl refer to the rates in the treatment arms and
#' the one control arm, and so they cannot be interchanged. piTreatments and piControls in enrichment designs
#' can principally be interchanged, but we use the plural form to indicate that the rates can be differently
#' specified for the sub-populations.
#'
#' For Fisher's combination test, the conditional power for more than one remaining stages is
#' estimated via simulation.
#'
#' @seealso
#' \code{\link[=plot.StageResults]{plot.StageResults()}} or \code{\link[=plot.AnalysisResults]{plot.AnalysisResults()}}
#' for plotting the conditional power.
#'
#' @return Returns a \code{\link{ConditionalPowerResults}} object.
#' The following generics (R generic functions) are available for this result object:
#' \itemize{
#'   \item \code{\link[=names.FieldSet]{names()}} to obtain the field names,
#'   \item \code{\link[=print.FieldSet]{print()}} to print the object,
#'   \item \code{\link[=summary.ParameterSet]{summary()}} to display a summary of the object,
#'   \item \code{\link[=plot.ParameterSet]{plot()}} to plot the object,
#'   \item \code{\link[=as.data.frame.ParameterSet]{as.data.frame()}} to coerce the object to a \code{\link[base]{data.frame}},
#'   \item \code{\link[=as.matrix.FieldSet]{as.matrix()}} to coerce the object to a \code{\link[base]{matrix}}.
#' }
#' @template how_to_get_help_for_generics
#'
#' @family analysis functions
#'
#' @template examples_get_conditional_power
#'
#' @export
#'
getConditionalPower <- function(stageResults, ..., nPlanned,
        allocationRatioPlanned = 1 # C_ALLOCATION_RATIO_DEFAULT
        ) {
    stageResults <- .getStageResultsObject(stageResults = stageResults, functionName = "getConditionalPower", ...)
    .assertIsValidAllocationRatioPlanned(allocationRatioPlanned, stageResults$.dataInput$getNumberOfGroups())

    # Dispatch to the enrichment, multi-arm, or base (single comparison) implementation
    conditionalPower <- NULL
    if (.isEnrichmentStageResults(stageResults)) {
        conditionalPower <- .getConditionalPowerEnrichment(
            stageResults = stageResults, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        )
    } else if (.isMultiArmStageResults(stageResults)) {
        conditionalPower <- .getConditionalPowerMultiArm(
            stageResults = stageResults, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        )
    } else {
        .assertIsStageResults(stageResults)
        # Base case: dispatch on the endpoint type of the underlying dataset
        if (stageResults$isDatasetMeans()) {
            conditionalPower <- .getConditionalPowerMeans(
                stageResults = stageResults, nPlanned = nPlanned,
                allocationRatioPlanned = allocationRatioPlanned, ...
            )
        } else if (stageResults$isDatasetRates()) {
            conditionalPower <- .getConditionalPowerRates(
                stageResults = stageResults, nPlanned = nPlanned,
                allocationRatioPlanned = allocationRatioPlanned, ...
            )
        } else if (stageResults$isDatasetSurvival()) {
            conditionalPower <- .getConditionalPowerSurvival(
                stageResults = stageResults, nPlanned = nPlanned,
                allocationRatioPlanned = allocationRatioPlanned, ...
            )
        }
    }

    if (!is.null(conditionalPower)) {
        # Optionally attach the data needed for conditional power plots
        # (requested via the hidden optional argument 'addPlotData')
        addPlotData <- .getOptionalArgument("addPlotData", ...)
        if (!is.null(addPlotData) && isTRUE(addPlotData)) {
            conditionalPower$.plotData <- .getConditionalPowerPlot(
                stageResults = stageResults, nPlanned = nPlanned,
                allocationRatioPlanned = allocationRatioPlanned, ...
            )
        }
        conditionalPower$.setParameterType("nPlanned", C_PARAM_USER_DEFINED)
        conditionalPower$.setParameterType(
            "allocationRatioPlanned",
            ifelse(allocationRatioPlanned == C_ALLOCATION_RATIO_DEFAULT,
                C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED
            )
        )
        return(conditionalPower)
    } else {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'dataInput' type '", .getClassName(stageResults$.dataInput), "' is not implemented yet"
        )
    }
}

#
# Produces the plot data for the conditional power plot; dispatches on the
# kind of stage results (multi-arm / enrichment / base with means, rates, survival).
#
.getConditionalPowerPlot <- function(..., stageResults, nPlanned, allocationRatioPlanned = NA_real_) {
    if (.isMultiArmStageResults(stageResults)) {
        return(.getConditionalPowerPlotMultiArm(
            stageResults = stageResults, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        ))
    }
    if (.isEnrichmentStageResults(stageResults)) {
        return(.getConditionalPowerPlotEnrichment(
            stageResults = stageResults, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        ))
    }

    .assertIsStageResults(stageResults)
    .stopInCaseOfIllegalStageDefinition2(...)

    # At the final stage with remaining planned subjects, plot relative to kMax - 1
    stage <- stageResults$stage
    if (stage == stageResults$.design$kMax && length(nPlanned) > 0) {
        stage <- stageResults$.design$kMax - 1
    }
    .assertIsValidNPlanned(nPlanned, stageResults$.design$kMax, stage)

    if (is.na(allocationRatioPlanned)) {
        allocationRatioPlanned <- C_ALLOCATION_RATIO_DEFAULT
    }

    if (stageResults$isDatasetMeans()) {
        return(.getConditionalPowerPlotMeans(
            stageResults = stageResults, stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        ))
    }
    if (stageResults$isDatasetRates()) {
        return(.getConditionalPowerPlotRates(
            stageResults = stageResults, stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        ))
    }
    if (stageResults$isDatasetSurvival()) {
        return(.getConditionalPowerPlotSurvival(
            stageResults = stageResults, stage = stage, nPlanned = nPlanned,
            allocationRatioPlanned = allocationRatioPlanned, ...
        ))
    }

    stop(
        C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
        "'dataInput' type '", .getClassName(stageResults$.dataInput), "' is not implemented yet"
    )
}

#'
#' @title
#' Get Repeated P Values
#'
#' @description
#' Calculates the repeated p-values for a given test results.
#'
#' @inheritParams param_stageResults
#' @inheritParams param_tolerance
#' @inheritParams param_three_dots
#'
#' @details
#' The repeated p-value at a given stage of the trial is defined as the smallest
#' significance level under which at given test design the test results
#' obtain rejection of the null hypothesis. It can be calculated at each
#' stage of the trial and can thus be used as a monitoring tool.
#'
#' The repeated p-values are provided up to the specified stage.
#'
#' In multi-arm trials, the repeated p-values are defined separately for each
#' treatment comparison within the closed testing procedure.
#'
#' @template details_analysis_base_mnormt_dependency
#'
#' @return Returns a \code{\link[base]{numeric}} vector of length \code{kMax} or in case of multi-arm stage results
#' a \code{\link[base]{matrix}} (each column represents a stage, each row a comparison)
#' containing the repeated p values.
#'
#' @family analysis functions
#'
#' @template examples_get_repeated_p_values
#'
#' @export
#'
getRepeatedPValues <- function(stageResults, ...,
        tolerance = 1e-06 # C_ANALYSIS_TOLERANCE_DEFAULT
        ) {
    # Fix: the tolerance validation was previously called twice in a row;
    # the redundant duplicate call has been removed.
    .assertIsValidTolerance(tolerance)
    stageResults <- .getStageResultsObject(stageResults, functionName = "getRepeatedPValues", ...)
if (.isEnrichmentStageResults(stageResults)) {
        return(.getRepeatedPValuesEnrichment(stageResults = stageResults, tolerance = tolerance, ...))
    }
    if (.isMultiArmStageResults(stageResults)) {
        return(.getRepeatedPValuesMultiArm(stageResults = stageResults, tolerance = tolerance, ...))
    }

    .assertIsStageResults(stageResults)

    design <- stageResults$.design
    if (design$kMax == 1) {
        # Fixed design: one-sided -> the stage-one p-value itself;
        # two-sided -> twice the smaller of p and 1 - p
        return(ifelse(design$sided == 1,
            stageResults$pValues[1],
            2 * min(stageResults$pValues[1], 1 - stageResults$pValues[1])
        ))
    }

    if (.isTrialDesignInverseNormalOrGroupSequential(design) &&
            design$typeOfDesign %in% c(C_TYPE_OF_DESIGN_AS_USER, C_TYPE_OF_DESIGN_WT_OPTIMUM)) {
        # NOTE(review): the option name below contains a typo ("analyis");
        # kept as-is because renaming it would change behavior for users
        # who already set this option -- confirm before correcting
        showWarnings <- as.logical(getOption("rpact.analyis.repeated.p.values.warnings.enabled", "TRUE"))
        if (showWarnings) {
            warning("Repeated p-values not available for 'typeOfDesign' = '",
                design$typeOfDesign, "'",
                call. = FALSE
            )
        }
        return(rep(NA_real_, design$kMax))
    }

    # Dispatch on the design class
    if (.isTrialDesignInverseNormal(design)) {
        return(.getRepeatedPValuesInverseNormal(
            stageResults = stageResults, tolerance = tolerance, ...
        ))
    }
    if (.isTrialDesignGroupSequential(design)) {
        return(.getRepeatedPValuesGroupSequential(
            stageResults = stageResults, tolerance = tolerance, ...
        ))
    }
    if (.isTrialDesignFisher(design)) {
        if (design$method == C_FISHER_METHOD_USER_DEFINED_ALPHA) {
            warning("Repeated p-values not available for 'method' = '",
                C_FISHER_METHOD_USER_DEFINED_ALPHA, "'",
                call. = FALSE
            )
            return(rep(NA_real_, design$kMax))
        }

        return(.getRepeatedPValuesFisher(
            stageResults = stageResults, tolerance = tolerance, ...
        ))
    }

    .stopWithWrongDesignMessage(design, inclusiveConditionalDunnett = FALSE)
}

#
# Get final p-value based on inverse normal method
# (also covers the classical group sequential design; the two cases differ
# only in whether the inverse normal combination statistic or the z-score of
# the overall p-value enters the decision matrix)
#
.getFinalPValueInverseNormalOrGroupSequential <- function(stageResults) {
    design <- stageResults$.design
    .assertIsTrialDesignInverseNormalOrGroupSequential(design)

    if (.isTrialDesignInverseNormal(design)) {
        stageInverseNormalOrGroupSequential <- .getStageInverseNormal(
            design = design, stageResults = stageResults, stage = stageResults$stage
        )
    } else {
        stageInverseNormalOrGroupSequential <- .getStageGroupSeq(
            design = design, stageResults = stageResults, stage = stageResults$stage
        )
    }
    finalStage <- min(stageInverseNormalOrGroupSequential, design$kMax)

    # Early stopping or at end of study
    if (stageInverseNormalOrGroupSequential < design$kMax || stageResults$stage == design$kMax) {
        if (stageInverseNormalOrGroupSequential == 1) {
            pFinal <- stageResults$pValues[1]
        } else {
            # Build the decision matrix (row 1: lower/futility bounds, row 2:
            # critical values with the observed statistic at the final stage)
            if (design$bindingFutility) {
                if (.isTrialDesignInverseNormal(design)) {
                    decisionMatrix <- matrix(
                        c(
                            design$futilityBounds[1:(finalStage - 1)], C_FUTILITY_BOUNDS_DEFAULT,
                            c(design$criticalValues[1:(finalStage - 1)], stageResults$combInverseNormal[finalStage])
                        ),
                        nrow = 2, byrow = TRUE
                    )
                } else {
                    decisionMatrix <- matrix(
                        c(
                            design$futilityBounds[1:(finalStage - 1)], C_FUTILITY_BOUNDS_DEFAULT,
                            c(design$criticalValues[1:(finalStage - 1)], .getOneMinusQNorm(stageResults$overallPValues[finalStage]))
                        ),
                        nrow = 2, byrow = TRUE
                    )
                }
            } else {
                if (.isTrialDesignInverseNormal(design)) {
                    decisionMatrix <- matrix(
                        c(
                            rep(C_FUTILITY_BOUNDS_DEFAULT, finalStage),
                            c(design$criticalValues[1:(finalStage - 1)], stageResults$combInverseNormal[finalStage])
                        ),
                        nrow = 2, byrow = TRUE
                    )
                } else {
                    decisionMatrix <- matrix(
                        c(
                            rep(C_FUTILITY_BOUNDS_DEFAULT, finalStage),
                            c(design$criticalValues[1:(finalStage - 1)], .getOneMinusQNorm(stageResults$overallPValues[finalStage]))
                        ),
                        nrow = 2, byrow = TRUE
                    )
                }
            }
            probs <- .getGroupSequentialProbabilities(
                decisionMatrix = decisionMatrix,
                informationRates = design$informationRates[1:finalStage]
            )
            pFinal <- sum(probs[3, ] - probs[2, ])

            if (design$sided == 2) {
                # Two-sided case: repeat the calculation with the sign of the
                # final-stage statistic flipped and double the smaller value
                if (stageInverseNormalOrGroupSequential == 1) {
                    pFinalOtherDirection <- 1 - stageResults$pValues[1]
                } else {
                    if (.isTrialDesignInverseNormal(design)) {
                        decisionMatrix <- matrix(
                            c(
                                rep(C_FUTILITY_BOUNDS_DEFAULT, finalStage),
                                c(design$criticalValues[1:(finalStage - 1)], -stageResults$combInverseNormal[finalStage])
                            ),
                            nrow = 2, byrow = TRUE
                        )
                    } else {
                        decisionMatrix <- matrix(
                            c(
                                rep(C_FUTILITY_BOUNDS_DEFAULT, finalStage),
                                c(
                                    design$criticalValues[1:(finalStage - 1)],
                                    -.getOneMinusQNorm(stageResults$overallPValues[finalStage])
                                )
                            ),
                            nrow = 2, byrow = TRUE
                        )
                    }
                    probs <- .getGroupSequentialProbabilities(
                        decisionMatrix = decisionMatrix,
                        informationRates = design$informationRates[1:finalStage]
                    )
                    pFinalOtherDirection <- sum(probs[3, ] - probs[2, ])
                }
                pFinal <- 2 * min(pFinal, pFinalOtherDirection)
            }
        }
        return(list(finalStage = finalStage, pFinal = pFinal))
    }

    # Final stage not reached: no final p-value available
    return(list(finalStage = NA_integer_, pFinal = NA_real_))
}

#
# Computes and stores the combination test weights on the stage results object
# (inverse normal or Fisher, depending on the design class).
#
.setWeightsToStageResults <- function(design, stageResults) {
    if (.isTrialDesignInverseNormal(design)) {
        stageResults$weightsInverseNormal <- .getWeightsInverseNormal(design)
        stageResults$.setParameterType("weightsInverseNormal", C_PARAM_GENERATED)
    } else if (.isTrialDesignFisher(design)) {
        stageResults$weightsFisher <- .getWeightsFisher(design)
        stageResults$.setParameterType("weightsFisher", C_PARAM_GENERATED)
    }
}

#
# Returns the weights for inverse normal statistic
# (square roots of the information rate increments)
#
.getWeightsInverseNormal <- function(design) {
    if (design$kMax == 1) {
        return(1)
    }
    weights <- rep(NA, design$kMax)
    weights[1] <- sqrt(design$informationRates[1])
    weights[2:design$kMax] <- sqrt(design$informationRates[2:design$kMax] -
        design$informationRates[1:(design$kMax - 1)])
    return(weights)
}

#
# Returns the weights for Fisher's combination test statistic
# (increments scaled relative to the first-stage information rate)
#
.getWeightsFisher <- function(design) {
    if (design$kMax == 1) {
        return(1)
    }
    weights <- rep(NA, design$kMax)
    weights[1] <- 1
weights[2:design$kMax] <- sqrt((design$informationRates[2:design$kMax] -
    design$informationRates[1:(design$kMax - 1)]) / design$informationRates[1])
    return(weights)
}

#
# Returns the stage when using the inverse normal combination test
# (first stage at which the combination statistic crosses an efficacy or,
# if binding, a futility boundary; 'stage + kMax' encodes "no early stop")
#
.getStageInverseNormal <- function(..., design, stageResults, stage) {
    for (k in 1:stage) {
        if (stageResults$combInverseNormal[k] >= design$criticalValues[k]) {
            return(k)
        }
        if (design$sided == 2) {
            if (stageResults$combInverseNormal[k] <= -design$criticalValues[k]) {
                return(k)
            }
        }
        if (design$bindingFutility && k < design$kMax &&
                stageResults$combInverseNormal[k] <= design$futilityBounds[k]) {
            return(k)
        }
    }

    # no early stopping
    return(as.integer(stage + design$kMax))
}

#
# Returns the stage when using the group sequential test
# (same stopping logic as above, based on z-scores of the overall p-values)
#
.getStageGroupSeq <- function(..., design, stageResults, stage) {
    for (k in 1:stage) {
        if (.getOneMinusQNorm(stageResults$overallPValues[k]) >= design$criticalValues[k]) {
            return(k)
        }
        if (design$sided == 2) {
            if (.getOneMinusQNorm(stageResults$overallPValues[k]) <= -design$criticalValues[k]) {
                return(k)
            }
        }
        if (design$bindingFutility && k < design$kMax &&
                .getQNorm(max(1e-8, 1 - stageResults$overallPValues[k])) <= design$futilityBounds[k]) {
            return(k)
        }
    }

    # no early stopping
    return(as.integer(stage + design$kMax))
}

#
# Returns the stage when using Fisher's combination test
# (rejection when the product statistic falls below the critical value)
#
.getStageFisher <- function(..., design, stageResults, stage) {
    for (k in 1:stage) {
        if (stageResults$combFisher[k] <= design$criticalValues[k]) {
            return(k)
        }
        if (design$sided == 2) {
            if (1 - stageResults$combFisher[k] <= design$criticalValues[k]) {
                return(k)
            }
        }
        if (design$bindingFutility && k < design$kMax &&
                stageResults$pValues[k] >= design$alpha0Vec[k]) {
            return(k)
        }
    }

    # no early stopping
    return(as.integer(stage + design$kMax))
}

# @title
# q function
#
# @description
# Function for calculating the final p-value for two-stage design with Fisher's combination test
# and its use for calculating confidence intervals, see Wassmer &
# Brannath, p. 192 and Brannath et al. (2002), p. 241.
# Formula generalized for arbitrary weight in combination test.
#
.getQFunctionResult <- function(..., design, stageResults, theta, infRate) {
    alpha1 <- design$criticalValues[1]
    alpha0 <- design$alpha0Vec[1]
    # Without binding futility (or for two-sided tests) the futility bound
    # does not restrict the sample space
    if (!design$bindingFutility || (design$sided == 2)) {
        alpha0 <- 1
    }
    weightForFisher <- stageResults$weightsFisher[2]

    # Shift the first-stage boundaries under the alternative theta
    if (theta != 0) {
        alpha1Adj <- ifelse(alpha1 <= 0, 0,
            1 - stats::pnorm(.getOneMinusQNorm(alpha1) -
                theta / stageResults$overallStDevs[1] * infRate[1])
        )
    } else {
        alpha1Adj <- alpha1
    }
    if (is.na(alpha1Adj)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to calculate 'alpha1Adj'")
    }

    if (theta != 0) {
        alpha0Adj <- ifelse(alpha0 >= 1, 1,
            1 - stats::pnorm(.getOneMinusQNorm(alpha0) -
                theta / stageResults$overallStDevs[1] * infRate[1])
        )
    } else {
        alpha0Adj <- alpha0
    }
    if (is.na(alpha0Adj)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to calculate 'alpha0Adj'")
    }

    # Trial stopped at stage one: the stage-wise p-value is final
    if (stageResults$pValues[1] <= alpha1Adj || stageResults$pValues[1] >= alpha0Adj) {
        return(stageResults$pValues[1])
    }

    # Equal weights: logarithmic form of the q function
    if (weightForFisher == 1) {
        return(max(alpha1Adj, stageResults$pValues[1] * stageResults$pValues[2]) +
            stageResults$pValues[1] * stageResults$pValues[2] *
                (log(alpha0Adj) - log(max(
                    alpha1Adj,
                    stageResults$pValues[1] * stageResults$pValues[2]
                ))))
    }

    # General weight: power-form generalization of the q function
    return(max(alpha1Adj, stageResults$pValues[1] * stageResults$pValues[2]^weightForFisher) +
        weightForFisher / (weightForFisher - 1) *
            stageResults$pValues[1]^(1 / weightForFisher) * stageResults$pValues[2] *
            (alpha0Adj^(1 - 1 / weightForFisher) -
                max(alpha1Adj, stageResults$pValues[1] *
                    stageResults$pValues[2]^weightForFisher)^(1 - 1 / weightForFisher)))
}

#
# Get final p-value based on Fisher combination test
# (only implemented for kMax <= 2; see message below)
#
.getFinalPValueFisher <- function(stageResults) {
    design <- stageResults$.design
    .assertIsTrialDesignFisher(design)

    stageFisher <- .getStageFisher(design = design, stageResults = stageResults, stage = stageResults$stage)
    finalStage <- min(stageFisher, design$kMax)

    # Early stopping or at end of study
    if (stageFisher < design$kMax || stageResults$stage == design$kMax) {
        if (stageFisher == 1) {
            pFinal <- stageResults$pValues[1]
        } else {
            if (design$kMax > 2) {
                message(
                    "Final p-value cannot be calculated for kMax = ", design$kMax, " ",
                    "because the function for Fisher's design is implemented only for kMax <= 2"
                )
                return(list(finalStage = NA_integer_, pFinal = NA_real_))
            }

            # Final p-value for kMax = 2
            pFinal <- .getQFunctionResult(
                design = design, stageResults = stageResults, theta = 0, infRate = 0
            )
        }

        if (design$sided == 2) {
            if (stageFisher == 1) {
                pFinalOtherDirection <- 1 - stageResults$pValues[1]
            } else {
                # Temporarily mirror the p-values to evaluate the other direction,
                # then restore them
                stageResults$pValues <- 1 - stageResults$pValues
                pFinalOtherDirection <- .getQFunctionResult(
                    design = design, stageResults = stageResults, theta = 0, infRate = 0
                )
                stageResults$pValues <- 1 - stageResults$pValues
            }

            # Final p-value for kMax = 2
            pFinal <- 2 * min(pFinal, pFinalOtherDirection)
        }
        return(list(finalStage = finalStage, pFinal = pFinal))
    }

    return(list(finalStage = NA_integer_, pFinal = NA_real_))
}

#'
#' @title
#' Get Final P Value
#'
#' @description
#' Returns the final p-value for given stage results.
#'
#' @inheritParams param_stageResults
#' @param ... Only available for backward compatibility.
#'
#' @return Returns a \code{\link[base]{list}} containing
#' \itemize{
#'   \item \code{finalStage},
#'   \item \code{pFinal}.
#' }
#'
#' @details
#' The calculation of the final p-value is based on the stage-wise ordering of the sample space.
#' This enables the calculation for both the non-adaptive and the adaptive case.
#' For Fisher's combination test, it is available for \code{kMax = 2} only.
#'
#' @family analysis functions
#'
#' @template examples_get_final_p_value
#'
#' @export
#'
getFinalPValue <- function(stageResults, ...) {
    stageResults <- .getStageResultsObject(stageResults, functionName = "getFinalPValue", ...)
    .assertIsStageResultsNonMultiHypotheses(stageResults)

    if (stageResults$.design$kMax == 1) {
        warning("Final p-value is not available for fixed designs", call. = FALSE)
        return(list(finalStage = NA_integer_, pFinal = NA_real_))
    }

    # Dispatch on the design class
    finalPValue <- NULL
    if (.isTrialDesignInverseNormalOrGroupSequential(stageResults$.design)) {
        finalPValue <- .getFinalPValueInverseNormalOrGroupSequential(stageResults)
    } else if (.isTrialDesignFisher(stageResults$.design)) {
        finalPValue <- .getFinalPValueFisher(stageResults)
    }
    if (is.null(finalPValue)) {
        .stopWithWrongDesignMessage(stageResults$.design,
            inclusiveConditionalDunnett = .isMultiArmStageResults(stageResults)
        )
    }

    if (stageResults$.design$kMax > 1 && is.na(finalPValue$finalStage) &&
            (length(finalPValue$pFinal) == 0 || all(is.na(finalPValue$pFinal)))) {
        if (.getOptionalArgument("showWarnings", optionalArgumentDefaultValue = TRUE, ...)) {
            warning("Final p-value not calculated because final stage not reached", call. = FALSE)
        }
    }

    return(finalPValue)
}

#
# Returns a numeric vector of length kMax that is NA everywhere except at
# 'finalStage', where it carries 'finalValue' (all-NA if the inputs are invalid).
#
.getVectorWithFinalValueAtFinalStage <- function(..., kMax, finalValue, finalStage) {
    v <- rep(NA_real_, kMax)
    if (is.null(finalValue) || is.na(finalValue) ||
            is.null(finalStage) || is.na(finalStage) ||
            finalStage < 1 || finalStage > kMax) {
        return(v)
    }

    v[finalStage] <- finalValue
    return(v)
}

#' @title
#' Get Final Confidence Interval
#'
#' @description
#' Returns the final confidence interval for the parameter of interest.
#' It is based on the prototype case, i.e., the test for testing a mean for
#' normally distributed variables.
#'
#' @inheritParams param_design
#' @inheritParams param_dataInput
#' @inheritParams param_thetaH0
#' @inheritParams param_directionUpper
#' @inheritParams param_tolerance
#' @inheritParams param_stage
#' @param ... Further (optional) arguments to be passed:
#' \describe{
#' \item{\code{normalApproximation}}{
#' The type of computation of the p-values. Default is \code{FALSE} for
#' testing means (i.e., the t test is used) and TRUE for testing rates and the hazard ratio.
#' For testing rates, if \code{normalApproximation = FALSE} is specified, the binomial test
#' (one sample) or the exact test of Fisher (two samples) is used for calculating the p-values.
#' In the survival setting, \code{normalApproximation = FALSE} has no effect.}
#' \item{\code{equalVariances}}{The type of t test. For testing means in two treatment groups, either
#' the t test assuming that the variances are equal or the t test without assuming this,
#' i.e., the test of Welch-Satterthwaite is calculated, default is \code{TRUE}.}
#' }
#'
#' @details
#' Depending on \code{design} and \code{dataInput} the final confidence interval and median unbiased estimate
#' that is based on the stage-wise ordering of the sample space will be calculated and returned.
#' Additionally, a non-standardized ("general") version is provided,
#' the estimated standard deviation must be used to obtain
#' the confidence interval for the parameter of interest.
#'
#' For the inverse normal combination test design with more than two
#' stages, a warning informs that the validity of the confidence interval is theoretically shown only if
#' no sample size change was performed.
#'
#' @return Returns a \code{\link[base]{list}} containing
#' \itemize{
#'   \item \code{finalStage},
#'   \item \code{medianUnbiased},
#'   \item \code{finalConfidenceInterval},
#'   \item \code{medianUnbiasedGeneral}, and
#'   \item \code{finalConfidenceIntervalGeneral}.
#' }
#'
#' @family analysis functions
#'
#' @template examples_get_final_confidence_interval
#'
#' @export
#'
getFinalConfidenceInterval <- function(design, dataInput, ...,
        directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT
        thetaH0 = NA_real_,
        tolerance = 1e-06, # C_ANALYSIS_TOLERANCE_DEFAULT
        stage = NA_integer_) {
    .assertIsValidTolerance(tolerance)
    designAndDataInput <- .getDesignAndDataInput(design = design, dataInput = dataInput, ...)
    design <- designAndDataInput$design
    dataInput <- designAndDataInput$dataInput

    stage <- .getStageFromOptionalArguments(..., dataInput = dataInput, design = design, stage = stage)
    .assertIsValidDataInput(dataInput = dataInput, design = design, stage = stage)
    .assertIsDatasetNonMultiHypotheses(dataInput)
    on.exit(dataInput$.trim())

    if (design$kMax == 1) {
        warning("Final confidence interval is not available for fixed designs", call. = FALSE)
    }

    if (design$kMax > 1 && design$bindingFutility) {
        warning("Two-sided final confidence bounds are not appropriate, ",
            "use one-sided version (i.e., one bound) only",
            call. = FALSE
        )
    }

    # Dispatch on the endpoint type of the dataset
    finalConfidenceInterval <- NULL
    if (dataInput$isDatasetMeans()) {
        finalConfidenceInterval <- .getFinalConfidenceIntervalMeans(
            design = design, dataInput = dataInput,
            directionUpper = directionUpper, thetaH0 = thetaH0,
            tolerance = tolerance, stage = stage, ...
        )
    } else if (dataInput$isDatasetRates()) {
        finalConfidenceInterval <- .getFinalConfidenceIntervalRates(
            design = design, dataInput = dataInput,
            directionUpper = directionUpper, thetaH0 = thetaH0,
            tolerance = tolerance, stage = stage, ...
        )
    } else if (dataInput$isDatasetSurvival()) {
        # NOTE(review): unlike the means/rates branches, '...' is not forwarded
        # here -- presumably intentional; confirm against the survival implementation
        finalConfidenceInterval <- .getFinalConfidenceIntervalSurvival(
            design = design, dataInput = dataInput,
            directionUpper = directionUpper, thetaH0 = thetaH0,
            tolerance = tolerance, stage = stage
        )
    }

    if (is.null(finalConfidenceInterval)) {
        stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'dataInput' type '", .getClassName(dataInput), "' is not implemented yet")
    }

    if (design$kMax > 1 && is.na(finalConfidenceInterval$finalStage) &&
            (length(finalConfidenceInterval$finalConfidenceInterval) == 0 ||
                all(is.na(finalConfidenceInterval$finalConfidenceInterval)))) {
        warning("Final confidence interval not calculated because final stage not reached", call. = FALSE)
    }

    return(finalConfidenceInterval)
}

#
# Get repeated p-values based on group sequential test
# (for each stage, bisection search for the smallest significance level at
# which the observed z-score crosses the recalculated critical value)
#
.getRepeatedPValuesGroupSequential <- function(..., stageResults, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(functionName = ".getRepeatedPValuesGroupSequential", ...)

    design <- stageResults$.design
    .assertIsTrialDesignInverseNormalOrGroupSequential(design)

    repeatedPValues <- rep(NA_real_, design$kMax)
    if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP && stageResults$stage == design$kMax) {
        # Haybittle-Peto design at the final stage: the search interval is
        # bounded below by the alpha spent up to stage kMax - 1
        if (!is.na(stageResults$overallPValues[design$kMax]) &&
                .getOneMinusQNorm(stageResults$overallPValues[design$kMax]) == Inf) {
            repeatedPValues[design$kMax] <- tolerance
        } else {
            startTime <- Sys.time()
            lower <- .getDesignGroupSequential(
                kMax = design$kMax, sided = design$sided,
                informationRates = design$informationRates,
                typeOfDesign = C_TYPE_OF_DESIGN_HP,
                futilityBounds = design$futilityBounds,
                bindingFutility = design$bindingFutility
            )$alphaSpent[design$kMax - 1] + tolerance
            upper <- 0.5
            repeatedPValues[design$kMax] <- .getOneDimensionalRootBisectionMethod(
                fun = function(level) {
                    y <- .getDesignGroupSequential(
                        kMax = design$kMax, alpha = level, sided = design$sided,
                        informationRates = design$informationRates,
                        typeOfDesign = C_TYPE_OF_DESIGN_HP,
                        futilityBounds = design$futilityBounds,
                        bindingFutility = design$bindingFutility
                    )
                    if (design$sided == 2) {
                        return(y$criticalValues[design$kMax] -
                            abs(.getOneMinusQNorm(stageResults$overallPValues[design$kMax])))
                    }
                    return(y$criticalValues[design$kMax] -
                        .getOneMinusQNorm(stageResults$overallPValues[design$kMax]))
                },
                lower = lower, upper = upper, tolerance = tolerance,
                direction = -1, acceptResultsOutOfTolerance = TRUE,
                suppressWarnings = TRUE,
                callingFunctionInformation = ".getRepeatedPValuesGroupSequential"
            )
            .logProgress("Repeated p-values for final stage calculated", startTime = startTime)
        }
    } else {
        typeOfDesign <- design$typeOfDesign
        deltaWT <- design$deltaWT
        typeBetaSpending <-
design$typeBetaSpending
        # Non-binding futility: the futility bounds do not affect the rejection
        # region, so a Pampallona-Tsiatis design degenerates to Wang-Tsiatis
        # and beta spending is ignored
        if (!design$bindingFutility) {
            if (design$typeOfDesign == C_TYPE_OF_DESIGN_PT) {
                typeOfDesign <- C_TYPE_OF_DESIGN_WT
                deltaWT <- design$deltaPT1
            }
            if (design$typeBetaSpending != "none") {
                typeBetaSpending <- "none"
            }
        } else if ((design$typeOfDesign == C_TYPE_OF_DESIGN_PT) || (design$typeBetaSpending != "none")) {
            message("Calculation of repeated p-values might take a while for binding case, please wait...")
        }
        for (k in 1:stageResults$stage) {
            if (!is.na(stageResults$overallPValues[k]) &&
                    .getOneMinusQNorm(stageResults$overallPValues[k]) == Inf) {
                repeatedPValues[k] <- tolerance
            } else {
                startTime <- Sys.time()
                upper <- 0.5
                repeatedPValues[k] <- .getOneDimensionalRootBisectionMethod(
                    fun = function(level) {
                        # Recompute the design at significance 'level' and compare
                        # its stage-k critical value with the observed z-score
                        y <- .getDesignGroupSequential(
                            kMax = design$kMax, alpha = level, sided = design$sided,
                            informationRates = design$informationRates,
                            typeOfDesign = typeOfDesign,
                            typeBetaSpending = typeBetaSpending,
                            gammaB = design$gammaB,
                            deltaWT = deltaWT,
                            deltaPT0 = design$deltaPT0,
                            deltaPT1 = design$deltaPT1,
                            beta = design$beta,
                            gammaA = design$gammaA,
                            futilityBounds = design$futilityBounds,
                            bindingFutility = design$bindingFutility
                        )
                        if (design$sided == 2) {
                            return(y$criticalValues[k] - abs(.getOneMinusQNorm(stageResults$overallPValues[k])))
                        }
                        return(y$criticalValues[k] - .getOneMinusQNorm(stageResults$overallPValues[k]))
                    },
                    lower = tolerance, upper = upper, tolerance = tolerance,
                    direction = -1, acceptResultsOutOfTolerance = TRUE,
                    suppressWarnings = TRUE,
                    callingFunctionInformation = ".getRepeatedPValuesGroupSequential"
                )
                .logProgress("Repeated p-values of stage %s calculated", startTime = startTime, k)
            }
        }
    }

    return(repeatedPValues)
}

#
# Get repeated p-values based on inverse normal method
# (same bisection scheme as the group sequential variant, but the comparison
# uses the inverse normal combination statistic instead of the overall z-score)
#
.getRepeatedPValuesInverseNormal <- function(..., stageResults, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    design <- stageResults$.design
    .assertIsTrialDesignInverseNormalOrGroupSequential(design)
    .warnInCaseOfUnknownArguments(functionName = ".getRepeatedPValuesInverseNormal", ...)

    repeatedPValues <- rep(NA_real_, design$kMax)
    if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP && stageResults$stage == design$kMax) {
        # Haybittle-Peto design at the final stage: the search interval is
        # bounded below by the alpha spent up to stage kMax - 1
        if (!is.na(stageResults$combInverseNormal[design$kMax]) &&
                stageResults$combInverseNormal[design$kMax] == Inf) {
            repeatedPValues[design$kMax] <- tolerance
        } else {
            startTime <- Sys.time()
            lower <- .getDesignGroupSequential(
                kMax = design$kMax, sided = design$sided,
                informationRates = design$informationRates,
                typeOfDesign = C_TYPE_OF_DESIGN_HP,
                futilityBounds = design$futilityBounds,
                bindingFutility = design$bindingFutility
            )$alphaSpent[design$kMax - 1] + tolerance
            upper <- 0.5
            repeatedPValues[design$kMax] <- .getOneDimensionalRootBisectionMethod(
                fun = function(level) {
                    y <- .getDesignGroupSequential(
                        kMax = design$kMax, alpha = level, sided = design$sided,
                        informationRates = design$informationRates,
                        typeOfDesign = C_TYPE_OF_DESIGN_HP,
                        futilityBounds = design$futilityBounds,
                        bindingFutility = design$bindingFutility
                    )
                    if (design$sided == 2) {
                        return(y$criticalValues[design$kMax] - abs(stageResults$combInverseNormal[design$kMax]))
                    }
                    return(y$criticalValues[design$kMax] - stageResults$combInverseNormal[design$kMax])
                },
                lower = lower, upper = upper, tolerance = tolerance,
                direction = -1, acceptResultsOutOfTolerance = TRUE,
                suppressWarnings = TRUE,
                callingFunctionInformation = ".getRepeatedPValuesInverseNormal"
            )
            .logProgress("Repeated p-values for final stage calculated", startTime = startTime)
        }
    } else {
        typeOfDesign <- design$typeOfDesign
        deltaWT <- design$deltaWT
        typeBetaSpending <- design$typeBetaSpending
        # See the group sequential variant for the rationale of this adjustment
        if (!design$bindingFutility) {
            if (design$typeOfDesign == C_TYPE_OF_DESIGN_PT) {
                typeOfDesign <- C_TYPE_OF_DESIGN_WT
                deltaWT <- design$deltaPT1
            }
            if (design$typeBetaSpending != "none") {
                typeBetaSpending <- "none"
            }
        } else if ((design$typeOfDesign == C_TYPE_OF_DESIGN_PT) || (design$typeBetaSpending != "none")) {
            message("Calculation of repeated p-values might take a while for binding case, please wait...")
        }
        for (k in 1:stageResults$stage) {
            if (!is.na(stageResults$combInverseNormal[k]) && (stageResults$combInverseNormal[k] == Inf)) {
                repeatedPValues[k] <- tolerance
            } else {
                startTime <- Sys.time()
                upper <- 0.5
                repeatedPValues[k] <- .getOneDimensionalRootBisectionMethod(
                    fun = function(level) {
                        y <- .getDesignGroupSequential(
                            kMax = design$kMax, alpha = level, sided = design$sided,
                            informationRates = design$informationRates,
                            typeOfDesign = typeOfDesign,
                            typeBetaSpending = typeBetaSpending,
                            gammaB = design$gammaB,
                            deltaWT = deltaWT,
                            deltaPT0 = design$deltaPT0,
                            deltaPT1 = design$deltaPT1,
                            beta = design$beta,
                            gammaA = design$gammaA,
                            futilityBounds = design$futilityBounds,
                            bindingFutility = design$bindingFutility
                        )
                        if (design$sided == 2) {
                            return(y$criticalValues[k] - abs(stageResults$combInverseNormal[k]))
                        }
                        return(y$criticalValues[k] - stageResults$combInverseNormal[k])
                    },
                    lower = tolerance, upper = upper, tolerance = tolerance,
                    direction = -1, acceptResultsOutOfTolerance = TRUE,
                    suppressWarnings = TRUE,
                    callingFunctionInformation = ".getRepeatedPValuesInverseNormal"
                )
                .logProgress("Repeated p-values of stage %s calculated", startTime = startTime, k)
            }
        }
    }

    return(repeatedPValues)
}

#
# Get repeated p-values based on Fisher combination test
#
.getRepeatedPValuesFisher <- function(..., stageResults, tolerance = C_ANALYSIS_TOLERANCE_DEFAULT) {
    .warnInCaseOfUnknownArguments(functionName = ".getRepeatedPValuesFisher", ...)
design <- stageResults$.design .assertIsTrialDesignFisher(design) repeatedPValues <- rep(NA_real_, design$kMax) for (k in 1:stageResults$stage) { if (!is.na(stageResults$combFisher[k]) && (stageResults$combFisher[k] == 0)) { repeatedPValues[k] <- tolerance } else { startTime <- Sys.time() repeatedPValues[k] <- .getOneDimensionalRootBisectionMethod( fun = function(level) { y <- .getDesignFisher( kMax = design$kMax, alpha = level, sided = design$sided, informationRates = design$informationRates, alpha0Vec = design$alpha0Vec, bindingFutility = design$bindingFutility, method = design$method ) if (design$sided == 2) { combFisherNegStagek <- prod((1 - stageResults$pValues[1:k])^stageResults$weightsFisher[1:k]) return(y$criticalValues[k] - min(stageResults$combFisher[k], combFisherNegStagek)) } return(y$criticalValues[k] - stageResults$combFisher[k]) }, lower = tolerance, upper = 0.5, tolerance = tolerance, direction = 1, acceptResultsOutOfTolerance = TRUE, suppressWarnings = TRUE, callingFunctionInformation = ".getRepeatedPValuesFisher" ) .logProgress("Repeated p-values of stage %s calculated", startTime = startTime, k) } } return(repeatedPValues) } .getRejectValueConditionalPowerFisher <- function(..., kMax, alpha0Vec, criticalValues, weightsFisher, pValues, currentKMax, thetaH1, stage, nPlanned) { pValues <- c(pValues[1:stage], 1 - stats::pnorm(stats::rnorm( kMax - stage, thetaH1 * sqrt(nPlanned[(stage + 1):currentKMax]) ))) for (j in 1:currentKMax) { reject <- .getRejectValueFisherForOneStage( kMax = currentKMax, alpha0Vec = alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, stage = j, pValues = pValues ) if (reject >= 0) { return(reject) } } return(0) } .getRejectValueFisherForOneStage <- function(..., kMax, alpha0Vec, criticalValues, weightsFisher, stage, pValues) { if (stage < kMax && pValues[stage] >= alpha0Vec[stage]) { return(0) } p <- prod(pValues[1:stage]^weightsFisher[1:stage]) if (is.na(p)) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, 
"calculation of 'p' failed for stage ", stage, " ('pValues' = ", .arrayToString(pValues), ", 'weightsFisher' = ", .arrayToString(weightsFisher), ")" ) } if (is.na(criticalValues[stage])) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "no critical value available for stage ", stage, " ('criticalValues' = ", .arrayToString(criticalValues), ")" ) } if (p < criticalValues[stage]) { return(1) } return(-1) } .getRejectValueCrpFisher <- function(..., kMax, alpha0Vec, criticalValues, weightsFisher, k, stageResults) { pValues <- c(stageResults$pValues[1:k], stats::runif(kMax - k)) for (stage in 1:kMax) { reject <- .getRejectValueFisherForOneStage( kMax = kMax, alpha0Vec = alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, stage = stage, pValues = pValues ) if (reject >= 0) { return(reject) } } return(0) } # # Get CRP based on inverse normal or group sequential method # .getConditionalRejectionProbabilitiesInverseNormalorGroupSequential <- function(..., stageResults) { .warnInCaseOfUnknownArguments( functionName = ".getConditionalRejectionProbabilitiesInverseNormalorGroupSequential", ignore = c("design"), ... 
) design <- stageResults$.design .assertIsTrialDesignInverseNormalOrGroupSequential(design) criticalValues <- design$criticalValues informationRates <- design$informationRates weights <- stageResults$weightsInverseNormal futilityBounds <- design$futilityBounds kMax <- design$kMax conditionalRejectionProbabilities <- rep(NA_real_, kMax) if (kMax == 1) { return(NA_real_) } for (k in 1:min(kMax - 1, stageResults$stage)) { if (.isTrialDesignInverseNormal(design)) { # Shifted decision region for use in getGroupSeqProbs shiftedDecision <- criticalValues[(k + 1):kMax] * sqrt(sum(weights[1:k]^2) + cumsum(weights[(k + 1):kMax]^2)) / sqrt(cumsum(weights[(k + 1):kMax]^2)) - as.vector(weights[1:k] %*% .getOneMinusQNorm(stageResults$pValues[1:k])) / sqrt(cumsum(weights[(k + 1):kMax]^2)) if (k == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- futilityBounds[(k + 1):(kMax - 1)] * sqrt(sum(weights[1:k]^2) + cumsum(weights[(k + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(k + 1):(kMax - 1)]^2)) - as.vector(weights[1:k] %*% .getOneMinusQNorm(stageResults$pValues[1:k])) / sqrt(cumsum(weights[(k + 1):(kMax - 1)]^2)) } } else { # Shifted decision region for use in getGroupSeqProbs shiftedDecision <- criticalValues[(k + 1):kMax] * sqrt(sum(weights[1:k]^2) + cumsum(weights[(k + 1):kMax]^2)) / sqrt(cumsum(weights[(k + 1):kMax]^2)) - .getOneMinusQNorm(stageResults$overallPValues[k]) * sqrt(sum(weights[1:k]^2)) / sqrt(cumsum(weights[(k + 1):kMax]^2)) if (k == kMax - 1) { shiftedFutilityBounds <- c() } else { shiftedFutilityBounds <- futilityBounds[(k + 1):(kMax - 1)] * sqrt(sum(weights[1:k]^2) + cumsum(weights[(k + 1):(kMax - 1)]^2)) / sqrt(cumsum(weights[(k + 1):(kMax - 1)]^2)) - .getOneMinusQNorm(stageResults$overallPValues[k]) * sqrt(sum(weights[1:k]^2)) / sqrt(cumsum(weights[(k + 1):(kMax - 1)]^2)) } } # Scaled information for use in getGroupSeqProbs scaledInformation <- (informationRates[(k + 1):kMax] - informationRates[k]) / (1 - informationRates[k]) if 
(design$sided == 2) { decisionMatrix <- matrix(c(-shiftedDecision, shiftedDecision), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities(decisionMatrix = decisionMatrix, informationRates = scaledInformation) crp <- sum(probs[3, ] - probs[2, ] + probs[1, ]) } else { if (design$bindingFutility) { decisionMatrix <- matrix(c(shiftedFutilityBounds, C_FUTILITY_BOUNDS_DEFAULT, shiftedDecision), nrow = 2, byrow = TRUE ) } else { decisionMatrix <- matrix(c(rep(C_FUTILITY_BOUNDS_DEFAULT, kMax - k), shiftedDecision), nrow = 2, byrow = TRUE ) } probs <- .getGroupSequentialProbabilities( decisionMatrix = decisionMatrix, informationRates = scaledInformation ) crp <- sum(probs[3, ] - probs[2, ]) } conditionalRejectionProbabilities[k] <- crp } if (design$bindingFutility) { for (k in 1:min(kMax - 1, stageResults$stage)) { if (!is.na(futilityBounds[k])) { if (.isTrialDesignInverseNormal(design)) { if (stageResults$combInverseNormal[k] <= futilityBounds[k]) { conditionalRejectionProbabilities[k:stageResults$stage] <- 0 } } else { if (.getOneMinusQNorm(stageResults$overallPValues[k]) <= futilityBounds[k]) { conditionalRejectionProbabilities[k:stageResults$stage] <- 0 } } } } } return(conditionalRejectionProbabilities) } # # Get CRP based on Fisher combination test # .getConditionalRejectionProbabilitiesFisher <- function(..., stageResults) { .warnInCaseOfUnknownArguments( functionName = ".getConditionalRejectionProbabilitiesFisher", ignore = c("stage", "design"), ... 
) design <- stageResults$.design .assertIsTrialDesignFisher(design) kMax <- design$kMax if (kMax == 1) { return(NA_real_) } criticalValues <- design$criticalValues weights <- stageResults$weightsFisher if (design$bindingFutility) { alpha0Vec <- design$alpha0Vec } else { alpha0Vec <- rep(1, kMax - 1) } conditionalRejectionProbabilities <- rep(NA_real_, kMax) for (k in (1:min(kMax - 1, stageResults$stage))) { if (prod(stageResults$pValues[1:k]^weights[1:k]) <= criticalValues[k]) { conditionalRejectionProbabilities[k] <- 1 } else { if (k < kMax - 1) { conditionalRejectionProbabilities[k] <- .getFisherCombinationSize( kMax - k, alpha0Vec[(k + 1):(kMax - 1)], (criticalValues[(k + 1):kMax] / prod(stageResults$pValues[1:k]^weights[1:k]))^(1 / weights[k + 1]), weights[(k + 2):kMax] / weights[k + 1] ) } else { conditionalRejectionProbabilities[k] <- (criticalValues[kMax] / prod(stageResults$pValues[1:k]^weights[1:k]))^(1 / weights[kMax]) } } } if (design$bindingFutility) { for (k in (1:min(kMax - 1, stageResults$stage))) { if (stageResults$pValues[k] > alpha0Vec[k]) { conditionalRejectionProbabilities[k:stageResults$stage] <- 0 } } } conditionalRejectionProbabilities[conditionalRejectionProbabilities >= 1] <- 1 conditionalRejectionProbabilities[conditionalRejectionProbabilities < 0] <- NA_real_ return(conditionalRejectionProbabilities) } # # Get CRP based on Fisher combination test, tested through simulation # .getConditionalRejectionProbabilitiesFisherSimulated <- function(..., stageResults, iterations = 0, seed = NA_real_) { .warnInCaseOfUnknownArguments( functionName = ".getConditionalRejectionProbabilitiesFisherSimulated", ignore = c("design", "simulateCRP"), ... 
) design <- stageResults$.design .assertIsTrialDesignFisher(design) .assertIsValidIterationsAndSeed(iterations, seed) criticalValues <- design$criticalValues alpha0Vec <- design$alpha0Vec weightsFisher <- stageResults$weightsFisher kMax <- design$kMax crpFisherSimulated <- rep(NA_real_, kMax) if (iterations > 0) { seed <- .setSeed(seed) if (kMax >= 2) { for (k in 1:min(kMax - 1, stageResults$stage)) { reject <- 0 for (i in 1:iterations) { reject <- reject + .getRejectValueCrpFisher( kMax = kMax, alpha0Vec = alpha0Vec, criticalValues = criticalValues, weightsFisher = weightsFisher, k = k, stageResults = stageResults ) } crpFisherSimulated[k] <- reject / iterations } } else { warning("Simulation of CRP Fisher stopped: 'kMax' must be >= 2", call. = FALSE) } } return(list( crpFisherSimulated = crpFisherSimulated, iterations = iterations, seed = seed )) } #' #' @title #' Get Conditional Rejection Probabilities #' #' @description #' Calculates the conditional rejection probabilities (CRP) for given test results. #' #' @inheritParams param_stageResults #' @param ... Further (optional) arguments to be passed: #' \describe{ #' \item{\code{iterations}}{Iterations for simulating the conditional #' rejection probabilities for Fisher's combination test. #' For checking purposes, it can be estimated via simulation with #' specified \code{iterations}.} #' \item{\code{seed}}{Seed for simulating the conditional rejection probabilities #' for Fisher's combination test. See above, default is a random seed.} #' } #' #' @details #' The conditional rejection probability is the probability, under H0, to reject H0 #' in one of the subsequent (remaining) stages. #' The probability is calculated using the specified design. For testing rates and the #' survival design, the normal approximation is used, i.e., it is calculated with the #' use of the prototype case testing a mean for normally distributed data with known variance. 
#' #' The conditional rejection probabilities are provided up to the specified stage. #' #' For Fisher's combination test, you can check the validity of the CRP calculation via simulation. #' #' @return Returns a \code{\link[base]{numeric}} vector of length \code{kMax} or in case of multi-arm stage results #' a \code{\link[base]{matrix}} (each column represents a stage, each row a comparison) #' containing the conditional rejection probabilities. #' #' @family analysis functions #' #' @template examples_get_conditional_rejection_probabilities #' #' @export #' getConditionalRejectionProbabilities <- function(stageResults, ...) { stageResults <- .getStageResultsObject(stageResults, functionName = "getConditionalRejectionProbabilities", ... ) if (.isEnrichmentStageResults(stageResults)) { return(.getConditionalRejectionProbabilitiesEnrichment(stageResults = stageResults, ...)) } if (.isMultiArmStageResults(stageResults)) { return(.getConditionalRejectionProbabilitiesMultiArm(stageResults = stageResults, ...)) } .assertIsStageResults(stageResults) if (.isTrialDesignInverseNormalOrGroupSequential(stageResults$.design)) { return(.getConditionalRejectionProbabilitiesInverseNormalorGroupSequential( stageResults = stageResults, ... )) } if (.isTrialDesignFisher(stageResults$.design)) { simulateCRP <- .getOptionalArgument("simulateCRP", ...) if (!is.null(simulateCRP) && isTRUE(simulateCRP)) { iterations <- .getOptionalArgument("iterations", ...) if (!is.null(iterations) && iterations > 0) { return(.getConditionalRejectionProbabilitiesFisherSimulated( stageResults = stageResults, ... )) } } return(.getConditionalRejectionProbabilitiesFisher( stageResults = stageResults, ... 
)) } .stopWithWrongDesignMessage(stageResults$.design, inclusiveConditionalDunnett = .isMultiArmStageResults(stageResults) ) } .getDecisionMatrixRoot <- function(..., design, stage, stageResults, tolerance, firstParameterName, case = c("finalConfidenceIntervalGeneralLower", "finalConfidenceIntervalGeneralUpper", "medianUnbiasedGeneral")) { case <- match.arg(case) firstValue <- stageResults[[firstParameterName]][stage] if (.isTrialDesignGroupSequential(design)) { firstValue <- .getOneMinusQNorm(firstValue) } if (firstValue >= 8) { return(NA_real_) } result <- .getOneDimensionalRoot( function(theta) { if (design$bindingFutility) { row1part1 <- design$futilityBounds[1:(stage - 1)] } else { row1part1 <- rep(C_FUTILITY_BOUNDS_DEFAULT, stage - 1) } row1part2 <- C_FUTILITY_BOUNDS_DEFAULT row2part1 <- design$criticalValues[1:(stage - 1)] row2part2 <- firstValue if (.isTrialDesignGroupSequential(design)) { if (stageResults$isDatasetSurvival()) { row1part3 <- theta * sqrt(design$informationRates[1:stage] / design$informationRates[stage]) * sqrt(stageResults$overallEvents[stage]) } else { if (stageResults$isOneSampleDataset()) { row1part3 <- theta * sqrt(design$informationRates[1:stage] / design$informationRates[stage]) * sqrt(stageResults$overallSampleSizes[stage]) } if (stageResults$isTwoSampleDataset()) { row1part3 <- theta * sqrt(design$informationRates[1:stage] / design$informationRates[stage]) / sqrt(1 / stageResults$overallSampleSizes1[stage] + 1 / stageResults$overallSampleSizes2[stage]) } } } if (.isTrialDesignInverseNormal(design)) { if (stageResults$isDatasetSurvival()) { events <- stageResults$getDataInput()$getEventsUpTo(stage) adjInfRate <- cumsum(stageResults$weightsInverseNormal[1:stage] * sqrt(events[1:stage])) / sqrt(cumsum(stageResults$weightsInverseNormal[1:stage]^2)) } else { if (stageResults$isOneSampleDataset()) { sampleSizes <- stageResults$getDataInput()$getSampleSizesUpTo(stage) adjInfRate <- cumsum(stageResults$weightsInverseNormal[1:stage] * 
sqrt(sampleSizes[1:stage])) / sqrt(cumsum(stageResults$weightsInverseNormal[1:stage]^2)) } if (stageResults$isTwoSampleDataset()) { sampleSizes1 <- stageResults$getDataInput()$getSampleSizesUpTo(stage, 1) sampleSizes2 <- stageResults$getDataInput()$getSampleSizesUpTo(stage, 2) adjInfRate <- cumsum(stageResults$weightsInverseNormal[1:stage] / sqrt(1 / sampleSizes1[1:stage] + 1 / sampleSizes2[1:stage])) / sqrt(cumsum(stageResults$weightsInverseNormal[1:stage]^2)) } } row1part3 <- theta * adjInfRate } row2part3 <- row1part3 row1 <- c(row1part1, row1part2) - row1part3 row2 <- c(row2part1, row2part2) - row2part3 decisionMatrix <- matrix(c(row1, row2), nrow = 2, byrow = TRUE) probs <- .getGroupSequentialProbabilities( decisionMatrix = decisionMatrix, informationRates = design$informationRates[1:stage] ) if (case == "finalConfidenceIntervalGeneralLower") { return(sum(probs[3, ] - probs[2, ]) - design$alpha / design$sided) } else if (case == "finalConfidenceIntervalGeneralUpper") { return(1 - sum(probs[3, ] - probs[2, ]) - design$alpha / design$sided) } else if (case == "medianUnbiasedGeneral") { return(sum(probs[3, ] - probs[2, ]) - 0.50) } else { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'case' = '", case, "' is not implemented") } }, lower = -8, upper = 8, tolerance = tolerance, callingFunctionInformation = ".getDecisionMatrixRoot" ) } rpact/R/f_simulation_base_survival.R0000644000176200001440000012317614445307576017356 0ustar liggesusers## | ## | *Simulation of survival data with group sequential and combination test* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for 
information about our services: info@rpact.com ## | ## | File version: $Revision: 7126 $ ## | Last changed: $Date: 2023-06-23 14:26:39 +0200 (Fr, 23 Jun 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include class_simulation_results.R #' @include f_core_utilities.R NULL .isLambdaBasedSimulationEnabled <- function(pwsTimeObject) { if (!pwsTimeObject$.isLambdaBased()) { return(FALSE) } if (pwsTimeObject$delayedResponseEnabled) { return(TRUE) } if (pwsTimeObject$piecewiseSurvivalEnabled) { return(TRUE) } if (pwsTimeObject$kappa != 1) { if (length(pwsTimeObject$lambda1) != 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "if 'kappa' != 1 then 'lambda1' (", .arrayToString(pwsTimeObject$lambda1), ") must be a single numeric value" ) } if (length(pwsTimeObject$lambda2) != 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "if 'kappa' != 1 then 'lambda2' (", .arrayToString(pwsTimeObject$lambda2), ") must be a single numeric value" ) } return(TRUE) } if (pwsTimeObject$.getParameterType("hazardRatio") == C_PARAM_USER_DEFINED && !all(is.na(pwsTimeObject$hazardRatio))) { if (pwsTimeObject$.getParameterType("lambda1") == C_PARAM_USER_DEFINED && length(pwsTimeObject$lambda1) == length(pwsTimeObject$hazardRatio) && !all(is.na(pwsTimeObject$lambda1))) { return(TRUE) } if (pwsTimeObject$.getParameterType("lambda2") == C_PARAM_USER_DEFINED && length(pwsTimeObject$lambda2) == length(pwsTimeObject$hazardRatio) && !all(is.na(pwsTimeObject$lambda2))) { return(TRUE) } } return(FALSE) } #' @title #' Get Simulation Survival #' #' @description #' Returns the analysis times, power, stopping probabilities, conditional power, and expected sample size #' for testing the hazard ratio in a two treatment groups survival design. 
#' #' @inheritParams param_design_with_default #' @inheritParams param_thetaH0 #' @inheritParams param_directionUpper #' @inheritParams param_pi1_survival #' @inheritParams param_pi2_survival #' @inheritParams param_lambda1 #' @inheritParams param_lambda2 #' @inheritParams param_median1 #' @inheritParams param_median2 #' @inheritParams param_hazardRatio #' @inheritParams param_piecewiseSurvivalTime #' @inheritParams param_kappa #' @param allocation1 The number how many subjects are assigned to treatment 1 in a #' subsequent order, default is \code{1} #' @param allocation2 The number how many subjects are assigned to treatment 2 in a #' subsequent order, default is \code{1} #' @inheritParams param_eventTime #' @inheritParams param_accrualTime #' @inheritParams param_accrualIntensity #' @inheritParams param_accrualIntensityType #' @inheritParams param_dropoutRate1 #' @inheritParams param_dropoutRate2 #' @inheritParams param_dropoutTime #' @inheritParams param_maxNumberOfSubjects_survival #' @inheritParams param_plannedEvents #' @inheritParams param_minNumberOfEventsPerStage #' @inheritParams param_maxNumberOfEventsPerStage #' @inheritParams param_conditionalPowerSimulation #' @inheritParams param_thetaH1 #' @inheritParams param_maxNumberOfIterations #' @inheritParams param_calcEventsFunction #' @inheritParams param_showStatistics #' @param maxNumberOfRawDatasetsPerStage The number of raw datasets per stage that shall #' be extracted and saved as \code{\link[base]{data.frame}}, default is \code{0}. #' \code{\link[=getRawData]{getRawData()}} can be used to get the extracted raw data from the object. #' @param longTimeSimulationAllowed Logical that indicates whether long time simulations #' that consumes more than 30 seconds are allowed or not, default is \code{FALSE}. 
#' @inheritParams param_seed #' @inheritParams param_three_dots #' #' @details #' At given design the function simulates the power, stopping probabilities, conditional power, and expected #' sample size at given number of events, number of subjects, and parameter configuration. #' It also simulates the time when the required events are expected under the given #' assumptions (exponentially, piecewise exponentially, or Weibull distributed survival times #' and constant or non-constant piecewise accrual). #' Additionally, integers \code{allocation1} and \code{allocation2} can be specified that determine the number allocated #' to treatment group 1 and treatment group 2, respectively. #' More precisely, unequal randomization ratios must be specified via the two integer arguments \code{allocation1} and #' \code{allocation2} which describe how many subjects are consecutively enrolled in each group, respectively, before a #' subject is assigned to the other group. For example, the arguments \code{allocation1 = 2}, \code{allocation2 = 1}, #' \code{maxNumberOfSubjects = 300} specify 2:1 randomization with 200 subjects randomized to intervention and 100 to #' control. (Caveat: Do not use \code{allocation1 = 200}, \code{allocation2 = 100}, \code{maxNumberOfSubjects = 300} #' as this would imply that the 200 intervention subjects are enrolled prior to enrollment of any control subjects.) #' #' \code{conditionalPower}\cr #' The definition of \code{thetaH1} makes only sense if \code{kMax} > 1 #' and if \code{conditionalPower}, \code{minNumberOfEventsPerStage}, and #' \code{maxNumberOfEventsPerStage} are defined. #' #' Note that \code{numberOfSubjects}, \code{numberOfSubjects1}, and \code{numberOfSubjects2} in the output #' are the expected number of subjects. #' #' \code{calcEventsFunction}\cr #' This function returns the number of events at given conditional power and conditional critical value for specified #' testing situation. 
The function might depend on variables #' \code{stage}, #' \code{conditionalPower}, #' \code{thetaH0}, #' \code{plannedEvents}, #' \code{eventsPerStage}, #' \code{minNumberOfEventsPerStage}, #' \code{maxNumberOfEventsPerStage}, #' \code{allocationRatioPlanned}, #' \code{conditionalCriticalValue}, #' The function has to contain the three-dots argument '...' (see examples). #' #' @template details_piecewise_survival #' #' @template details_piecewise_accrual #' #' @section Simulation Data: #' The summary statistics "Simulated data" contains the following parameters: median [range]; mean +/-sd\cr #' #' \code{$show(showStatistics = FALSE)} or \code{$setShowStatistics(FALSE)} can be used to disable #' the output of the aggregated simulated data.\cr #' #' Example 1: \cr #' \code{simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 100, plannedEvents = 30)} \cr #' \code{simulationResults$show(showStatistics = FALSE)}\cr #' #' Example 2: \cr #' \code{simulationResults <- getSimulationSurvival(maxNumberOfSubjects = 100, plannedEvents = 30)} \cr #' \code{simulationResults$setShowStatistics(FALSE)}\cr #' \code{simulationResults}\cr #' #' \code{\link[=getData]{getData()}} can be used to get the aggregated simulated data from the #' object as \code{\link[base]{data.frame}}. The data frame contains the following columns: #' \enumerate{ #' \item \code{iterationNumber}: The number of the simulation iteration. #' \item \code{stageNumber}: The stage. #' \item \code{pi1}: The assumed or derived event rate in the treatment group. #' \item \code{pi2}: The assumed or derived event rate in the control group. #' \item \code{hazardRatio}: The hazard ratio under consideration (if available). #' \item \code{analysisTime}: The analysis time. #' \item \code{numberOfSubjects}: The number of subjects under consideration when the #' (interim) analysis takes place. #' \item \code{eventsPerStage1}: The observed number of events per stage #' in treatment group 1. 
#' \item \code{eventsPerStage2}: The observed number of events per stage #' in treatment group 2. #' \item \code{eventsPerStage}: The observed number of events per stage #' in both treatment groups. #' \item \code{rejectPerStage}: 1 if null hypothesis can be rejected, 0 otherwise. #' \item \code{futilityPerStage}: 1 if study should be stopped for futility, 0 otherwise. #' \item \code{eventsNotAchieved}: 1 if number of events could not be reached with #' observed number of subjects, 0 otherwise. #' \item \code{testStatistic}: The test statistic that is used for the test decision, #' depends on which design was chosen (group sequential, inverse normal, #' or Fisher combination test)' #' \item \code{logRankStatistic}: Z-score statistic which corresponds to a one-sided #' log-rank test at considered stage. #' \item \code{hazardRatioEstimateLR}: The estimated hazard ratio, derived from the #' log-rank statistic. #' \item \code{trialStop}: \code{TRUE} if study should be stopped for efficacy or futility or final stage, \code{FALSE} otherwise. #' \item \code{conditionalPowerAchieved}: The conditional power for the subsequent stage of the trial for #' selected sample size and effect. The effect is either estimated from the data or can be #' user defined with \code{thetaH1}. #' } #' #' @section Raw Data: #' \code{\link[=getRawData]{getRawData()}} can be used to get the simulated raw data from the #' object as \code{\link[base]{data.frame}}. Note that \code{getSimulationSurvival()} #' must called before with \code{maxNumberOfRawDatasetsPerStage} > 0. #' The data frame contains the following columns: #' \enumerate{ #' \item \code{iterationNumber}: The number of the simulation iteration. #' \item \code{stopStage}: The stage of stopping. #' \item \code{subjectId}: The subject id (increasing number 1, 2, 3, ...) #' \item \code{accrualTime}: The accrual time, i.e., the time when the subject entered the trial. #' \item \code{treatmentGroup}: The treatment group number (1 or 2). 
#' \item \code{survivalTime}: The survival time of the subject. #' \item \code{dropoutTime}: The dropout time of the subject (may be \code{NA}). #' \item \code{observationTime}: The specific observation time. #' \item \code{timeUnderObservation}: The time under observation is defined as follows:\cr #' if (event == TRUE) {\cr #' timeUnderObservation <- survivalTime;\cr #' } else if (dropoutEvent == TRUE) {\cr #' timeUnderObservation <- dropoutTime;\cr #' } else {\cr #' timeUnderObservation <- observationTime - accrualTime;\cr #' } #' \item \code{event}: \code{TRUE} if an event occurred; \code{FALSE} otherwise. #' \item \code{dropoutEvent}: \code{TRUE} if an dropout event occurred; \code{FALSE} otherwise. #' } #' #' @template return_object_simulation_results #' @template how_to_get_help_for_generics #' #' @template examples_get_simulation_survival #' #' @export #' getSimulationSurvival <- function(design = NULL, ..., thetaH0 = 1, # C_THETA_H0_SURVIVAL_DEFAULT directionUpper = TRUE, # C_DIRECTION_UPPER_DEFAULT pi1 = NA_real_, pi2 = NA_real_, lambda1 = NA_real_, lambda2 = NA_real_, median1 = NA_real_, median2 = NA_real_, hazardRatio = NA_real_, kappa = 1, piecewiseSurvivalTime = NA_real_, allocation1 = 1, # C_ALLOCATION_1_DEFAULT allocation2 = 1, # C_ALLOCATION_2_DEFAULT eventTime = 12, # C_EVENT_TIME_DEFAULT accrualTime = c(0, 12), # C_ACCRUAL_TIME_DEFAULT accrualIntensity = 0.1, # C_ACCRUAL_INTENSITY_DEFAULT accrualIntensityType = c("auto", "absolute", "relative"), dropoutRate1 = 0, # C_DROP_OUT_RATE_1_DEFAULT dropoutRate2 = 0, # C_DROP_OUT_RATE_2_DEFAULT dropoutTime = 12, # C_DROP_OUT_TIME_DEFAULT maxNumberOfSubjects = NA_real_, plannedEvents = NA_real_, minNumberOfEventsPerStage = NA_real_, maxNumberOfEventsPerStage = NA_real_, conditionalPower = NA_real_, thetaH1 = NA_real_, maxNumberOfIterations = 1000L, # C_MAX_SIMULATION_ITERATIONS_DEFAULT maxNumberOfRawDatasetsPerStage = 0, longTimeSimulationAllowed = FALSE, seed = NA_real_, calcEventsFunction = NULL, 
showStatistics = FALSE) { .assertRcppIsInstalled() if (is.null(design)) { design <- .getDefaultDesign(..., type = "simulation") .warnInCaseOfUnknownArguments( functionName = "getSimulationSurvival", ignore = c(.getDesignArgumentsToIgnoreAtUnknownArgumentCheck( design, powerCalculationEnabled = TRUE ), "showStatistics"), ... ) } else { .assertIsTrialDesign(design) .warnInCaseOfUnknownArguments( functionName = "getSimulationSurvival", ignore = "showStatistics", ... ) .warnInCaseOfTwoSidedPowerArgument(...) } .assertIsSingleLogical(directionUpper, "directionUpper") .assertIsSingleNumber(thetaH0, "thetaH0") .assertIsInOpenInterval(thetaH0, "thetaH0", 0, NULL, naAllowed = TRUE) .assertIsNumericVector(minNumberOfEventsPerStage, "minNumberOfEventsPerStage", naAllowed = TRUE) .assertIsNumericVector(maxNumberOfEventsPerStage, "maxNumberOfEventsPerStage", naAllowed = TRUE) .assertIsSingleNumber(conditionalPower, "conditionalPower", naAllowed = TRUE) .assertIsInOpenInterval(conditionalPower, "conditionalPower", 0, 1, naAllowed = TRUE) .assertIsSingleNumber(thetaH1, "thetaH1", naAllowed = TRUE) .assertIsInOpenInterval(thetaH1, "thetaH1", 0, NULL, naAllowed = TRUE) .assertIsSinglePositiveInteger(maxNumberOfIterations, "maxNumberOfIterations", validateType = FALSE) .assertIsSingleNumber(seed, "seed", naAllowed = TRUE) .assertIsNumericVector(lambda1, "lambda1", naAllowed = TRUE) .assertIsNumericVector(lambda2, "lambda2", naAllowed = TRUE) .assertIsSinglePositiveInteger(maxNumberOfSubjects, "maxNumberOfSubjects", validateType = FALSE, naAllowed = TRUE ) .assertIsIntegerVector(allocation1, "allocation1", validateType = FALSE) .assertIsIntegerVector(allocation2, "allocation2", validateType = FALSE) .assertIsInClosedInterval(allocation1, "allocation1", lower = 1L, upper = NULL) .assertIsInClosedInterval(allocation2, "allocation2", lower = 1L, upper = NULL) .assertIsSingleLogical(longTimeSimulationAllowed, "longTimeSimulationAllowed") .assertIsSingleLogical(showStatistics, 
"showStatistics", naAllowed = FALSE) .assertIsValidPlannedSubjectsOrEvents(design, plannedEvents, parameterName = "plannedEvents") if (design$sided == 2) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "Only one-sided case is implemented for the survival simulation design" ) } if (!all(is.na(lambda2)) && !all(is.na(lambda1)) && length(lambda2) != length(lambda1) && length(lambda2) > 1) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "length of 'lambda2' (", length(lambda2), ") must be equal to length of 'lambda1' (", length(lambda1), ")" ) } if (all(is.na(lambda2)) && !all(is.na(lambda1))) { warning("'lambda1' (", .arrayToString(lambda1), ") will be ignored ", "because 'lambda2' (", .arrayToString(lambda2), ") is undefined", call. = FALSE ) lambda1 <- NA_real_ } if (!all(is.na(lambda2)) && is.list(piecewiseSurvivalTime)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'piecewiseSurvivalTime' needs to be a numeric vector and not a list ", "because 'lambda2' (", .arrayToString(lambda2), ") is defined separately" ) } thetaH1 <- .ignoreParameterIfNotUsed( "thetaH1", thetaH1, design$kMax > 1, "design is fixed ('kMax' = 1)", "Assumed effect" ) if (is.na(conditionalPower) && !is.na(thetaH1)) { warning("'thetaH1' will be ignored because 'conditionalPower' is not defined", call. 
= FALSE) } conditionalPower <- .ignoreParameterIfNotUsed( "conditionalPower", conditionalPower, design$kMax > 1, "design is fixed ('kMax' = 1)" ) minNumberOfEventsPerStage <- .ignoreParameterIfNotUsed( "minNumberOfEventsPerStage", minNumberOfEventsPerStage, design$kMax > 1, "design is fixed ('kMax' = 1)" ) maxNumberOfEventsPerStage <- .ignoreParameterIfNotUsed( "maxNumberOfEventsPerStage", maxNumberOfEventsPerStage, design$kMax > 1, "design is fixed ('kMax' = 1)" ) minNumberOfEventsPerStage <- .assertIsValidNumberOfSubjectsPerStage(minNumberOfEventsPerStage, "minNumberOfEventsPerStage", plannedEvents, conditionalPower, NULL, design$kMax, endpoint = "survival", calcSubjectsFunctionEnabled = FALSE ) maxNumberOfEventsPerStage <- .assertIsValidNumberOfSubjectsPerStage(maxNumberOfEventsPerStage, "maxNumberOfEventsPerStage", plannedEvents, conditionalPower, NULL, design$kMax, endpoint = "survival", calcSubjectsFunctionEnabled = FALSE ) simulationResults <- SimulationResultsSurvival(design, showStatistics = showStatistics) if (!is.na(conditionalPower)) { if (design$kMax > 1) { if (any(maxNumberOfEventsPerStage - minNumberOfEventsPerStage < 0) && !all(is.na(maxNumberOfEventsPerStage - minNumberOfEventsPerStage))) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfEventsPerStage' (", .arrayToString(maxNumberOfEventsPerStage), ") must be not smaller than minNumberOfEventsPerStage' (", .arrayToString(minNumberOfEventsPerStage), ")" ) } .setValueAndParameterType( simulationResults, "minNumberOfEventsPerStage", minNumberOfEventsPerStage, NA_real_ ) .setValueAndParameterType( simulationResults, "maxNumberOfEventsPerStage", maxNumberOfEventsPerStage, NA_real_ ) } else { warning("'conditionalPower' will be ignored for fixed sample design", call. 
= FALSE) } } else { simulationResults$minNumberOfEventsPerStage <- NA_real_ simulationResults$maxNumberOfEventsPerStage <- NA_real_ simulationResults$.setParameterType("minNumberOfEventsPerStage", C_PARAM_NOT_APPLICABLE) simulationResults$.setParameterType("maxNumberOfEventsPerStage", C_PARAM_NOT_APPLICABLE) simulationResults$.setParameterType("conditionalPower", C_PARAM_NOT_APPLICABLE) } if (!is.na(conditionalPower) && (design$kMax == 1)) { warning("'conditionalPower' will be ignored for fixed sample design", call. = FALSE) } accrualSetup <- getAccrualTime( accrualTime = accrualTime, accrualIntensity = accrualIntensity, accrualIntensityType = accrualIntensityType, maxNumberOfSubjects = maxNumberOfSubjects ) if (is.na(accrualSetup$maxNumberOfSubjects)) { if (identical(accrualIntensity, 1L)) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "choose a 'accrualIntensity' > 1 or define 'maxNumberOfSubjects'" ) } stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'maxNumberOfSubjects' must be defined" ) } simulationResults$.setParameterType("seed", ifelse(is.na(seed), C_PARAM_DEFAULT_VALUE, C_PARAM_USER_DEFINED )) simulationResults$seed <- .setSeed(seed) simulationResults$.accrualTime <- accrualSetup accrualTime <- accrualSetup$.getAccrualTimeWithoutLeadingZero() simulationResults$maxNumberOfSubjects <- accrualSetup$maxNumberOfSubjects simulationResults$.setParameterType( "maxNumberOfSubjects", accrualSetup$.getParameterType("maxNumberOfSubjects") ) simulationResults$accrualTime <- accrualSetup$.getAccrualTimeWithoutLeadingZero() simulationResults$.setParameterType("accrualTime", accrualSetup$.getParameterType("accrualTime")) simulationResults$accrualIntensity <- accrualSetup$accrualIntensity simulationResults$.setParameterType( "accrualIntensity", accrualSetup$.getParameterType("accrualIntensity") ) simulationResults$plannedEvents <- plannedEvents simulationResults$.setParameterType("plannedEvents", C_PARAM_USER_DEFINED) pwsTimeObject <- getPiecewiseSurvivalTime( piecewiseSurvivalTime 
= piecewiseSurvivalTime, lambda2 = lambda2, lambda1 = lambda1, median1 = median1, median2 = median2, hazardRatio = hazardRatio, pi1 = pi1, pi2 = pi2, eventTime = eventTime, kappa = kappa, delayedResponseAllowed = TRUE, .pi1Default = C_PI_1_DEFAULT ) simulationResults$.piecewiseSurvivalTime <- pwsTimeObject simulationResults$hazardRatio <- pwsTimeObject$hazardRatio simulationResults$.setParameterType("hazardRatio", pwsTimeObject$.getParameterType("hazardRatio")) simulationResults$.setParameterType("eventTime", pwsTimeObject$.getParameterType("eventTime")) simulationResults$eventTime <- pwsTimeObject$eventTime if (.isLambdaBasedSimulationEnabled(pwsTimeObject)) { simulationResults$piecewiseSurvivalTime <- pwsTimeObject$piecewiseSurvivalTime simulationResults$.setParameterType("piecewiseSurvivalTime", C_PARAM_USER_DEFINED) simulationResults$lambda2 <- pwsTimeObject$lambda2 simulationResults$.setParameterType("lambda2", pwsTimeObject$.getParameterType("lambda2")) lambdaVec2 <- simulationResults$lambda2 simulationResults$lambda1 <- pwsTimeObject$lambda1 simulationResults$.setParameterType("lambda1", pwsTimeObject$.getParameterType("lambda1")) if (any(is.na(pwsTimeObject$lambda1))) { .assertIsValidHazardRatioVector(pwsTimeObject$hazardRatio) .setValueAndParameterType( simulationResults, "hazardRatio", pwsTimeObject$hazardRatio, NA_real_ ) numberOfResults <- length(simulationResults$hazardRatio) lambdaVec1 <- simulationResults$lambda2 * pwsTimeObject$hazardRatio } else { numberOfResults <- 1 lambdaVec1 <- pwsTimeObject$lambda1 } .warnInCaseOfDefinedPiValue(simulationResults, "pi1") .warnInCaseOfDefinedPiValue(simulationResults, "pi2") simulationResults$pi1 <- pwsTimeObject$pi1 simulationResults$pi2 <- pwsTimeObject$pi2 simulationResults$.setParameterType("pi1", pwsTimeObject$.getParameterType("pi1")) simulationResults$.setParameterType("pi2", pwsTimeObject$.getParameterType("pi2")) simulationResults$median1 <- pwsTimeObject$median1 simulationResults$median2 <- 
pwsTimeObject$median2 simulationResults$.setParameterType("median1", pwsTimeObject$.getParameterType("median1")) simulationResults$.setParameterType("median2", pwsTimeObject$.getParameterType("median2")) cdfValues1 <- .getPiecewiseExponentialDistribution( pwsTimeObject$piecewiseSurvivalTime, lambdaVec1, pwsTimeObject$piecewiseSurvivalTime, kappa = kappa ) cdfValues2 <- .getPiecewiseExponentialDistribution( pwsTimeObject$piecewiseSurvivalTime, lambdaVec2, pwsTimeObject$piecewiseSurvivalTime, kappa = kappa ) if (length(cdfValues1) == 1) { cdfValues1 <- NA_real_ cdfValues2 <- NA_real_ } else { cdfValues1 <- cdfValues1[2:length(cdfValues1)] # use values without a leading 0 cdfValues2 <- cdfValues2[2:length(cdfValues2)] } pi1 <- NA_real_ pi2 <- NA_real_ } else { numberOfResults <- .initDesignPlanSurvivalByPiecewiseSurvivalTimeObject( simulationResults, pwsTimeObject ) pi1 <- simulationResults$pi1 if (all(is.na(pi1))) { pi1 <- getPiByLambda(simulationResults$lambda1, eventTime = eventTime, kappa = kappa) simulationResults$pi1 <- pi1 simulationResults$.setParameterType("pi1", C_PARAM_GENERATED) } pi2 <- simulationResults$pi2 if (all(is.na(pi2))) { pi2 <- getPiByLambda(simulationResults$lambda2, eventTime = eventTime, kappa = kappa) simulationResults$pi2 <- pi2 simulationResults$.setParameterType("pi2", C_PARAM_GENERATED) } simulationResults$piecewiseSurvivalTime <- NA_real_ lambdaVec1 <- NA_real_ lambdaVec2 <- NA_real_ cdfValues1 <- NA_real_ cdfValues2 <- NA_real_ } numberOfSimStepsTotal <- numberOfResults * maxNumberOfIterations * accrualSetup$maxNumberOfSubjects maxNumberOfSimStepsTotal <- 10 * 100000 * 100 if (numberOfSimStepsTotal > maxNumberOfSimStepsTotal) { if (!longTimeSimulationAllowed) { stop( "Simulation stopped because long time simulation is disabled ", "and the defined number of single simulation steps (", numberOfSimStepsTotal, ") is larger than the threshold ", maxNumberOfSimStepsTotal, ". 
", "Set 'longTimeSimulationAllowed = TRUE' to enable simulations ", "that take a long time (> 30 sec)" ) } message( "Note that the simulation may take a long time because ", sprintf("%.0f", numberOfSimStepsTotal), " single simulation steps must be calculated" ) } .setValueAndParameterType(simulationResults, "directionUpper", directionUpper, C_DIRECTION_UPPER_DEFAULT) .setValueAndParameterType(simulationResults, "dropoutRate1", dropoutRate1, C_DROP_OUT_RATE_1_DEFAULT) .setValueAndParameterType(simulationResults, "dropoutRate2", dropoutRate2, C_DROP_OUT_RATE_2_DEFAULT) .setValueAndParameterType(simulationResults, "dropoutTime", dropoutTime, C_DROP_OUT_TIME_DEFAULT) .setValueAndParameterType(simulationResults, "thetaH0", thetaH0, C_THETA_H0_SURVIVAL_DEFAULT) allocationFraction <- getFraction(allocation1 / allocation2) if (allocationFraction[1] != allocation1 || allocationFraction[2] != allocation2) { warning(sprintf( "allocation1 = %s and allocation2 = %s was replaced by allocation1 = %s and allocation2 = %s", allocation1, allocation2, allocationFraction[1], allocationFraction[2] ), call. 
= FALSE) allocation1 <- allocationFraction[1] allocation2 <- allocationFraction[2] } .setValueAndParameterType(simulationResults, "allocation1", allocation1, C_ALLOCATION_1_DEFAULT) .setValueAndParameterType(simulationResults, "allocation2", allocation2, C_ALLOCATION_2_DEFAULT) allocationRatioPlanned <- allocation1 / allocation2 .setValueAndParameterType( simulationResults, "allocationRatioPlanned", allocationRatioPlanned, C_ALLOCATION_RATIO_DEFAULT ) .setValueAndParameterType(simulationResults, "conditionalPower", conditionalPower, NA_real_) if (!is.na(thetaH0) && !is.na(thetaH1) && thetaH0 != 1) { thetaH1 <- thetaH1 / thetaH0 .setValueAndParameterType(simulationResults, "thetaH1", thetaH1, NA_real_) simulationResults$.setParameterType("thetaH1", C_PARAM_GENERATED) } else { .setValueAndParameterType(simulationResults, "thetaH1", thetaH1, NA_real_) } if (is.na(conditionalPower)) { simulationResults$.setParameterType("thetaH1", C_PARAM_NOT_APPLICABLE) } .setValueAndParameterType(simulationResults, "kappa", kappa, 1) .setValueAndParameterType( simulationResults, "maxNumberOfIterations", as.integer(maxNumberOfIterations), C_MAX_SIMULATION_ITERATIONS_DEFAULT ) phi <- -c(log(1 - dropoutRate1), log(1 - dropoutRate2)) / dropoutTime densityIntervals <- accrualTime if (length(accrualTime) > 1) { densityIntervals[2:length(accrualTime)] <- accrualTime[2:length(accrualTime)] - accrualTime[1:(length(accrualTime) - 1)] } densityVector <- accrualSetup$accrualIntensity / sum(densityIntervals * accrualSetup$accrualIntensity) intensityReplications <- round(densityVector * densityIntervals * accrualSetup$maxNumberOfSubjects) if (all(intensityReplications > 0)) { accrualTimeValue <- cumsum(rep( 1 / (densityVector * accrualSetup$maxNumberOfSubjects), intensityReplications )) } else { accrualTimeValue <- cumsum(rep( 1 / (densityVector[1] * accrualSetup$maxNumberOfSubjects), intensityReplications[1] )) if (length(accrualIntensity) > 1 && length(intensityReplications) > 1) { for (i in 
2:min(length(accrualIntensity), length(intensityReplications))) { if (intensityReplications[i] > 0) { accrualTimeValue <- c( accrualTimeValue, accrualTime[i - 1] + cumsum(rep( 1 / (densityVector[i] * accrualSetup$maxNumberOfSubjects), intensityReplications[i] )) ) } } } } accrualTimeValue <- accrualTimeValue[1:accrualSetup$maxNumberOfSubjects] # to avoid last value to be NA_real_ i <- accrualSetup$maxNumberOfSubjects while (is.na(accrualTimeValue[i])) { accrualTimeValue[i] <- accrualTime[length(accrualTime)] i <- i - 1 } treatmentGroup <- rep( c(rep(1, allocation1), rep(2, allocation2)), ceiling(accrualSetup$maxNumberOfSubjects / (allocation1 + allocation2)) )[1:accrualSetup$maxNumberOfSubjects] if (.isTrialDesignFisher(design)) { alpha0Vec <- design$alpha0Vec futilityBounds <- rep(NA_real_, design$kMax - 1) } else { alpha0Vec <- rep(NA_real_, design$kMax - 1) futilityBounds <- design$futilityBounds } if (.isTrialDesignGroupSequential(design)) { designNumber <- 1L } else if (.isTrialDesignInverseNormal(design)) { designNumber <- 2L } else if (.isTrialDesignFisher(design)) { designNumber <- 3L } calcSubjectsFunctionList <- .getCalcSubjectsFunction( design = design, simulationResults = simulationResults, calcFunction = calcEventsFunction, expectedFunction = function(stage, conditionalPower, thetaH0, estimatedTheta, plannedEvents, eventsOverStages, minNumberOfEventsPerStage, maxNumberOfEventsPerStage, allocationRatioPlanned, conditionalCriticalValue) { NULL } ) calcEventsFunctionType <- calcSubjectsFunctionList$calcSubjectsFunctionType calcEventsFunctionR <- calcSubjectsFunctionList$calcSubjectsFunctionR calcEventsFunctionCpp <- calcSubjectsFunctionList$calcSubjectsFunctionCpp resultData <- getSimulationSurvivalCpp( designNumber = designNumber, kMax = design$kMax, sided = design$sided, criticalValues = design$criticalValues, informationRates = design$informationRates, conditionalPower = conditionalPower, plannedEvents = plannedEvents, thetaH1 = thetaH1, 
minNumberOfEventsPerStage = minNumberOfEventsPerStage, maxNumberOfEventsPerStage = maxNumberOfEventsPerStage, directionUpper = directionUpper, allocationRatioPlanned = allocationRatioPlanned, accrualTime = accrualTimeValue, treatmentGroup = treatmentGroup, thetaH0 = thetaH0, futilityBounds = futilityBounds, alpha0Vec = alpha0Vec, pi1Vec = pi1, pi2 = pi2, eventTime = eventTime, piecewiseSurvivalTime = .getPiecewiseExpStartTimesWithoutLeadingZero(pwsTimeObject$piecewiseSurvivalTime), cdfValues1 = cdfValues1, cdfValues2 = cdfValues2, lambdaVec1 = lambdaVec1, lambdaVec2 = lambdaVec2, phi = phi, maxNumberOfSubjects = accrualSetup$maxNumberOfSubjects, maxNumberOfIterations = maxNumberOfIterations, maxNumberOfRawDatasetsPerStage = maxNumberOfRawDatasetsPerStage, kappa = kappa, calcEventsFunctionType = calcEventsFunctionType, calcEventsFunctionR = calcEventsFunctionR, calcEventsFunctionCpp = calcEventsFunctionCpp ) overview <- resultData$overview if (length(overview) == 0 || nrow(overview) == 0) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "no simulation results calculated") } n <- nrow(overview) overview <- cbind( design = rep(sub("^TrialDesign", "", .getClassName(design)), n), overview ) if (pwsTimeObject$.isPiBased() && pwsTimeObject$.getParameterType("hazardRatio") != C_PARAM_USER_DEFINED) { simulationResults$hazardRatio <- matrix(overview$hazardRatio, nrow = design$kMax)[1, ] } simulationResults$iterations <- matrix(as.integer(overview$iterations), nrow = design$kMax) if (!is.null(overview$eventsPerStage)) { simulationResults$eventsPerStage <- matrix(overview$eventsPerStage, nrow = design$kMax) } simulationResults$eventsNotAchieved <- matrix(overview$eventsNotAchieved, nrow = design$kMax) if (any(simulationResults$eventsNotAchieved > 0)) { warning("Presumably due to drop-outs, required number of events ", "were not achieved for at least one situation. ", "Increase the maximum number of subjects (", accrualSetup$maxNumberOfSubjects, ") ", "to avoid this situation", call. 
= FALSE ) } simulationResults$numberOfSubjects <- matrix(overview$numberOfSubjects, nrow = design$kMax) simulationResults$numberOfSubjects1 <- .getNumberOfSubjects1(simulationResults$numberOfSubjects, allocationRatioPlanned) simulationResults$numberOfSubjects2 <- .getNumberOfSubjects2(simulationResults$numberOfSubjects, allocationRatioPlanned) if (any(allocationRatioPlanned != 1)) { simulationResults$.setParameterType("numberOfSubjects1", C_PARAM_GENERATED) simulationResults$.setParameterType("numberOfSubjects2", C_PARAM_GENERATED) } simulationResults$overallReject <- matrix(overview$overallReject, nrow = design$kMax)[1, ] if (design$kMax > 1) { simulationResults$rejectPerStage <- matrix(overview$rejectPerStage, nrow = design$kMax) } else { simulationResults$rejectPerStage <- matrix(simulationResults$overallReject, nrow = 1) } if (!all(is.na(overview$conditionalPowerAchieved))) { simulationResults$conditionalPowerAchieved <- matrix( overview$conditionalPowerAchieved, nrow = design$kMax ) simulationResults$.setParameterType("conditionalPowerAchieved", C_PARAM_GENERATED) } if (design$kMax == 1) { simulationResults$.setParameterType("numberOfSubjects", C_PARAM_NOT_APPLICABLE) simulationResults$.setParameterType("eventsPerStage", C_PARAM_NOT_APPLICABLE) } if (design$kMax > 1) { if (numberOfResults == 1) { simulationResults$futilityPerStage <- matrix( overview$futilityPerStage[1:(design$kMax - 1)], nrow = design$kMax - 1 ) } else { simulationResults$futilityPerStage <- matrix( matrix( overview$futilityPerStage, nrow = design$kMax )[1:(design$kMax - 1), ], nrow = design$kMax - 1 ) } } if (design$kMax > 1) { simulationResults$futilityStop <- matrix(overview$futilityStop, nrow = design$kMax)[1, ] simulationResults$earlyStop <- simulationResults$futilityStop + simulationResults$overallReject - simulationResults$rejectPerStage[design$kMax, ] } else { simulationResults$futilityStop <- rep(0, numberOfResults) simulationResults$earlyStop <- rep(0, numberOfResults) } 
simulationResults$analysisTime <- matrix(overview$analysisTime, nrow = design$kMax) simulationResults$studyDuration <- matrix(overview$studyDuration, nrow = design$kMax)[1, ] if (design$kMax > 1) { subData <- simulationResults$rejectPerStage[1:(design$kMax - 1), ] + simulationResults$futilityPerStage pStop <- rbind(subData, 1 - colSums(subData)) numberOfSubjects <- simulationResults$numberOfSubjects numberOfSubjects[is.na(numberOfSubjects)] <- 0 simulationResults$expectedNumberOfSubjects <- diag(t(numberOfSubjects) %*% pStop) if (!is.null(simulationResults$eventsPerStage) && nrow(simulationResults$eventsPerStage) > 0 && ncol(simulationResults$eventsPerStage) > 0) { simulationResults$overallEventsPerStage <- .convertStageWiseToOverallValues( simulationResults$eventsPerStage ) simulationResults$.setParameterType("overallEventsPerStage", C_PARAM_GENERATED) simulationResults$expectedNumberOfEvents <- diag(t(simulationResults$overallEventsPerStage) %*% pStop) } } else { simulationResults$expectedNumberOfSubjects <- as.numeric(simulationResults$numberOfSubjects) if (!is.null(simulationResults$eventsPerStage) && nrow(simulationResults$eventsPerStage) > 0 && ncol(simulationResults$eventsPerStage) > 0) { simulationResults$overallEventsPerStage <- simulationResults$eventsPerStage simulationResults$expectedNumberOfEvents <- as.numeric(simulationResults$overallEventsPerStage) } } if (is.null(simulationResults$expectedNumberOfEvents) || length(simulationResults$expectedNumberOfEvents) == 0) { warning("Failed to calculate expected number of events", call. 
= FALSE) } simulationResults$.data <- resultData$data[!is.na(resultData$data$iterationNumber), ] stages <- 1:design$kMax rawData <- resultData$rawData if (!is.null(rawData) && nrow(rawData) > 0 && ncol(rawData) > 0) { rawData <- rawData[!is.na(rawData$iterationNumber), ] } if (!is.null(rawData) && nrow(rawData) > 0 && ncol(rawData) > 0) { rawData <- rawData[order(rawData$iterationNumber, rawData$subjectId), ] rownames(rawData) <- NULL stopStageNumbers <- rawData$stopStage missingStageNumbers <- c() if (length(stopStageNumbers) > 0) { stopStageNumbers <- order(unique(stopStageNumbers)) missingStageNumbers <- stages[!which(stages %in% stopStageNumbers)] } else { missingStageNumbers <- stages } if (length(missingStageNumbers) > 0) { warning("Could not get rawData (individual results) for stages ", .arrayToString(missingStageNumbers), call. = FALSE ) } } else { rawData <- data.frame( iterationNumber = numeric(0), stopStage = numeric(0), pi1 = numeric(0), pi2 = numeric(0), subjectId = numeric(0), accrualTime = numeric(0), treatmentGroup = numeric(0), survivalTime = numeric(0), dropoutTime = numeric(0), observationTime = numeric(0), timeUnderObservation = numeric(0), event = logical(0), dropoutEvent = logical(0), censorIndicator = numeric(0) ) if (maxNumberOfRawDatasetsPerStage > 0) { warning("Could not get rawData (individual results) for stages ", .arrayToString(stages), call. 
= FALSE ) } } if (pwsTimeObject$.isLambdaBased() || length(pi1) < 2) { rawData <- rawData[, !(colnames(rawData) %in% c("pi1", "pi2"))] } # remove censorIndicator because it will not be calculated yet rawData <- rawData[, colnames(rawData) != "censorIndicator"] simulationResults$.rawData <- rawData return(simulationResults) } rpact/R/class_summary.R0000644000176200001440000047261714450551044014614 0ustar liggesusers## | ## | *Summary classes and functions* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | rpact package website: https://www.rpact.org ## | ## | Contact us for information about our services: info@rpact.com ## | ## | File version: $Revision: 7148 $ ## | Last changed: $Date: 2023-07-03 15:50:22 +0200 (Mo, 03 Jul 2023) $ ## | Last changed by: $Author: pahlke $ ## | #' @include f_core_utilities.R #' @include f_core_assertions.R NULL SummaryItem <- setRefClass("SummaryItem", fields = list( title = "character", values = "character", legendEntry = "list" ), methods = list( initialize = function(title = NA_character_, values = NA_character_, ...) { callSuper(title = title, values = values, ...) if (!is.null(legendEntry) && length(legendEntry) > 0) { if (is.null(names(legendEntry))) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("legendEntry"), " must be a named list") } for (l in legendEntry) { if (length(l) == 0) { stop(C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, sQuote("legendEntry"), " must be not empty") } } } }, show = function() { cat(title, "=", values, "\n") }, toList = function() { result <- list() result[[title]] <- values } ) ) #' #' @title #' Summary Factory Plotting #' #' @param x The summary factory object. 
#' @param y Not available for this kind of plot (is only defined to be compatible to the generic plot function).
#' @param showSummary Show the summary before creating the plot output, default is \code{FALSE}.
#' @inheritParams param_three_dots_plot
#'
#' @description
#' Plots a summary factory.
#'
#' @details
#' Generic function to plot all kinds of summary factories.
#'
#' @template return_object_ggplot
#'
#' @export
#'
plot.SummaryFactory <- function(x, y, ..., showSummary = FALSE) {
    # Show the textual summary first, either because the caller asked for it
    # explicitly or because this call arrived through the summary pipe.
    functionCall <- match.call(expand.dots = FALSE)
    summaryRequested <- isTRUE(showSummary) || .isSummaryPipe(functionCall)
    if (summaryRequested) {
        # Optional 'markdown' argument (default FALSE) selects the output style.
        useMarkdown <- .getOptionalArgument("markdown", ..., optionalArgumentDefaultValue = FALSE)
        if (!useMarkdown) {
            x$show()
        } else {
            x$.catMarkdownText()
        }
    }
    # Delegate the actual plotting to the result object wrapped by the factory.
    plot(x = x$object, y = y, ...)
}

#'
#' @title
#' Summary Factory Printing
#'
#' @param x The summary factory object.
#' @param markdown If \code{TRUE}, the object \code{x} will be printed using markdown syntax;
#' normal representation will be used otherwise (default is \code{FALSE})
#' @param showSummary Show the summary before creating the print output, default is \code{FALSE}.
#' @param sep The separator line between the summary and the print output.
#' @inheritParams param_three_dots_plot
#'
#' @description
#' Prints the result object stored inside a summary factory.
#'
#' @details
#' Generic function to print all kinds of summary factories.
#'
#' @export
#'
print.SummaryFactory <- function(x, ..., markdown = FALSE, showSummary = FALSE, sep = "\n-----\n\n") {
    # S3 print method: optionally emits the summary text (console or markdown)
    # before printing the wrapped result object.
    fCall <- match.call(expand.dots = FALSE)
    # Summary is shown when requested explicitly or when invoked via the summary pipe.
    if (isTRUE(showSummary) || .isSummaryPipe(fCall)) {
        .assertIsSingleCharacter(sep, "sep")
        if (markdown) {
            x$.catMarkdownText()
        } else {
            x$show()
        }
        # Separator between the summary block and the printed object.
        cat(sep)
    }
    # Delegate to the print method of the wrapped object.
    print(x$object, markdown = markdown)
}

#' @name SummaryFactory
#'
#' @title
#' Summary Factory
#'
#' @description
#' Basic class for summaries
#'
#' @keywords internal
#'
#' @importFrom methods new
#'
SummaryFactory <- setRefClass("SummaryFactory",
    contains = "ParameterSet",
    fields = list(
        object = "ParameterSet",      # the result object this summary describes
        title = "character",          # lazily created in .show() via .createSummaryTitleObject()
        header = "character",         # lazily created in .show() via .createSummaryHeaderObject()
        summaryItems = "list",        # list of SummaryItem objects forming the body table
        intervalFormat = "character", # sprintf pattern for value pairs, default "[%s; %s]"
        justify = "character",        # table justification, read from option "rpact.summary.justify"
        output = "character"          # which sections .show() emits: "all", "title", "overview", or "body"
    ),
    methods = list(
        initialize = function(..., intervalFormat = "[%s; %s]", output = "all") {
            callSuper(..., intervalFormat = intervalFormat, output = output)
            summaryItems <<- list()
            # Justification defaults to "right" unless overridden by the rpact option.
            justify <<- getOption("rpact.summary.justify", "right")
        },
        show = function(showType = 1, digits = NA_integer_) {
            # Console display simply forwards to the internal .show() implementation.
            .show(showType = showType, digits = digits, consoleOutputEnabled = TRUE)
        },
        .show = function(showType = 1, digits = NA_integer_, ..., consoleOutputEnabled = TRUE) {
            # Title section: created on first use, printed only if non-empty.
            if (output %in% c("all", "title")) {
                if (is.null(title) || length(title) == 0) {
                    title <<- .createSummaryTitleObject(object)
                }
                if (!is.null(title) && length(title) == 1 && trimws(title) != "") {
                    .cat(title, "\n\n",
                        heading = 1,
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                }
            }
            # Overview/header section: same lazy-create-then-print pattern.
            if (output %in% c("all", "overview")) {
                if (is.null(header) || length(header) == 0) {
                    header <<- .createSummaryHeaderObject(object, .self, digits)
                }
                if (!is.null(header) && length(header) == 1 && trimws(header) != "") {
                    .cat(header, "\n\n",
                        consoleOutputEnabled = consoleOutputEnabled
                    )
                }
            }
            # Stop here unless the body table was requested.
            if (!(output %in% c("all", "body"))) {
                return(invisible())
            }
            # Collect legend entries (deduplicated by name) and item titles
            # from all summary items before rendering the table.
            legendEntries <- c()
            legendEntriesUnique <- c()
            summaryItemNames <- c()
            for (summaryItem in summaryItems) {
                if
(!is.null(summaryItem$title) && length(summaryItem$title) == 1 && !is.na(summaryItem$title)) { summaryItemNames <- c(summaryItemNames, summaryItem$title) } if (length(summaryItem$legendEntry) > 0) { a <- sort(names(summaryItem$legendEntry)) for (aa in a) { if (!(aa %in% legendEntriesUnique)) { legendEntriesUnique <- c(legendEntriesUnique, aa) b <- summaryItem$legendEntry[[aa]] legendEntries <- c(legendEntries, paste0(" ", aa, ": ", b)) } } } } summaryItemNames <- paste0(format(summaryItemNames), " ") na <- ifelse(.isDataset(object), "NA", NA_character_) tableColumns <- 0 maxValueWidth <- 1 if (length(summaryItems) > 0) { for (i in 1:length(summaryItems)) { validValues <- na.omit(summaryItems[[i]]$values) if (length(validValues) > 0) { w <- max(nchar(validValues)) maxValueWidth <- max(maxValueWidth, w) tableColumns <- max(tableColumns, 1 + length(validValues)) } } spaceString <- paste0(rep(" ", maxValueWidth + 1), collapse = "") for (i in 1:length(summaryItems)) { itemTitle <- summaryItems[[i]]$title if (!is.null(itemTitle) && length(itemTitle) == 1 && !is.na(itemTitle)) { summaryItemName <- summaryItemNames[i] values <- summaryItems[[i]]$values values <- trimws(values) indices <- !grepl("(\\])$", values) values[indices] <- paste0(values[indices], " ") values <- format(c(spaceString, values), justify = justify)[2:(length(values) + 1)] .cat(summaryItemName, values, "\n", tableColumns = tableColumns, consoleOutputEnabled = consoleOutputEnabled, na = na ) if (!consoleOutputEnabled && trimws(summaryItemName) == "Stage") { .cat(rep("----- ", tableColumns), "\n", tableColumns = tableColumns, consoleOutputEnabled = consoleOutputEnabled, na = na ) } } } } if (length(legendEntries) > 0) { .cat("\n", consoleOutputEnabled = consoleOutputEnabled) .cat("Legend:\n", consoleOutputEnabled = consoleOutputEnabled) if (!consoleOutputEnabled) { .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } for (legendEntry in legendEntries) { .cat(legendEntry, "\n", consoleOutputEnabled = 
consoleOutputEnabled) } .cat("\n", consoleOutputEnabled = consoleOutputEnabled) } }, addItem = function(title, values, legendEntry = list()) { if (!is.character(values)) { values <- as.character(values) } tryCatch( { addSummaryItem(SummaryItem(title = title, values = values, legendEntry = legendEntry)) }, error = function(e) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to add summary item '", title, "' = ", .arrayToString(values), " (class: ", .getClassName(values), "): ", e$message ) } ) }, addSummaryItem = function(summaryItem) { if (!inherits(summaryItem, "SummaryItem")) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'summaryItem' must be an instance of class 'SummaryItem' (was '", .getClassName(summaryItem), "')" ) } summaryItems <<- c(summaryItems, summaryItem) }, .getFormattedParameterValue = function(valuesToShow, valuesToShow2) { naText <- getOption("rpact.summary.na", "") if (length(valuesToShow) == length(valuesToShow2) && !all(is.na(valuesToShow2))) { for (variantIndex in 1:length(valuesToShow)) { value1 <- as.character(valuesToShow[variantIndex]) value2 <- as.character(valuesToShow2[variantIndex]) if (grepl("^ *NA *$", value1)) { value1 <- naText } if (grepl("^ *NA *$", value2)) { value2 <- naText } if (trimws(value1) == "" && trimws(value2) == "") { valuesToShow[variantIndex] <- naText } else { valuesToShow[variantIndex] <- sprintf(intervalFormat, value1, value2) } } } else { valuesToShow[is.na(valuesToShow) | trimws(valuesToShow) == "NA"] <- naText } return(valuesToShow) }, addParameter = function(parameterSet, ..., parameterName = NULL, values = NULL, parameterCaption, roundDigits = NA_integer_, ceilingEnabled = FALSE, cumsumEnabled = FALSE, twoSided = FALSE, transpose = FALSE, smoothedZeroFormat = FALSE, parameterCaptionSingle = parameterCaption, legendEntry = list(), enforceFirstCase = FALSE, formatRepeatedPValues = FALSE) { if (!is.null(parameterName) && length(parameterName) == 1 && inherits(parameterSet, "ParameterSet") && 
parameterSet$.getParameterType(parameterName) == C_PARAM_NOT_APPLICABLE) { if (.getLogicalEnvironmentVariable("RPACT_DEVELOPMENT_MODE")) { warning( "Failed to add parameter ", .arrayToString(parameterName), " (", .arrayToString(values), ") stored in ", .getClassName(parameterSet), " because the parameter has type C_PARAM_NOT_APPLICABLE" ) } return(invisible()) } parameterName1 <- parameterName[1] if (!is.null(parameterName1) && is.character(parameterName1) && is.null(values)) { values <- parameterSet[[parameterName1]] if (is.null(values)) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, .getClassName(parameterSet), " does not contain a field '", parameterName1, "'" ) } } parameterName2 <- NA_character_ values2 <- NA_real_ if (!is.null(parameterName) && length(parameterName) > 1) { parameterName2 <- parameterName[2] values2 <- parameterSet[[parameterName2]] parameterName <- parameterName[1] if (is.null(values2)) { stop( C_EXCEPTION_TYPE_RUNTIME_ISSUE, .getClassName(parameterSet), " does not contain a field '", parameterName2, "'" ) } } if (is.null(values) && is.null(parameterName1)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'parameterName' or 'values' must be defined") } if (transpose) { if (!is.matrix(values)) { values <- as.matrix(values) } else { values <- t(values) } } if (is.list(parameterSet) && is.matrix(values)) { parameterSet <- parameterSet[["parameterSet"]] if (is.null(parameterSet)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'parameterSet' must be added to list") } } parameterNames <- "" numberOfVariants <- 1 numberOfStages <- ifelse(is.matrix(values), ncol(values), length(values)) if (inherits(parameterSet, "ParameterSet")) { parameterNames <- parameterSet$.getVisibleFieldNamesOrdered() numberOfVariants <- .getMultidimensionalNumberOfVariants(parameterSet, parameterNames) numberOfStages <- parameterSet$.getMultidimensionalNumberOfStages(parameterNames) } stages <- parameterSet[["stages"]] if (is.null(stages) && !is.null(parameterSet[[".stageResults"]])) { stages <- 
parameterSet[[".stageResults"]][["stages"]] } if (is.null(stages) && inherits(parameterSet, "ClosedCombinationTestResults")) { stages <- parameterSet[[".design"]][["stages"]] } if (!is.null(stages) && length(stages) > 0) { numberOfStages <- max(na.omit(stages)) if (is.matrix(values) && nrow(values) > 0) { numberOfVariants <- nrow(values) } if (is.matrix(values) && ncol(values) > 0) { numberOfStages <- ncol(values) } } if (!is.null(parameterSet[[".piecewiseSurvivalTime"]]) && isTRUE(parameterSet[[".piecewiseSurvivalTime"]]$delayedResponseEnabled)) { numberOfVariants <- 1 } if (twoSided) { values <- 2 * values } caseCondition <- list( and1 = enforceFirstCase, and2 = inherits(parameterSet, "Dataset"), and3 = list( or1 = list( and1 = !transpose, and2 = numberOfVariants == 1 ), or2 = list( and1 = !is.matrix(values), and2 = (!transpose && ncol(values) == 1), and3 = (transpose && nrow(values) == 1) ), or3 = list( and1 = .isTrialDesign(parameterSet), and2 = (numberOfStages > 1 && numberOfStages == length(values)), and3 = length(values) != numberOfVariants, and4 = length(values) == 1, and5 = parameterName %in% c( "futilityBoundsEffectScale", "futilityBoundsEffectScaleLower", "futilityBoundsEffectScaleUpper", "futilityPerStage" ) ) ) ) if (.isConditionTrue(caseCondition, "or", showDebugMessages = FALSE)) { valuesToShow <- .getSummaryValuesFormatted( parameterSet, parameterName1, values, roundDigits = roundDigits, ceilingEnabled = ceilingEnabled, cumsumEnabled = cumsumEnabled, smoothedZeroFormat = smoothedZeroFormat, formatRepeatedPValues = formatRepeatedPValues ) if (parameterName1 %in% c("piControl", "overallPiControl", "overallPooledStDevs")) { valuesToShow <- .getInnerValues(valuesToShow, transpose = TRUE) } else { valuesToShow <- .getInnerValues(valuesToShow, transpose = transpose) } valuesToShow2 <- NA_real_ if (!all(is.na(values2))) { valuesToShow2 <- .getSummaryValuesFormatted(parameterSet, parameterName1, values2, roundDigits = roundDigits, ceilingEnabled = 
ceilingEnabled, cumsumEnabled = cumsumEnabled, smoothedZeroFormat = smoothedZeroFormat, formatRepeatedPValues = formatRepeatedPValues ) valuesToShow2 <- .getInnerValues(valuesToShow2, transpose = transpose) } valuesToShow <- .getFormattedParameterValue(valuesToShow, valuesToShow2) addItem(parameterCaptionSingle, valuesToShow, legendEntry) } else { if (!inherits(parameterSet, "ParameterSet")) { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "for varied values 'parameterSet' must be an instance of ", "class 'ParameterSet' (was '", .getClassName(parameterSet), "')" ) } transposed <- !transpose && grepl("MultiArm|Enrichment", .getClassName(parameterSet)) && (!is.matrix(values) || ncol(values) > 1) userDefinedEffectMatrix <- FALSE if (grepl("MultiArm|Enrichment", .getClassName(parameterSet)) || inherits(parameterSet, "AnalysisResultsConditionalDunnett") || inherits(parameterSet, "ClosedCombinationTestResults") || inherits(parameterSet, "ConditionalPowerResults")) { if (grepl("SimulationResults(MultiArm|Enrichment)", .getClassName(parameterSet)) && parameterName %in% c( "rejectAtLeastOne", "earlyStop", "futilityPerStage", "successPerStage", "expectedNumberOfSubjects", "expectedNumberOfEvents", "singleNumberOfEventsPerStage", "numberOfActiveArms", "numberOfPopulations", "conditionalPowerAchieved" )) { transposed <- TRUE userDefinedEffectMatrix <- parameterSet$.getParameterType("effectMatrix") == C_PARAM_USER_DEFINED if (userDefinedEffectMatrix) { legendEntry[["[j]"]] <- "effect matrix row j (situation to consider)" } if (grepl("Survival", .getClassName(parameterSet)) && !grepl("Enrichment", .getClassName(parameterSet))) { legendEntry[["(i)"]] <- "results of treatment arm i vs. 
control arm" } if (grepl("SimulationResultsEnrichment", .getClassName(parameterSet))) { variedParameterName <- .getSummaryVariedParameterNameEnrichment(parameterSet) variedParameterValues <- parameterSet$effectList[[variedParameterName]] if (variedParameterName == "piTreatments") { variedParameterCaption <- "pi(treatment)" } else { variedParameterCaption <- C_PARAMETER_NAMES[[variedParameterName]] if (is.matrix(variedParameterValues) && ncol(variedParameterValues) == 1) { variedParameterCaption <- sub("s$", "", variedParameterCaption) } } if (is.matrix(variedParameterValues)) { numberOfVariants <- nrow(variedParameterValues) } else { numberOfVariants <- length(variedParameterValues) } } else { variedParameterName <- .getVariedParameterSimulationMultiArm(parameterSet) variedParameterValues <- parameterSet[[variedParameterName]] variedParameterCaption <- C_PARAMETER_NAMES[[variedParameterName]] numberOfVariants <- length(variedParameterValues) } variedParameterCaption <- tolower(variedParameterCaption) } else if (.isEnrichmentObject(parameterSet)) { transposed <- TRUE variedParameterCaption <- "populations" if (parameterName1 %in% c( "indices", "conditionalErrorRate", "secondStagePValues", "adjustedStageWisePValues", "overallAdjustedTestStatistics", "rejectedIntersections" )) { if (.isEnrichmentAnalysisResults(parameterSet)) { variedParameterValues <- parameterSet$.closedTestResults$.getHypothesisPopulationVariants() } else { variedParameterValues <- parameterSet$.getHypothesisPopulationVariants() } } else { variedParameterValues <- c(paste0("S", 1:(numberOfVariants - 1)), "F") } numberOfVariants <- length(variedParameterValues) legendEntry[["S[i]"]] <- "population i" legendEntry[["F"]] <- "full population" } else if (!inherits(parameterSet, "ClosedCombinationTestResults") || parameterName %in% c("rejected", "separatePValues")) { if (inherits(parameterSet, "AnalysisResultsConditionalDunnett") && (!is.matrix(values) || ncol(values) > 1)) { transposed <- TRUE } if 
(inherits(parameterSet, "ClosedCombinationTestResults") && parameterSet$.getParameterType("adjustedStageWisePValues") != "g" && parameterName == "separatePValues") { transposed <- TRUE } if (inherits(parameterSet, "ClosedCombinationTestResults") && parameterName %in% c("rejected")) { transposed <- TRUE } if (inherits(parameterSet, "ConditionalPowerResults") && parameterName %in% c("conditionalPower", "values")) { transposed <- TRUE } variedParameterCaption <- "arm" variedParameterValues <- 1:numberOfVariants legendEntry[["(i)"]] <- "results of treatment arm i vs. control arm" } else { transposed <- TRUE variedParameterCaption <- "arms" variedParameterValues <- parameterSet$.getHypothesisTreatmentArmVariants() numberOfVariants <- length(variedParameterValues) legendEntry[["(i, j, ...)"]] <- "comparison of treatment arms 'i, j, ...' vs. control arm" } } else { if (inherits(parameterSet, "Dataset")) { variedParameter <- "groups" } else if (inherits(parameterSet, "PerformanceScore")) { variedParameter <- ".alternative" } else { variedParameter <- parameterSet$.getVariedParameter(parameterNames, numberOfVariants) } if (length(variedParameter) == 0 || variedParameter == "") { warning( "Failed to get varied parameter from ", .getClassName(parameterSet), " (", length(parameterNames), " parameter names; numberOfVariants: ", numberOfVariants, ")" ) return(invisible()) } variedParameterCaption <- parameterSet$.getDataFrameColumnCaption(variedParameter, tableColumnNames = C_TABLE_COLUMN_NAMES, niceColumnNamesEnabled = TRUE ) variedParameterCaption <- tolower(variedParameterCaption) if (variedParameterCaption == "alternative" || variedParameterCaption == ".alternative") { legendEntry[["alt."]] <- "alternative" variedParameterCaption <- "alt." 
} else if (variedParameterCaption == "hazard ratio") { legendEntry[["HR"]] <- "hazard ratio" variedParameterCaption <- "HR" } else if (grepl("\\(1\\)$", variedParameterCaption)) { groups <- parameterSet[["groups"]] if (!is.null(groups) && length(groups) == 1 && groups == 1) { variedParameterCaption <- sub(" \\(1\\)$", "", variedParameterCaption) } } variedParameterValues <- round(parameterSet[[variedParameter]], 3) } for (variantIndex in 1:numberOfVariants) { colValues <- .getColumnValues(parameterName, values, variantIndex, transposed) colValues <- .getSummaryValuesFormatted(parameterSet, parameterName1, colValues, roundDigits = roundDigits, ceilingEnabled = ceilingEnabled, cumsumEnabled = cumsumEnabled, smoothedZeroFormat = smoothedZeroFormat, formatRepeatedPValues = formatRepeatedPValues ) colValues2 <- NA_real_ if (!all(is.na(values2))) { colValues2 <- .getColumnValues(parameterName, values2, variantIndex, transposed) colValues2 <- .getSummaryValuesFormatted(parameterSet, parameterName2, colValues2, roundDigits = roundDigits, ceilingEnabled = ceilingEnabled, cumsumEnabled = cumsumEnabled, smoothedZeroFormat = smoothedZeroFormat, formatRepeatedPValues = formatRepeatedPValues ) } colValues <- .getFormattedParameterValue(valuesToShow = colValues, valuesToShow2 = colValues2) if (numberOfVariants == 1) { addItem(parameterCaption, colValues, legendEntry) } else if (.isEnrichmentObject(parameterSet)) { addItem(paste0( parameterCaption, " ", variedParameterValues[variantIndex] ), colValues, legendEntry) } else if ( (grepl("MultiArm|Enrichment", .getClassName(parameterSet)) && !grepl("Simulation", .getClassName(parameterSet))) || inherits(parameterSet, "AnalysisResultsConditionalDunnett") || inherits(parameterSet, "ClosedCombinationTestResults") || inherits(parameterSet, "ConditionalPowerResults")) { spacePrefix <- ifelse(parameterCaption %in% c("pi", "lambda", "median"), "", " ") addItem(paste0( parameterCaption, spacePrefix, "(", variedParameterValues[variantIndex], 
")" ), colValues, legendEntry) } else if (userDefinedEffectMatrix) { addItem(paste0(parameterCaption, " [", variantIndex, "]"), colValues, legendEntry) } else { if (is.matrix(variedParameterValues) && ncol(variedParameterValues) > 1) { variedParameterValuesFormatted <- .arrayToString(variedParameterValues[variantIndex, ], vectorLookAndFeelEnabled = TRUE) } else { variedParameterValuesFormatted <- variedParameterValues[variantIndex] } addItem( paste0( parameterCaption, ", ", variedParameterCaption, " = ", variedParameterValuesFormatted ), colValues, legendEntry ) } } } }, .isEnrichmentObject = function(parameterSet) { return( .isEnrichmentAnalysisResults(parameterSet) || .isEnrichmentStageResults(parameterSet) || .isEnrichmentConditionalPowerResults(parameterSet) || (inherits(parameterSet, "ClosedCombinationTestResults") && isTRUE(parameterSet$.enrichment)) ) }, .getInnerValues = function(values, transpose = FALSE) { if (!is.matrix(values)) { return(values) } if (nrow(values) == 1 && ncol(values) == 1) { return(values[1, 1]) } if (transpose) { return(values[1, ]) } return(values[, 1]) }, .getColumnValues = function(parameterName, values, variantIndex, transposed = FALSE) { tryCatch( { if (transposed) { if (!is.matrix(values)) { return(values) } if (nrow(values) == 0) { return("") } if (nrow(values) == 1 && ncol(values) == 1) { colValues <- values[1, 1] } else if (nrow(values) == 1) { colValues <- values[1, variantIndex] } else if (ncol(values) == 1) { colValues <- values[variantIndex, 1] } else { colValues <- values[variantIndex, ] } return(colValues) } if (length(values) <= 1 && !is.matrix(values)) { colValues <- values } else if (is.matrix(values)) { if (nrow(values) == 1 && ncol(values) == 1) { colValues <- values[1, 1] } else if (ncol(values) == 1) { colValues <- values[variantIndex, 1] } else if (nrow(values) == 1) { colValues <- values[1, variantIndex] } else { if (ncol(values) == 0) { return("") } colValues <- values[, variantIndex] } } else { colValues <- 
values[variantIndex]
            }
            return(colValues)
        },
        error = function(e) {
            # Re-raise with full context (parameter name, value class, dimensions,
            # variant index, orientation) to make rendering failures diagnosable.
            stop(
                ".getColumnValues(", dQuote(parameterName), "): ", e$message,
                "; .getClassName(values) = ", .getClassName(values),
                "; dim(values) = ", .arrayToString(dim(values), vectorLookAndFeelEnabled = TRUE),
                "; variantIndex = ", variantIndex,
                "; transposed = ", transposed
            )
        }
    )
}
)
)

# Formats a numeric vector for summary display.
# - 'digits' of NA defaults to 3; 'digits' < 1 returns the values as plain character strings.
# - Non-NA values with 1e-10 < value < 10^-digits are rendered as "<0.0...1" (threshold text).
# - smoothedZeroFormat: values with |x| < 1e-15 are snapped to 0 before rounding.
# - formatRepeatedPValues: formatted values above 0.4999 are shown as ">0.5".
# - Trailing all-zero decimals are trimmed to "0" unless option "rpact.summary.trim.zeroes" is FALSE.
# - NAs are replaced by getOption("rpact.summary.na", "").
.formatSummaryValues <- function(values, digits, smoothedZeroFormat = FALSE, formatRepeatedPValues = FALSE) {
    if (is.na(digits)) {
        digits <- 3
    }
    if (digits < 1) {
        # No rounding requested: plain character conversion with NA replacement only.
        formattedValue <- as.character(values)
        formattedValue[is.na(formattedValue) | trimws(formattedValue) == "NA"] <- getOption("rpact.summary.na", "")
        return(formattedValue)
    }
    if (sum(is.na(values)) == length(values)) {
        # All values missing: emit the configured NA placeholder for each entry.
        formattedValue <- rep(getOption("rpact.summary.na", ""), length(values))
        return(formattedValue)
    }
    # Build the "<0.001"-style text matching the requested number of digits.
    threshold <- 10^-digits
    text <- "<0."
    if (digits > 1) {
        for (i in 1:(digits - 1)) {
            text <- paste0(text, "0")
        }
    }
    text <- paste0(text, "1")
    if (smoothedZeroFormat) {
        values[abs(values) < 1e-15] <- 0
    }
    # 'indices' marks small positive values that fall below display precision.
    indices <- (!is.na(values) & values > 1e-10 & abs(values) < threshold)
    values[!is.na(values) & !indices] <- round(values[!is.na(values) & !indices], digits)
    if (sum(indices) > 0) {
        values[indices] <- threshold
        formattedValue <- .getFormattedValue(values, digits = digits, nsmall = digits, scientific = FALSE)
        formattedValue[indices] <- text
    } else {
        formattedValue <- .getFormattedValue(values, digits = digits, nsmall = digits, scientific = FALSE)
        formattedValue <- format(formattedValue, scientific = FALSE)
    }
    if (formatRepeatedPValues) {
        # NOTE(review): 'formattedValue' is character here, so "> 0.4999" relies on
        # R's implicit coercion in the comparison — confirm this is intended.
        formattedValue[!is.na(formattedValue) & nchar(gsub("\\D", "", (formattedValue))) > 0 & formattedValue > 0.4999] <- ">0.5"
    }
    if (as.logical(getOption("rpact.summary.trim.zeroes", TRUE))) {
        zeroes <- grepl("^0\\.0*$", formattedValue)
        if (sum(zeroes) > 0) {
            formattedValue[zeroes] <- "0"
        }
    }
    formattedValue[is.na(formattedValue) | trimws(formattedValue) == "NA"] <- getOption("rpact.summary.na", "")
    return(formattedValue)
}
# Applies summary formatting to the numeric 'values' of parameter 'parameterName'.
# Order of operations: cumulative sums (cumsumEnabled), then either ceiling
# (ceilingEnabled) or value formatting; formatting uses a registered format
# function (fieldSet$.parameterFormatFunctions / C_PARAMETER_FORMAT_FUNCTIONS)
# when roundDigits == 0, otherwise .formatSummaryValues().
# Non-numeric input is returned unchanged.
.getSummaryValuesFormatted <- function(fieldSet, parameterName, values, roundDigits = NA_integer_, ceilingEnabled = FALSE, cumsumEnabled = FALSE, smoothedZeroFormat = FALSE, formatRepeatedPValues = FALSE) {
    if (!is.numeric(values)) {
        return(values)
    }
    if (cumsumEnabled) {
        values <- cumsum(values)
    }
    if (ceilingEnabled) {
        values <- ceiling(values)
    } else {
        tryCatch(
            {
                formatFunctionName <- NULL
                if (!is.null(parameterName) && length(parameterName) == 1 && !is.na(parameterName)) {
                    if (parameterName == "futilityBounds") {
                        # Futility bounds of -6 or lower are displayed as -Inf.
                        values[!is.na(values) & values <= -6] <- -Inf
                    } else if (parameterName %in% c("criticalValues", "decisionCriticalValue", "overallAdjustedTestStatistics")) {
                        # For Fisher designs the critical values are routed through the
                        # format-function branch below (roundDigits set to 0).
                        design <- fieldSet
                        if (!.isTrialDesign(design)) {
                            design <- fieldSet[[".design"]]
                        }
                        if (!is.null(design) && .isTrialDesignFisher(design)) {
                            roundDigits <- 0
                        }
                    }
                    if (!is.na(roundDigits) && roundDigits == 0) {
                        if (inherits(fieldSet, "Dataset") && grepl("samplesize|event", tolower(parameterName))) {
                            # Dataset sample size / event counts: no format-function lookup,
                            # fall through to the default formatting below.
                        } else {
                            if (inherits(fieldSet, "FieldSet")) {
                                formatFunctionName <- fieldSet$.parameterFormatFunctions[[parameterName]]
                            }
                            if (is.null(formatFunctionName)) {
                                formatFunctionName <- C_PARAMETER_FORMAT_FUNCTIONS[[parameterName]]
                            }
                        }
                    }
                }
                if (!is.null(formatFunctionName)) {
                    values <- eval(call(formatFunctionName, values))
                } else {
                    values <- .formatSummaryValues(values,
                        digits = roundDigits,
                        smoothedZeroFormat = smoothedZeroFormat,
                        formatRepeatedPValues = formatRepeatedPValues
                    )
                }
            },
            error = function(e) {
                stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to show parameter '", parameterName, "': ", e$message)
            }
        )
    }
    return(format(values))
}

# Builds the summary title for a design-like object by dispatching on its class
# (TrialDesignCharacteristics, TrialDesignPlan/SimulationResults, AnalysisResults,
# plain TrialDesign); returns "" when no case applies.
.createSummaryTitleObject <- function(object) {
    design <- NULL
    designPlan <- NULL
    if (inherits(object, "TrialDesignCharacteristics")) {
        design <- object$.design
    } else if (.isTrialDesignPlan(object) || inherits(object, "SimulationResults")) {
        design <- object$.design
        designPlan <- object
    } else if (inherits(object, "AnalysisResults")) {
        return(.createSummaryTitleAnalysisResults(object$.design, object))
    } else if (.isTrialDesign(object)) {
        design <- object
    }
    if (!is.null(design)) {
        return(.createSummaryTitleDesign(design, designPlan))
    }
    return("")
}

# Builds the summary title for analysis results: endpoint type (continuous /
# binary / survival, derived from the dataset class name), multi-arm or
# enrichment context, and the number of arms/populations where applicable.
.createSummaryTitleAnalysisResults <- function(design, analysisResults) {
    kMax <- design$kMax
    title <- ""
    if (kMax == 1) {
        title <- paste0(title, "Fixed sample analysis results")
    } else {
        title <- paste0(title, "Sequential analysis results with a maximum of ", kMax, " looks")
    }
    if (!is.null(analysisResults)) {
        # When concrete analysis results are available, the generic title above
        # is replaced by a more specific one.
        if (.isMultiArmAnalysisResults(analysisResults)) {
            title <- "Multi-arm analysis results for a "
        } else if (.isEnrichmentAnalysisResults(analysisResults)) {
            title <- "Enrichment analysis results for a "
        } else {
            title <- "Analysis results for a "
        }
        if (grepl("Means", .getClassName(analysisResults$.dataInput))) {
            title <- paste0(title, "continuous endpoint")
        } else if (grepl("Rates", .getClassName(analysisResults$.dataInput))) {
            title <- paste0(title, "binary endpoint")
        } else if (grepl("Survival", .getClassName(analysisResults$.dataInput))) {
            title <- paste0(title, "survival endpoint")
        }
        if (.isMultiHypothesesAnalysisResults(analysisResults)) {
            gMax <- analysisResults$.stageResults$getGMax()
            if (.isMultiArmAnalysisResults(analysisResults)) {
                title <- paste0(title, " (", gMax, " active arms vs. control)")
            } else if (.isEnrichmentAnalysisResults(analysisResults)) {
                title <- paste0(title, " (", gMax, " populations)")
            }
        }
    } else if (kMax > 1) {
        prefix <- ifelse(design$.isDelayedResponseDesign(), "delayed response ", "")
        title <- .concatenateSummaryText(title,
            paste0("(", prefix, design$.toString(startWithUpperCase = FALSE), ")"),
            sep = " "
        )
    }
    return(title)
}

# Builds the summary title for a design, optionally specialized by a design plan
# or simulation results (sample size / power / simulation wording plus endpoint
# type and multi-arm / enrichment annotation).
.createSummaryTitleDesign <- function(design, designPlan) {
    kMax <- design$kMax
    title <- ""
    if (kMax == 1) {
        title <- paste0(title, "Fixed sample analysis")
    } else {
        title <- paste0(title, "Sequential analysis with a maximum of ", kMax, " looks")
    }
    if (!is.null(designPlan)) {
        if (inherits(designPlan, "SimulationResults")) {
            title <- "Simulation of a "
        } else if (designPlan$.isSampleSizeObject()) {
            title <- "Sample size calculation for a "
        } else if (designPlan$.isPowerObject()) {
            title <- "Power calculation for a "
        }
        if (grepl("Means", .getClassName(designPlan))) {
            title <- paste0(title, "continuous endpoint")
        } else if (grepl("Rates", .getClassName(designPlan))) {
            title <- paste0(title, "binary endpoint")
        } else if (grepl("Survival", .getClassName(designPlan))) {
            title <- paste0(title, "survival endpoint")
        }
        if (grepl("MultiArm", .getClassName(designPlan)) &&
                !is.null(designPlan[["activeArms"]]) && designPlan$activeArms > 1) {
            title <- .concatenateSummaryText(title, "(multi-arm design)", sep = " ")
        } else if (grepl("Enrichment", .getClassName(designPlan))) {
            title <- .concatenateSummaryText(title, "(enrichment design)", sep = " ")
        }
    } else if (kMax > 1) {
        prefix <- ifelse(design$.isDelayedResponseDesign(), "delayed response ", "")
        title <- .concatenateSummaryText(title,
            paste0("(", prefix, design$.toString(startWithUpperCase = FALSE), ")"),
            sep = " "
        )
    }
    return(title)
}

# Returns TRUE when the object requests a ratio comparison (mean ratio or
# risk ratio); FALSE otherwise.
.isRatioComparisonEnabled <- function(object) {
    if (!is.null(object[["meanRatio"]]) && isTRUE(object[["meanRatio"]])) {
        return(TRUE)
    }
    if (!is.null(object[["riskRatio"]]) && isTRUE(object[["riskRatio"]])) {
        return(TRUE)
    }
    return(FALSE)
}

# Derives display settings from an object's class name and fields:
# endpoint flags (means/rates/survival), number of groups, populations,
# multi-arm / enrichment / simulation flags and ratio-comparison flag.
.getSummaryObjectSettings <- function(object) {
    multiArmEnabled <- grepl("MultiArm", .getClassName(object))
    enrichmentEnabled <- grepl("Enrichment", .getClassName(object))
    simulationEnabled <- grepl("Simulation", .getClassName(object))
    ratioEnabled <- FALSE
    populations <- NA_integer_
    if (inherits(object, "AnalysisResults") || inherits(object, "StageResults")) {
        # Analysis / stage results: endpoint type is read from the dataset class.
        groups <- object$.dataInput$getNumberOfGroups()
        meansEnabled <- grepl("Means", .getClassName(object$.dataInput))
        ratesEnabled <- grepl("Rates", .getClassName(object$.dataInput))
        survivalEnabled <- grepl("Survival", .getClassName(object$.dataInput))
    } else {
        meansEnabled <- grepl("Means", .getClassName(object))
        ratesEnabled <- grepl("Rates", .getClassName(object))
        survivalEnabled <- grepl("Survival", .getClassName(object))
        if (simulationEnabled && multiArmEnabled) {
            groups <- object$activeArms
        } else if (simulationEnabled && enrichmentEnabled) {
            groups <- 2
            populations <- object$populations
        } else {
            # for analysis multi-arm / enrichment always 2 groups are applicable
            groups <- ifelse(multiArmEnabled || enrichmentEnabled || survivalEnabled, 2, object[["groups"]])
        }
        ratioEnabled <- .isRatioComparisonEnabled(object)
    }
    return(list(
        meansEnabled = meansEnabled,
        ratesEnabled = ratesEnabled,
        survivalEnabled = survivalEnabled,
        groups = groups,
        populations = populations,
        multiArmEnabled = multiArmEnabled,
        enrichmentEnabled = enrichmentEnabled,
        simulationEnabled = simulationEnabled,
        ratioEnabled = ratioEnabled
    ))
}

# Builds the "H0: ... against H1: ..." hypothesis line for the summary of an
# AnalysisResults, TrialDesignPlan, or SimulationResults object.  The effect
# symbol (mu / pi / hazard ratio), the arm index suffixes and the H1 direction
# are derived from the object's settings; appends the power-direction note.
.createSummaryHypothesisText <- function(object, summaryFactory) {
    if (!inherits(object, "AnalysisResults") &&
            !inherits(object, "TrialDesignPlan") &&
            !inherits(object, "SimulationResults")) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'object' must be an instance of class 'AnalysisResults', 'TrialDesignPlan' ",
            "or 'SimulationResults' (is '", .getClassName(object), "')"
        )
    }
    design <- object[[".design"]]
    if (is.null(design)) {
        stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "'.design' must be defined in specified ", .getClassName(object))
    }
    settings <- .getSummaryObjectSettings(object)
    # Multi-arm / enrichment hypotheses are always displayed one-sided.
    sided <- ifelse(settings$multiArmEnabled || settings$enrichmentEnabled, 1, design$sided)
    directionUpper <- object[["directionUpper"]]
    if (is.null(directionUpper) || length(directionUpper) != 1 || is.na(directionUpper)) {
        directionUpper <- TRUE
    }
    comparisonH0 <- " = "
    comparisonH1 <- NA_character_
    if (inherits(object, "AnalysisResults") && !is.null(directionUpper)) {
        comparisonH1 <- ifelse(sided == 2, " != ", ifelse(directionUpper, " > ", " < "))
    }
    if (!is.null(object[["thetaH0"]])) {
        thetaH0 <- round(object$thetaH0, 3)
    } else {
        # Default null value: 1 for survival (hazard ratio), 0 otherwise.
        thetaH0 <- ifelse(settings$survivalEnabled, 1, 0)
    }
    # Arm index suffixes, refined below for the multi-arm / enrichment cases.
    treatmentArmIndex <- ifelse(settings$groups > 1, "(i)", "(treatment)")
    controlArmIndex <- ifelse(settings$groups > 1, "(i)", "(control)")
    if (settings$multiArmEnabled || settings$enrichmentEnabled) {
        if ((settings$survivalEnabled) && (settings$multiArmEnabled)) {
            treatmentArmIndex <- "(i)"
            controlArmIndex <- ""
        } else if ((settings$survivalEnabled) && (settings$enrichmentEnabled)) {
            treatmentArmIndex <- ""
            controlArmIndex <- ""
        } else if (settings$groups == 1) {
            treatmentArmIndex <- "(treatment)"
            controlArmIndex <- "(control)"
        } else {
            if (settings$enrichmentEnabled) {
                treatmentArmIndex <- "(treatment)"
            } else {
                treatmentArmIndex <- "(i)"
            }
            controlArmIndex <- "(control)"
        }
    } else {
        if (settings$groups == 1 || settings$survivalEnabled) {
            treatmentArmIndex <- ""
            controlArmIndex <- ""
        } else {
            treatmentArmIndex <- "(1)"
            controlArmIndex <- "(2)"
        }
    }
    value <- "?"
    if (settings$meansEnabled) {
        value <- "mu"
    } else if (settings$ratesEnabled) {
        value <- "pi"
    } else if (settings$survivalEnabled) {
        value <- "hazard ratio"
    }
    calcSep <- ifelse(settings$ratioEnabled, " / ", " - ")
    hypothesis <- ""
    if (!settings$survivalEnabled &&
            (settings$multiArmEnabled || settings$enrichmentEnabled || settings$groups == 2)) {
        # Two-group comparison: difference (or ratio) of effect measures.
        hypothesis <- paste0(
            hypothesis, "H0: ", value, treatmentArmIndex, calcSep,
            value, controlArmIndex, comparisonH0, thetaH0
        )
        if (!is.na(comparisonH1)) {
            hypothesis <- paste0(hypothesis, " against ")
            hypothesis <- paste0(
                hypothesis, "H1: ", value, treatmentArmIndex, calcSep,
                value, controlArmIndex, comparisonH1, thetaH0
            )
        }
    } else {
        hypothesis <- paste0(hypothesis, "H0: ", value, treatmentArmIndex, comparisonH0, thetaH0)
        if (!is.na(comparisonH1)) {
            hypothesis <- paste0(hypothesis, " against ")
            hypothesis <- paste0(hypothesis, "H1: ", value, treatmentArmIndex, comparisonH1, thetaH0)
        }
    }
    hypothesis <- .concatenateSummaryText(
        hypothesis,
        .createSummaryHypothesisPowerDirectionText(object, sided, directionUpper)
    )
    return(hypothesis)
}

# Returns the "power directed towards larger/smaller values" note for one-sided
# power objects; "" for two-sided designs, analysis results, and non-power plans.
.createSummaryHypothesisPowerDirectionText <- function(object, sided, directionUpper) {
    if (sided == 2 || is.null(directionUpper)) {
        return("")
    }
    directionUpper <- unique(directionUpper)
    if (length(directionUpper) != 1) {
        return("")
    }
    if (inherits(object, "AnalysisResults")) {
        return("")
    }
    if (.isTrialDesignPlan(object) && object$.objectType != "power") {
        return("")
    }
    if (directionUpper) {
        return("power directed towards larger values")
    } else {
        return("power directed towards smaller values")
    }
}

# Appends "\n" to 'text' when its last line plus 'newLineLength' characters
# would exceed the configured summary width (option "rpact.summary.width", 83).
.addSummaryLineBreak <- function(text, newLineLength) {
    maxLineLength <- as.integer(getOption("rpact.summary.width", 83))
    lines <- strsplit(text, "\n", fixed = TRUE)[[1]]
    lastLine <- lines[length(lines)]
    if (nchar(lastLine) + newLineLength > maxLineLength) {
        text <- paste0(text, "\n")
    }
    return(text)
}

# Concatenates 'b' to 'a' with separator 'sep', inserting a line break via
# .addSummaryLineBreak() when needed; empty or NA 'b' leaves 'a' unchanged.
.concatenateSummaryText <- function(a, b, sep = ", ") {
    .assertIsSingleCharacter(a, "a")
.assertIsSingleCharacter(b, "b")
    if (is.na(b) || nchar(trimws(b)) == 0) {
        return(a)
    }
    if (a == "") {
        return(b)
    }
    a <- paste0(a, sep)
    a <- .addSummaryLineBreak(a, nchar(b))
    return(paste0(a, b))
}

# Builds the summary header by dispatching on the object's class.
# Note: TrialDesignCharacteristics is checked before the plain .isTrialDesign()
# case so that its embedded design is used; returns "" when no case applies.
.createSummaryHeaderObject <- function(object, summaryFactory, digits = NA_integer_) {
    if (inherits(object, "TrialDesignCharacteristics")) {
        return(.createSummaryHeaderDesign(object$.design, NULL, summaryFactory))
    }
    if (.isTrialDesignPlan(object) || inherits(object, "SimulationResults")) {
        return(.createSummaryHeaderDesign(object$.design, object, summaryFactory))
    }
    if (inherits(object, "AnalysisResults")) {
        return(.createSummaryHeaderAnalysisResults(object$.design, object, summaryFactory, digits))
    }
    if (.isTrialDesign(object)) {
        return(.createSummaryHeaderDesign(object, NULL, summaryFactory))
    }
    return("")
}

# Appends the planned allocation ratio to 'header' (with "optimum " prefix when
# optimumAllocationRatio is set); single-group non-survival, non-simulation
# objects and a plain ratio of 1 leave the header unchanged.
.addAllocationRatioToHeader <- function(parameterSet, header, sep = ", ") {
    if (!.isTrialDesignPlanSurvival(parameterSet) &&
            !grepl("Simulation", .getClassName(parameterSet))) {
        numberOfGroups <- 1
        if (inherits(parameterSet, "TrialDesignPlan")) {
            numberOfGroups <- parameterSet$groups
        } else if (inherits(parameterSet, "AnalysisResults")) {
            numberOfGroups <- parameterSet$.dataInput$getNumberOfGroups()
        }
        if (numberOfGroups == 1) {
            # One-group case: an allocation ratio is not applicable.
            return(header)
        }
    }
    prefix <- ""
    if (!is.null(parameterSet[["optimumAllocationRatio"]]) &&
            length(parameterSet$optimumAllocationRatio) == 1 &&
            parameterSet$optimumAllocationRatio) {
        if (length(unique(parameterSet$allocationRatioPlanned)) > 1) {
            return(.concatenateSummaryText(header, "optimum planned allocation ratio", sep = sep))
        }
        prefix <- "optimum "
    }
    allocationRatioPlanned <- round(unique(parameterSet$allocationRatioPlanned), 3)
    if (identical(allocationRatioPlanned, 1) && prefix == "") {
        # Default 1:1 allocation is not worth mentioning.
        return(header)
    }
    if (!all(is.na(allocationRatioPlanned))) {
        return(.concatenateSummaryText(header,
            paste0(
                prefix, "planned allocation ratio = ",
                .arrayToString(allocationRatioPlanned,
                    vectorLookAndFeelEnabled = length(allocationRatioPlanned) > 1
                )
            ),
            sep = sep
))
    } else {
        return(header)
    }
}

# Builds the multi-line summary header for analysis results: design description,
# test type (t-test / test for rates / logrank, derived from the dataset),
# intersection test, approximation/variance options, stratification, the
# hypothesis line, and the conditional-power assumptions.
.createSummaryHeaderAnalysisResults <- function(design, analysisResults, summaryFactory, digits) {
    digitSettings <- .getSummaryDigits(digits)
    digitsGeneral <- digitSettings$digitsGeneral
    stageResults <- analysisResults$.stageResults
    dataInput <- analysisResults$.dataInput
    multiArmEnabled <- .isMultiArmAnalysisResults(analysisResults)
    enrichmentEnabled <- .isEnrichmentAnalysisResults(analysisResults)
    multiHypothesesEnabled <- .isMultiHypothesesAnalysisResults(analysisResults)
    header <- ""
    if (design$kMax == 1) {
        header <- paste0(header, "Fixed sample analysis.")
    } else {
        header <- paste0(header, "Sequential analysis with ", design$kMax, " looks")
        header <- .concatenateSummaryText(header,
            paste0("(", design$.toString(startWithUpperCase = FALSE), ")."),
            sep = " "
        )
    }
    header <- paste0(header, "\n")
    header <- paste0(header, "The results were calculated using a ")
    # Test description depends on the dataset type and the number of groups.
    if (stageResults$isDatasetMeans()) {
        if (dataInput$getNumberOfGroups() == 1) {
            header <- paste0(header, "one-sample t-test")
        } else if (dataInput$getNumberOfGroups() == 2) {
            header <- paste0(header, "two-sample t-test")
        } else {
            header <- paste0(header, "multi-arm t-test")
        }
    } else if (stageResults$isDatasetRates()) {
        if (dataInput$getNumberOfGroups() == 1) {
            header <- paste0(header, "one-sample test for rates")
        } else if (dataInput$getNumberOfGroups() == 2) {
            header <- paste0(header, "two-sample test for rates")
        } else {
            header <- paste0(header, "multi-arm test for rates")
        }
    } else if (stageResults$isDatasetSurvival()) {
        if (dataInput$getNumberOfGroups() == 2) {
            header <- paste0(header, "two-sample logrank test")
        } else {
            header <- paste0(header, "multi-arm logrank test")
        }
    }
    header <- .concatenateSummaryText(header,
        paste0("(", ifelse(design$sided == 1, "one", "two"), "-sided, alpha = ", round(design$alpha, 4), ")"),
        sep = " "
    )
    if (!.isTrialDesignConditionalDunnett(design) && multiHypothesesEnabled) {
        if (stageResults$intersectionTest == "Dunnett") {
            header <- .concatenateSummaryText(header, "Dunnett intersection test")
        } else if (stageResults$intersectionTest == "Bonferroni") {
            header <- .concatenateSummaryText(header, "Bonferroni intersection test")
        } else if (stageResults$intersectionTest == "Simes") {
            header <- .concatenateSummaryText(header, "Simes intersection test")
        } else if (stageResults$intersectionTest == "Sidak") {
            header <- .concatenateSummaryText(header, "Sidak intersection test")
        } else if (stageResults$intersectionTest == "Hierarchical") {
            header <- .concatenateSummaryText(header, "Hierarchical intersection test")
        } else if (stageResults$intersectionTest == "SpiessensDebois") {
            header <- .concatenateSummaryText(header, "Spiessens and Debois intersection test")
        }
    }
    if (!is.null(stageResults[["normalApproximation"]]) && stageResults$normalApproximation) {
        header <- .concatenateSummaryText(header, "normal approximation test")
    } else if (stageResults$isDatasetRates()) {
        if (dataInput$getNumberOfGroups() == 1) {
            header <- .concatenateSummaryText(header, "exact test")
        } else {
            header <- .concatenateSummaryText(header, "exact test of Fisher")
        }
    } else {
        # header <- .concatenateSummaryText(header, "exact t test")
    }
    if (stageResults$isDatasetMeans() && multiHypothesesEnabled) {
        if (stageResults$varianceOption == "overallPooled") {
            header <- .concatenateSummaryText(header, "overall pooled variances option")
        } else if (stageResults$varianceOption == "pairwisePooled") {
            header <- .concatenateSummaryText(header, "pairwise pooled variances option")
        } else if (stageResults$varianceOption == "pooledFromFull") {
            header <- .concatenateSummaryText(header, "pooled from full population variances option")
        } else if (stageResults$varianceOption == "pooled") {
            header <- .concatenateSummaryText(header, "pooled variances option")
        } else if (stageResults$varianceOption == "notPooled") {
            header <- .concatenateSummaryText(header, "not pooled variances option")
        }
    }
    if (inherits(stageResults, "StageResultsMeans") && (dataInput$getNumberOfGroups() == 2)) {
        if (stageResults$equalVariances) {
            header <- .concatenateSummaryText(header, "equal variances option")
        } else {
            header <- .concatenateSummaryText(header, "unequal variances option")
        }
    }
    if (.isTrialDesignConditionalDunnett(design)) {
        if (design$secondStageConditioning) {
            header <- .concatenateSummaryText(header, "conditional second stage p-values")
        } else {
            header <- .concatenateSummaryText(header, "unconditional second stage p-values")
        }
    }
    if (enrichmentEnabled) {
        header <- .concatenateSummaryText(header, paste0(
            ifelse(analysisResults$stratifiedAnalysis, "", "non-"), "stratified analysis"
        ))
    }
    header <- paste0(header, ".\n", .createSummaryHypothesisText(analysisResults, summaryFactory))
    # Conditional-power assumptions, per endpoint type.
    if (stageResults$isDatasetMeans()) {
        header <- .getSummaryHeaderEntryAnalysisResults(header, analysisResults,
            paramName1 = "thetaH1",
            paramName2 = ifelse(multiHypothesesEnabled, "assumedStDevs", "assumedStDev"),
            paramCaption1 = "assumed effect",
            paramCaption2 = "assumed standard deviation",
            shortcut1 = "thetaH1",
            shortcut2 = "sd",
            digits1 = digitsGeneral,
            digits2 = digitsGeneral
        )
    } else if (stageResults$isDatasetRates()) {
        header <- .getSummaryHeaderEntryAnalysisResults(header, analysisResults,
            paramName1 = ifelse(enrichmentEnabled, "piTreatments", ifelse(multiArmEnabled, "piTreatments", "pi1")),
            paramName2 = ifelse(enrichmentEnabled, "piControls", ifelse(multiArmEnabled, "piControl", "pi2")),
            paramCaption1 = "assumed treatment rate",
            paramCaption2 = "assumed control rate",
            shortcut1 = "pi",
            shortcut2 = "pi"
        )
    } else if (stageResults$isDatasetSurvival()) {
        header <- .getSummaryHeaderEntryAnalysisResults(header, analysisResults,
            paramName1 = "thetaH1",
            paramCaption1 = "assumed effect",
            shortcut1 = "thetaH1",
            digits1 = digitsGeneral
        )
    }
    header <- paste0(header, ".")
    return(header)
}

# Formats a conditional-power parameter value for the header: selects the
# current stage column from a matrix, rounds to 2 digits, and for vectors
# produces "shortcut(name) = value" pairs (enrichment uses S1..S(gMax-1), F).
.getSummaryHeaderEntryValueAnalysisResults <- function(shortcut, value, analysisResults) {
    if (is.matrix(value)) {
        stage <- analysisResults$.stageResults$stage
        if (stage <= ncol(value)) {
            value <- value[, stage]
        }
    }
    value[!is.na(value)] <- round(value[!is.na(value)], 2)
    if ((is.matrix(value) && nrow(value) > 1) || length(value) > 1) {
        treatmentNames <- 1:length(value)
        if (.isEnrichmentAnalysisResults(analysisResults)) {
            # Enrichment: last entry (gMax) is the full population "F".
            populations <- paste0("S", treatmentNames)
            gMax <- analysisResults$.stageResults$getGMax()
            populations[treatmentNames == gMax] <- "F"
            treatmentNames <- populations
        }
        value <- paste0(paste(paste0(shortcut, "(", treatmentNames, ") = ", value)), collapse = ", ")
    }
    return(value)
}

# Appends the conditional-power assumption sentence (planned sample size,
# allocation ratio, and up to two assumed parameters) to 'header'; returns
# 'header' unchanged for fixed designs or when no planned stages remain.
.getSummaryHeaderEntryAnalysisResults <- function(header, analysisResults, ...,
        paramName1, paramName2 = NA_character_,
        paramCaption1, paramCaption2 = NA_character_,
        shortcut1, shortcut2 = NA_character_,
        digits1 = 2, digits2 = 2) {
    if (analysisResults$.design$kMax == 1) {
        return(header)
    }
    if (length(analysisResults$nPlanned) == 0 || all(is.na(analysisResults$nPlanned))) {
        return(header)
    }
    paramValue1 <- analysisResults[[paramName1]]
    case1 <- analysisResults$.getParameterType(paramName1) != C_PARAM_NOT_APPLICABLE && !all(is.na(paramValue1))
    if (!is.na(paramCaption1) && analysisResults$.getParameterType(paramName1) == C_PARAM_GENERATED) {
        # Generated parameters are reported as "overall ..." instead of "assumed ...".
        paramCaption1 <- sub("assumed ", "overall ", paramCaption1)
    }
    case2 <- FALSE
    if (!is.na(paramName2)) {
        paramValue2 <- analysisResults[[paramName2]]
        case2 <- analysisResults$.getParameterType(paramName2) != C_PARAM_NOT_APPLICABLE && !all(is.na(paramValue2))
        if (!is.na(paramCaption2) && analysisResults$.getParameterType(paramName2) == C_PARAM_GENERATED) {
            paramCaption2 <- sub("assumed ", "overall ", paramCaption2)
        }
    }
    if (!case1 && !case2) {
        return(header)
    }
    if (.isTrialDesignFisher(analysisResults$.design) &&
            length(analysisResults$nPlanned[!is.na(analysisResults$nPlanned)]) > 1) {
        header <- .concatenateSummaryText(header,
            paste0(
                "The conditional power simulation with planned sample size and ",
                analysisResults$iterations, " iterations is based on"
            ),
            sep = ". "
        )
    } else {
        header <- .concatenateSummaryText(header,
            "The conditional power calculation with planned sample size is based on",
            sep = ". "
        )
    }
    header <- .addAllocationRatioToHeader(analysisResults, header, sep = " ")
    sepPrefix <- ifelse(length(analysisResults$allocationRatioPlanned) == 0 ||
        identical(unique(analysisResults$allocationRatioPlanned), 1), "", ",")
    if (case1) {
        if (!any(is.na(paramValue1)) && length(unique(paramValue1)) == 1) {
            paramValue1 <- paramValue1[1]
        }
        if (length(paramValue1) == 1) {
            header <- .concatenateSummaryText(header,
                paste0(paramCaption1, " = ", ifelse(is.na(paramValue1), paramValue1, round(paramValue1, digits1))),
                sep = paste0(sepPrefix, " ")
            )
        } else {
            header <- .concatenateSummaryText(header,
                paste0(paramCaption1, ": ", .getSummaryHeaderEntryValueAnalysisResults(
                    shortcut1, paramValue1, analysisResults
                )),
                sep = paste0(sepPrefix, " ")
            )
        }
    }
    if (case2) {
        if (length(paramValue2) == 1) {
            header <- .concatenateSummaryText(header,
                paste0(paramCaption2, " = ", ifelse(is.na(paramValue2), paramValue2, round(paramValue2, digits2))),
                sep = ifelse(case1, paste0(sepPrefix, " and "), " ")
            )
        } else {
            header <- .concatenateSummaryText(header,
                paste0(paramCaption2, ": ", .getSummaryHeaderEntryValueAnalysisResults(
                    shortcut2, paramValue2, analysisResults
                )),
                sep = ifelse(case1, paste0(sepPrefix, " and "), " ")
            )
        }
    }
    return(header)
}

# Appends enrichment effect-list details (subgroups, prevalences, and optional
# control rates) to 'header'; no-op for non-enrichment simulation results.
.addEnrichmentEffectListToHeader <- function(header, designPlan) {
    if (!grepl("SimulationResultsEnrichment", .getClassName(designPlan)) ||
            is.null(designPlan[["effectList"]])) {
        return(header)
    }
    subGroups <- designPlan$effectList$subGroups
    header <- .concatenateSummaryText(header, paste0(
        "subgroup", ifelse(length(subGroups) != 1, "s", ""), " = ",
        .arrayToString(subGroups, vectorLookAndFeelEnabled = TRUE)
    ))
    prevalences <- designPlan$effectList$prevalences
    header <- .concatenateSummaryText(header, paste0(
        "prevalence", ifelse(length(prevalences) != 1, "s", ""), " = ",
        .arrayToString(round(prevalences, 3),
vectorLookAndFeelEnabled = TRUE)
    ))
    if (!is.null(designPlan$effectList[["piControls"]])) {
        piControls <- designPlan$effectList$piControls
        if (length(piControls) > 0) {
            # collapse identical control rates to a single value
            if (length(unique(piControls)) == 1) {
                piControls <- piControls[1]
            }
            controlRateText <- paste0(
                "control rate", ifelse(length(piControls) == 1, "", "s"),
                " pi(control) = ",
                .arrayToString(round(piControls, 3),
                    vectorLookAndFeelEnabled = (length(unique(piControls)) > 1))
            )
            header <- .concatenateSummaryText(header, controlRateText)
        }
    }
    return(header)
}

#
# Creates the summary header text, either for a design alone
# (designPlan = NULL) or for a design plan / power / simulation object.
# (The definition continues in the following chunks.)
#
.createSummaryHeaderDesign <- function(design, designPlan, summaryFactory) {
    if (is.null(designPlan)) {
        # design-only summary: describe design type and its tuning parameters
        if (.isTrialDesignFisher(design)) {
            designType <- "Fisher's combination test"
        } else if (.isTrialDesignConditionalDunnett(design)) {
            designType <- "Conditional Dunnett test"
        } else {
            designType <- C_TYPE_OF_DESIGN_LIST[[design$typeOfDesign]]
        }
        header <- .firstCharacterToUpperCase(designType)
        header <- paste0(header, " design")
        if (design$.isDelayedResponseDesign()) {
            header <- paste0(header, " with delayed response")
        }
        if (design$kMax > 1 && .isTrialDesignInverseNormalOrGroupSequential(design)) {
            # append the design-type specific parameter(s) in parentheses
            if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT) {
                header <- .concatenateSummaryText(header,
                    paste0("(deltaWT = ", round(design$deltaWT, 3), ")"),
                    sep = " "
                )
            } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_WT_OPTIMUM) {
                header <- .concatenateSummaryText(header,
                    paste0("(", design$optimizationCriterion, ", deltaWT = ", round(design$deltaWT, 3), ")"),
                    sep = " "
                )
            } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_PT) {
                header <- .concatenateSummaryText(header,
                    paste0("(deltaPT1 = ", round(design$deltaPT1, 3), ""),
                    sep = " "
                )
                header <- .concatenateSummaryText(header,
                    paste0("deltaPT0 = ", round(design$deltaPT0, 3), ")"),
                    sep = ", "
                )
            } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_HP) {
                header <- .concatenateSummaryText(header,
                    paste0("(constant bounds = ", round(design$constantBoundsHP, 3), ")"),
                    sep = " "
                )
            } else if (design$typeOfDesign %in%
                    c(C_TYPE_OF_DESIGN_AS_KD, C_TYPE_OF_DESIGN_AS_HSD)) {
                header <- .concatenateSummaryText(header,
                    paste0("(gammaA = ", round(design$gammaA, 3), ")"),
                    sep = " "
                )
            } else if (design$typeOfDesign == C_TYPE_OF_DESIGN_AS_USER) {
                header <- .concatenateSummaryText(header,
                    paste0("(", .arrayToString(round(design$userAlphaSpending, 3)), ")"),
                    sep = " "
                )
            }
            # alpha spending designs may additionally use beta spending
            if (grepl("^as", design$typeOfDesign) && design$typeBetaSpending != C_TYPE_OF_DESIGN_BS_NONE) {
                typeBetaSpending <- C_TYPE_OF_DESIGN_BS_LIST[[design$typeBetaSpending]]
                header <- .concatenateSummaryText(header, typeBetaSpending, sep = " and ")
                if (design$typeBetaSpending %in% c(C_TYPE_OF_DESIGN_BS_KD, C_TYPE_OF_DESIGN_BS_HSD)) {
                    header <- .concatenateSummaryText(header,
                        paste0("(gammaB = ", round(design$gammaB, 3), ")"),
                        sep = " "
                    )
                } else if (design$typeBetaSpending == C_TYPE_OF_DESIGN_BS_USER) {
                    header <- .concatenateSummaryText(header,
                        paste0("(", .arrayToString(round(design$userBetaSpending, 3)), ")"),
                        sep = " "
                    )
                }
            }
        }
        if (!.isDelayedInformationEnabled(design = design) &&
                ((.isTrialDesignInverseNormalOrGroupSequential(design) &&
                    any(design$futilityBounds > -6, na.rm = TRUE)) ||
                    (.isTrialDesignFisher(design) && any(design$alpha0Vec < 1)))) {
            header <- .concatenateSummaryText(
                header,
                paste0(ifelse(design$bindingFutility, "binding", "non-binding"), " futility")
            )
        }
        header <- .concatenateSummaryText(header, paste0(
            ifelse(design$sided == 1, "one-sided", "two-sided"),
            ifelse(design$kMax == 1, "", " overall")
        ))
        header <- .concatenateSummaryText(header,
            paste0("significance level ", round(100 * design$alpha, 2), "%"),
            sep = " "
        )
        if (.isTrialDesignInverseNormalOrGroupSequential(design)) {
            header <- .concatenateSummaryText(header, paste0("power ", round(100 * (1 - design$beta), 1), "%"))
        }
        header <- .concatenateSummaryText(header, "undefined endpoint")
        if (design$kMax > 1 && .isTrialDesignInverseNormalOrGroupSequential(design)) {
            outputSize <- getOption("rpact.summary.output.size", C_SUMMARY_OUTPUT_SIZE_DEFAULT)
designCharacteristics <- NULL
            # design characteristics may fail for exotic designs; log and continue
            tryCatch(
                {
                    designCharacteristics <- getDesignCharacteristics(design)
                },
                error = function(e) {
                    .logError("Cannot add design characteristics to summary: ", e$message)
                }
            )
            if (!is.null(designCharacteristics)) {
                header <- .concatenateSummaryText(
                    header,
                    paste0("inflation factor ", round(designCharacteristics$inflationFactor, 4))
                )
                if (outputSize == "large") {
                    header <- .concatenateSummaryText(
                        header,
                        paste0("ASN H1 ", round(designCharacteristics$averageSampleNumber1, 4))
                    )
                    header <- .concatenateSummaryText(
                        header,
                        paste0("ASN H01 ", round(designCharacteristics$averageSampleNumber01, 4))
                    )
                    header <- .concatenateSummaryText(
                        header,
                        paste0("ASN H0 ", round(designCharacteristics$averageSampleNumber0, 4))
                    )
                }
            }
        }
        header <- paste0(header, ".")
        return(header)
    }

    # designPlan available: describe the analysis / sample size / simulation context
    header <- ""
    if (design$kMax == 1) {
        header <- paste0(header, "Fixed sample analysis,")
    } else {
        header <- paste0(header, "Sequential analysis with a maximum of ", design$kMax, " looks")
        prefix <- ifelse(design$.isDelayedResponseDesign(), "delayed response ", "")
        header <- .concatenateSummaryText(header,
            paste0("(", prefix, design$.toString(startWithUpperCase = FALSE), ")"),
            sep = " "
        )
    }
    header <- .concatenateSummaryText(header, ifelse(design$kMax == 1, "", "overall"))
    header <- .concatenateSummaryText(header,
        paste0("significance level ", round(100 * design$alpha, 2), "%"),
        sep = " "
    )
    header <- .concatenateSummaryText(header, ifelse(design$sided == 1, "(one-sided).", "(two-sided)."), sep = " ")
    header <- paste0(header, "\n")
    header <- paste0(header, "The ", ifelse(inherits(designPlan, "SimulationResults") ||
        designPlan$.isPowerObject(), "results were ", "sample size was "))
    header <- paste0(header, ifelse(inherits(designPlan, "SimulationResults"), "simulated", "calculated"))
    header <- paste0(header, " for a ")
    settings <- .getSummaryObjectSettings(designPlan)
    # test description depending on endpoint and number of arms/populations
    if (settings$meansEnabled) {
        if (settings$multiArmEnabled && settings$groups > 1) {
            header <- .concatenateSummaryText(header, "multi-arm comparisons for means", sep = "")
        } else if (settings$enrichmentEnabled && settings$populations > 1) {
            header <- .concatenateSummaryText(header, "population enrichment comparisons for means", sep = "")
        } else if (settings$groups == 1 && !settings$multiArmEnabled) {
            header <- .concatenateSummaryText(header, "one-sample t-test", sep = "")
        } else if (settings$groups == 2 || settings$multiArmEnabled) {
            header <- .concatenateSummaryText(header, "two-sample t-test", sep = "")
        }
    } else if (settings$ratesEnabled) {
        if (settings$multiArmEnabled && settings$groups > 1) {
            header <- .concatenateSummaryText(header, "multi-arm comparisons for rates", sep = "")
        } else if (settings$enrichmentEnabled && settings$populations > 1) {
            header <- .concatenateSummaryText(header, "population enrichment comparisons for rates", sep = "")
        } else if (settings$groups == 1 && !settings$multiArmEnabled) {
            header <- .concatenateSummaryText(header, "one-sample test for rates", sep = "")
        } else if (settings$groups == 2 || settings$multiArmEnabled) {
            header <- .concatenateSummaryText(header, "two-sample test for rates", sep = "")
        }
    } else if (settings$survivalEnabled) {
        if (settings$multiArmEnabled && settings$groups > 1) {
            header <- .concatenateSummaryText(header, "multi-arm logrank test", sep = "")
        } else if (settings$enrichmentEnabled && settings$populations > 1) {
            header <- .concatenateSummaryText(header, "population enrichment logrank test", sep = "")
        } else if (settings$groups == 2 || settings$multiArmEnabled) {
            header <- .concatenateSummaryText(header, "two-sample logrank test", sep = "")
        }
    }
    # parenthesized details: number of arms/populations, approximation type
    part <- ""
    if (settings$multiArmEnabled && settings$groups > 1) {
        part <- .concatenateSummaryText(part, paste0(settings$groups, " treatments vs. control"))
    } else if (settings$enrichmentEnabled) {
        if (settings$groups == 2) {
            part <- .concatenateSummaryText(part, "treatment vs. control")
        } else if (settings$groups > 2) {
            part <- .concatenateSummaryText(part, paste0(settings$groups, " treatments vs. control"))
        }
        part <- .concatenateSummaryText(part, paste0(
            settings$populations, " population", ifelse(settings$populations == 1, "", "s")
        ))
    }
    if (!is.null(designPlan) &&
            (.isTrialDesignPlan(designPlan) || inherits(designPlan, "SimulationResults")) &&
            !settings$multiArmEnabled && !settings$enrichmentEnabled && !settings$survivalEnabled) {
        if (settings$ratesEnabled) {
            if (settings$groups == 1) {
                part <- .concatenateSummaryText(part, ifelse(designPlan$normalApproximation,
                    "normal approximation", "exact test"
                ))
            } else {
                part <- .concatenateSummaryText(part, ifelse(designPlan$normalApproximation,
                    "normal approximation", "exact test of Fisher"
                ))
            }
        } else if (designPlan$normalApproximation) {
            part <- .concatenateSummaryText(part, "normal approximation")
        }
    }
    if (part != "") {
        header <- .concatenateSummaryText(header, paste0("(", part, ")"), sep = " ")
    }
    if (settings$meansEnabled && (.isTrialDesignInverseNormalOrGroupSequential(design) ||
            inherits(designPlan, "SimulationResults"))) {
        # means endpoint: state the alternative (H1) and the standard deviation
        header <- .concatenateSummaryText(header, .createSummaryHypothesisText(designPlan, summaryFactory))
        if (!is.null(designPlan[["alternative"]]) && length(designPlan$alternative) == 1) {
            alternativeText <- paste0("H1: effect = ", round(designPlan$alternative, 3))
        } else if (!is.null(designPlan[["muMaxVector"]]) && length(designPlan$muMaxVector) == 1) {
            alternativeText <- paste0("H1: mu_max = ", round(designPlan$muMaxVector, 3))
        } else if (!is.null(designPlan[["effectList"]]) && !is.null(designPlan$effectList[["effects"]]) &&
                isTRUE(nrow(designPlan$effectList$effects) == 1)) {
            alternativeText <- paste0(
                "H1: effects = ",
                .arrayToString(designPlan$effectList$effects, mode = "vector")
            )
        } else {
            alternativeText <- "H1: effect as specified"
        }
        header <- .concatenateSummaryText(header, alternativeText)
        header <- .addEnrichmentEffectListToHeader(header, designPlan)
        if (grepl("SimulationResultsEnrichment", .getClassName(designPlan))) {
            stDevs <- designPlan$effectList$stDevs
            if (length(unique(stDevs)) == 1) {
                stDevs <- unique(stDevs)
            }
            s <- ifelse(length(stDevs) != 1, "s", "")
            # ratio comparisons report coefficients of variation instead
            stDevCaption <- ifelse(.isRatioComparisonEnabled(designPlan),
                paste0("coefficient", s, " of variation"),
                paste0("standard deviation", s)
            )
            header <- .concatenateSummaryText(header, paste0(
                stDevCaption, " = ",
                .arrayToString(round(stDevs, 3), vectorLookAndFeelEnabled = TRUE)
            ))
        } else {
            stDevCaption <- ifelse(.isRatioComparisonEnabled(designPlan),
                "coefficient of variation", "standard deviation")
            header <- .concatenateSummaryText(header, paste0(stDevCaption, " = ", round(designPlan$stDev, 3)))
        }
        header <- .addAdditionalArgumentsToHeader(header, designPlan, settings)
    } else if (settings$ratesEnabled && (.isTrialDesignInverseNormalOrGroupSequential(design) ||
            inherits(designPlan, "SimulationResults"))) {
        if (settings$groups == 1) {
            # one-sample rates
            if (!is.null(designPlan[["pi1"]]) && length(designPlan$pi1) == 1) {
                treatmentRateText <- paste0("H1: treatment rate pi = ", round(designPlan$pi1, 3))
            } else {
                treatmentRateText <- "H1: treatment rate pi as specified"
            }
            header <- paste0(header, ",\n", .createSummaryHypothesisText(designPlan, summaryFactory))
            header <- .concatenateSummaryText(header, treatmentRateText)
            header <- .addAdditionalArgumentsToHeader(header, designPlan, settings)
        } else {
            if (!is.null(designPlan[["pi1"]]) && length(designPlan$pi1) == 1) {
                treatmentRateText <- paste0("H1: treatment rate pi(1) = ", round(designPlan$pi1, 3))
            } else if (!is.null(designPlan[["piMaxVector"]]) && length(designPlan$piMaxVector) == 1) {
                treatmentRateText <- paste0(
                    "H1: treatment rate pi_max = ",
                    .arrayToString(round(designPlan$piMaxVector, 3), vectorLookAndFeelEnabled = TRUE)
                )
            } else if (settings$enrichmentEnabled && !is.null(designPlan[["effectList"]]) &&
                    !is.null(designPlan$effectList[["piTreatments"]])) {
                piTreatments <- designPlan$effectList[["piTreatments"]]
                if
(is.matrix(piTreatments) && nrow(piTreatments) == 1) {
                    treatmentRateText <- paste0(
                        "H1: assumed treatment rate pi(treatment) = ",
                        .arrayToString(round(designPlan$effectList$piTreatments, 3), vectorLookAndFeelEnabled = TRUE)
                    )
                } else {
                    treatmentRateText <- paste0("H1: assumed treatment rate pi(treatment) as specified")
                }
            } else {
                treatmentRateText <- paste0(
                    "H1: treatment rate pi",
                    ifelse(settings$multiArmEnabled, "_max", "(1)"), " as specified"
                )
            }
            # determine the matching control rate description
            controlRateText <- NA_character_
            if (settings$multiArmEnabled && !is.null(designPlan[["piControl"]])) {
                controlRateText <- paste0("control rate pi(control) = ", round(designPlan$piControl, 3))
            } else if (settings$enrichmentEnabled && !is.null(designPlan[["piControls"]])) {
                controlRateText <- paste0(
                    "control rates pi(control) = ",
                    .arrayToString(round(designPlan$piControls, 3), vectorLookAndFeelEnabled = TRUE)
                )
            } else if (settings$enrichmentEnabled && !is.null(designPlan[["effectList"]]) &&
                    !is.null(designPlan$effectList[["piControls"]])) {
                # controlRateText will be created in .addEnrichmentEffectListToHeader()
            } else if (!is.null(designPlan[["pi2"]])) {
                controlRateText <- paste0("control rate pi(2) = ", round(designPlan$pi2, 3))
            } else {
                stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "failed to identify case to build ", sQuote("controlRateText"))
            }
            header <- paste0(header, ",\n", .createSummaryHypothesisText(designPlan, summaryFactory))
            header <- .concatenateSummaryText(header, treatmentRateText)
            if (!is.na(controlRateText)) {
                header <- .concatenateSummaryText(header, controlRateText)
            }
            header <- .addEnrichmentEffectListToHeader(header, designPlan)
            header <- .addAdditionalArgumentsToHeader(header, designPlan, settings)
        }
    } else if (settings$survivalEnabled && (.isTrialDesignInverseNormalOrGroupSequential(design) ||
            inherits(designPlan, "SimulationResults"))) {
        # survival endpoint: identify which effect-scale parameter was user defined
        parameterNames <- designPlan$.getVisibleFieldNamesOrdered()
        numberOfVariants <- .getMultidimensionalNumberOfVariants(designPlan, parameterNames)
        if (grepl("SimulationResultsEnrichment", .getClassName(designPlan))) {
            userDefinedParam <- "hazardRatios"
            paramName <- "hazard ratios"
            paramValue <- designPlan$effectList$hazardRatios
        } else {
            userDefinedParam <- "pi1"
            for (param in c("pi1", "lambda1", "median1", "hazardRatio")) {
                if (designPlan$.getParameterType(param) == C_PARAM_USER_DEFINED &&
                        length(designPlan[[param]]) == numberOfVariants) {
                    userDefinedParam <- param
                }
            }
            paramValue <- designPlan[[userDefinedParam]]
            if (is.null(paramValue) || length(paramValue) == 0 || all(is.na(paramValue))) {
                userDefinedParam <- "hazardRatio"
            }
            paramName <- "treatment pi(1)"
            if (userDefinedParam == "lambda1") {
                paramName <- "treatment lambda(1)"
            } else if (userDefinedParam == "median1") {
                paramName <- "treatment median(1)"
            } else if (userDefinedParam == "hazardRatio") {
                paramName <- ifelse(grepl("SimulationResultsMultiArm", .getClassName(designPlan)),
                    "omega_max", "hazard ratio")
            }
        }
        if (length(designPlan[[userDefinedParam]]) == 1) {
            treatmentRateText <- paste0("H1: ", paramName, " = ", round(designPlan[[userDefinedParam]], 3))
        } else if (!is.null(designPlan[["omegaMaxVector"]]) && length(designPlan$omegaMaxVector) == 1) {
            treatmentRateText <- paste0("H1: omega_max = ", round(designPlan$omegaMaxVector, 3))
        } else if (!is.null(designPlan[["hazardRatio"]]) && (length(designPlan$hazardRatio) == 1) ||
                (inherits(designPlan, "SimulationResults") && !is.null(designPlan[[".piecewiseSurvivalTime"]]) &&
                    designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled)) {
            treatmentRateText <- paste0(
                "H1: hazard ratio = ",
                .arrayToString(round(designPlan$hazardRatio, 3), vectorLookAndFeelEnabled = TRUE)
            )
        } else if (settings$enrichmentEnabled && !is.null(designPlan[["effectList"]]) &&
                !is.null(designPlan$effectList[["hazardRatios"]]) &&
                is.matrix(designPlan$effectList$hazardRatios) &&
                nrow(designPlan$effectList$hazardRatios) == 1) {
            treatmentRateText <- paste0(
                "H1: hazard ratios = ",
                .arrayToString(round(designPlan$effectList$hazardRatios, 3),
                    vectorLookAndFeelEnabled = TRUE)
            )
        } else {
            treatmentRateText <- paste0("H1: ", paramName, " as specified")
        }
        # append the matching control-group parameter on the same scale
        if (userDefinedParam %in% c("hazardRatio", "pi1") &&
                (designPlan$.getParameterType("pi2") == C_PARAM_USER_DEFINED ||
                    designPlan$.getParameterType("pi2") == C_PARAM_DEFAULT_VALUE) &&
                length(designPlan$pi2) == 1) {
            treatmentRateText <- paste0(treatmentRateText, ", control pi(2) = ", round(designPlan$pi2, 3))
        } else if (userDefinedParam %in% c("hazardRatio", "lambda1") &&
                (designPlan$.getParameterType("lambda2") == C_PARAM_USER_DEFINED ||
                    designPlan$.getParameterType("lambda2") == C_PARAM_DEFAULT_VALUE) &&
                length(designPlan$lambda2) == 1) {
            treatmentRateText <- paste0(treatmentRateText, ", control lambda(2) = ", round(designPlan$lambda2, 3))
        } else if (userDefinedParam %in% c("hazardRatio", "median1") &&
                (designPlan$.getParameterType("median2") == C_PARAM_USER_DEFINED ||
                    designPlan$.getParameterType("median2") == C_PARAM_GENERATED) &&
                length(designPlan$median2) == 1) {
            treatmentRateText <- paste0(treatmentRateText, ", control median(2) = ", round(designPlan$median2, 3))
        } else if (!is.null(designPlan[[".piecewiseSurvivalTime"]]) &&
                designPlan$.piecewiseSurvivalTime$piecewiseSurvivalEnabled) {
            treatmentRateText <- paste0(treatmentRateText, ", piecewise survival distribution")
            treatmentRateText <- paste0(
                treatmentRateText, ", \n",
                "piecewise survival time = ",
                .arrayToString(round(designPlan$piecewiseSurvivalTime, 4), vectorLookAndFeelEnabled = TRUE),
                ", \n",
                "control lambda(2) = ",
                .arrayToString(round(designPlan$lambda2, 4), vectorLookAndFeelEnabled = TRUE)
            )
        }
        header <- paste0(header, ", \n", .createSummaryHypothesisText(designPlan, summaryFactory))
        header <- .concatenateSummaryText(header, treatmentRateText)
        header <- .addEnrichmentEffectListToHeader(header, designPlan)
        header <- .addAdditionalArgumentsToHeader(header, designPlan, settings)
    }
    if (!inherits(designPlan, "SimulationResults") && designPlan$.isSampleSizeObject()) {
        header <-
.concatenateSummaryText(header, paste0("power ", round(100 * (1 - design$beta), 1), "%"))
    }
    if (inherits(designPlan, "SimulationResults")) {
        header <- .concatenateSummaryText(header, paste0("simulation runs = ", designPlan$maxNumberOfIterations))
        header <- .concatenateSummaryText(header, paste0("seed = ", designPlan$seed))
    }
    header <- paste0(header, ".")
    return(header)
}

#
# Appends planned sample size / events, allocation ratio, accrual and dropout
# information as well as sample size reassessment settings to the summary
# 'header'. (The definition continues in the next chunk.)
#
.addAdditionalArgumentsToHeader <- function(header, designPlan, settings) {
    if (designPlan$.design$kMax > 1) {
        # group sequential design: report cumulative planned numbers per stage
        if (settings$survivalEnabled) {
            if (!is.null(designPlan[["plannedEvents"]])) {
                header <- .concatenateSummaryText(header, paste0(
                    "planned cumulative events = ",
                    .arrayToString(designPlan$plannedEvents,
                        vectorLookAndFeelEnabled = (length(designPlan$plannedEvents) > 1)
                    )
                ))
            }
        } else {
            if (!is.null(designPlan[["plannedSubjects"]])) {
                header <- .concatenateSummaryText(header, paste0(
                    "planned cumulative sample size = ",
                    .arrayToString(designPlan$plannedSubjects,
                        vectorLookAndFeelEnabled = (length(designPlan$plannedSubjects) > 1)
                    )
                ))
            }
        }
        if (!is.null(designPlan[["maxNumberOfSubjects"]]) &&
                designPlan$.getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED) {
            header <- .concatenateSummaryText(header, paste0(
                "maximum number of subjects = ", ceiling(designPlan$maxNumberOfSubjects[1])
            ))
        }
        if (settings$survivalEnabled) {
            if (!is.null(designPlan[["maxNumberOfEvents"]]) &&
                    designPlan$.getParameterType("maxNumberOfEvents") == C_PARAM_USER_DEFINED) {
                header <- .concatenateSummaryText(header, paste0(
                    "maximum number of events = ", ceiling(designPlan$maxNumberOfEvents[1])
                ))
            }
        }
    } else {
        # fixed design: planned numbers without "cumulative" wording
        if (settings$survivalEnabled) {
            if (!is.null(designPlan[["plannedEvents"]])) {
                header <- .concatenateSummaryText(header, paste0(
                    "planned events = ",
                    .arrayToString(designPlan$plannedEvents,
                        vectorLookAndFeelEnabled = (length(designPlan$plannedEvents) > 1)
                    )
                ))
            }
        } else {
            if (!is.null(designPlan[["plannedSubjects"]])) {
                header <- .concatenateSummaryText(header, paste0(
                    "planned sample size = ",
                    .arrayToString(designPlan$plannedSubjects,
                        vectorLookAndFeelEnabled = (length(designPlan$plannedSubjects) > 1)
                    )
                ))
            }
        }
        if (!is.null(designPlan[["maxNumberOfSubjects"]]) &&
                designPlan$.getParameterType("maxNumberOfSubjects") == C_PARAM_USER_DEFINED) {
            header <- .concatenateSummaryText(header, paste0(
                "number of subjects = ", ceiling(designPlan$maxNumberOfSubjects[1])
            ))
        }
        if (settings$survivalEnabled) {
            if (!is.null(designPlan[["maxNumberOfEvents"]]) &&
                    designPlan$.getParameterType("maxNumberOfEvents") == C_PARAM_USER_DEFINED) {
                header <- .concatenateSummaryText(header, paste0(
                    "number of events = ", designPlan$maxNumberOfEvents[1]
                ))
            }
        }
    }
    header <- .addAllocationRatioToHeader(designPlan, header)
    if (settings$survivalEnabled) {
        # survival-specific accrual and dropout information
        if (!is.null(designPlan[["eventTime"]]) && !is.na(designPlan[["eventTime"]])) {
            header <- .concatenateSummaryText(header, paste0(
                "event time = ",
                .arrayToString(designPlan$eventTime,
                    vectorLookAndFeelEnabled = (length(designPlan$eventTime) > 1)
                )
            ))
        }
        if (!is.null(designPlan[["accrualTime"]])) {
            header <- .concatenateSummaryText(header, paste0(
                "accrual time = ",
                .arrayToString(designPlan$accrualTime,
                    vectorLookAndFeelEnabled = (length(designPlan$accrualTime) > 1)
                )
            ))
        }
        if (!is.null(designPlan[["accrualTime"]]) &&
                length(designPlan$accrualIntensity) == length(designPlan$accrualTime)) {
            header <- .concatenateSummaryText(header, paste0(
                "accrual intensity = ",
                .arrayToString(designPlan$accrualIntensity, digits = 1,
                    vectorLookAndFeelEnabled = (length(designPlan$accrualIntensity) > 1)
                )
            ))
        }
        if (!is.null(designPlan[["dropoutTime"]])) {
            if (designPlan$dropoutRate1 > 0 || designPlan$dropoutRate2 > 0) {
                header <- .concatenateSummaryText(header, paste0(
                    "dropout rate(1) = ",
                    .arrayToString(designPlan$dropoutRate1,
                        vectorLookAndFeelEnabled = (length(designPlan$dropoutRate1) > 1)
                    )
                ))
                header <- .concatenateSummaryText(header, paste0(
                    "dropout rate(2) = ",
                    .arrayToString(designPlan$dropoutRate2,
                        vectorLookAndFeelEnabled =
(length(designPlan$dropoutRate2) > 1)
                    )
                ))
                header <- .concatenateSummaryText(header, paste0(
                    "dropout time = ",
                    .arrayToString(designPlan$dropoutTime,
                        vectorLookAndFeelEnabled = (length(designPlan$dropoutTime) > 1)
                    )
                ))
            }
        }
    }
    if (settings$multiArmEnabled && designPlan$activeArms > 1) {
        header <- .addShapeToHeader(header, designPlan)
        header <- .addSelectionToHeader(header, designPlan)
    }
    if (settings$enrichmentEnabled && settings$populations > 1) {
        header <- .addSelectionToHeader(header, designPlan)
    }
    # sample size / event reassessment information (conditional power driven)
    functionName <- ifelse(settings$survivalEnabled, "calcEventsFunction", "calcSubjectsFunction")
    userDefinedFunction <- !is.null(designPlan[[functionName]]) &&
        designPlan$.getParameterType(functionName) == C_PARAM_USER_DEFINED
    if (userDefinedFunction || (!is.null(designPlan[["conditionalPower"]]) &&
            !is.na(designPlan$conditionalPower))) {
        if (userDefinedFunction) {
            header <- .concatenateSummaryText(
                header,
                paste0("sample size reassessment: user defined '", functionName, "'")
            )
            if ((!is.null(designPlan[["conditionalPower"]]) && !is.na(designPlan$conditionalPower))) {
                header <- .concatenateSummaryText(
                    header,
                    paste0("conditional power = ", designPlan$conditionalPower)
                )
            }
        } else {
            if ((!is.null(designPlan[["conditionalPower"]]) && !is.na(designPlan$conditionalPower))) {
                header <- .concatenateSummaryText(
                    header,
                    paste0("sample size reassessment: conditional power = ", designPlan$conditionalPower)
                )
            }
        }
        paramName1 <- ifelse(settings$survivalEnabled, "minNumberOfEventsPerStage", "minNumberOfSubjectsPerStage")
        paramName2 <- ifelse(settings$survivalEnabled, "maxNumberOfEventsPerStage", "maxNumberOfSubjectsPerStage")
        paramCaption <- ifelse(settings$survivalEnabled, "events", "subjects")
        if (!is.null(designPlan[[paramName1]])) {
            header <- .concatenateSummaryText(header, paste0(
                "minimum ", paramCaption, " per stage = ",
                .arrayToString(designPlan[[paramName1]],
                    vectorLookAndFeelEnabled = (length(designPlan[[paramName1]]) > 1)
                )
            ))
        }
        if (!is.null(designPlan[[paramName2]])) {
            header <- .concatenateSummaryText(header, paste0(
                "maximum ", paramCaption, " per stage = ",
                .arrayToString(designPlan[[paramName2]],
                    vectorLookAndFeelEnabled = (length(designPlan[[paramName2]]) > 1)
                )
            ))
        }
        # assumed H1 parameters used for the conditional power calculation
        if (settings$meansEnabled) {
            if (!is.na(designPlan$thetaH1)) {
                header <- .concatenateSummaryText(
                    header,
                    paste0("theta H1 = ", round(designPlan$thetaH1, 3))
                )
            }
            if (!is.na(designPlan$stDevH1)) {
                header <- .concatenateSummaryText(
                    header,
                    paste0("standard deviation H1 = ", round(designPlan$stDevH1, 3))
                )
            }
        } else if (settings$ratesEnabled) {
            if (settings$multiArmEnabled || settings$enrichmentEnabled) {
                if (settings$multiArmEnabled && !is.na(designPlan$piTreatmentsH1)) {
                    header <- .concatenateSummaryText(
                        header,
                        paste0("pi(treatment)H1 = ", round(designPlan$piTreatmentsH1, 3))
                    )
                } else if (settings$enrichmentEnabled) {
                    # field name differs between object versions; try both
                    piTreatmentH1 <- designPlan[["piTreatmentH1"]]
                    if (is.null(piTreatmentH1)) {
                        piTreatmentH1 <- designPlan[["piTreatmentsH1"]]
                    }
                    if (!is.null(piTreatmentH1) && !is.na(piTreatmentH1)) {
                        header <- .concatenateSummaryText(
                            header,
                            paste0("pi(treatment)H1 = ", round(piTreatmentH1, 3))
                        )
                    }
                }
                if (!is.na(designPlan$piControlH1)) {
                    header <- .concatenateSummaryText(
                        header,
                        paste0("pi(control)H1 = ", round(designPlan$piControlH1, 3))
                    )
                }
            } else {
                if (!is.na(designPlan$pi1H1)) {
                    header <- .concatenateSummaryText(
                        header,
                        paste0("pi(treatment)H1 = ", round(designPlan$pi1H1, 3))
                    )
                }
                if (!is.na(designPlan$pi2H1)) {
                    header <- .concatenateSummaryText(
                        header,
                        paste0("pi(control)H1 = ", round(designPlan$pi2H1, 3))
                    )
                }
            }
        }
        if (settings$survivalEnabled && !is.null(designPlan[["thetaH1"]]) && !is.na(designPlan$thetaH1)) {
            header <- .concatenateSummaryText(header, paste0("thetaH1 = ", round(designPlan$thetaH1, 3)))
        }
    }
    return(header)
}

#
# Appends the effect shape description (and, for sigmoidEmax, slope and ED50)
# to the summary 'header' of a multi-arm object.
# (The definition continues in the next chunk.)
#
.addShapeToHeader <- function(header, designPlan) {
    header <- .concatenateSummaryText(header, paste0("effect shape = ", .formatCamelCase(designPlan$typeOfShape)))
    if (designPlan$typeOfShape == "sigmoidEmax") {
        header <-
.concatenateSummaryText(header, paste0("slope = ", designPlan$slope))
        header <- .concatenateSummaryText(header, paste0("ED50 = ", designPlan$gED50))
    }
    return(header)
}

#
# Appends treatment-arm / population selection settings (intersection test,
# selection rule, effect measure, success criterion) to the summary 'header'.
#
.addSelectionToHeader <- function(header, designPlan) {
    header <- .concatenateSummaryText(header, paste0("intersection test = ", designPlan$intersectionTest))
    if (designPlan$.design$kMax > 1) {
        typeOfSelectionText <- paste0("selection = ", .formatCamelCase(designPlan$typeOfSelection))
        if (designPlan$typeOfSelection == "rBest") {
            typeOfSelectionText <- paste0(typeOfSelectionText, ", r = ", designPlan$rValue)
        } else if (designPlan$typeOfSelection == "epsilon") {
            typeOfSelectionText <- paste0(typeOfSelectionText, " rule, eps = ", designPlan$epsilonValue)
        }
        if (!is.null(designPlan$threshold) && length(designPlan$threshold) == 1 && designPlan$threshold > -Inf) {
            typeOfSelectionText <- paste0(typeOfSelectionText, ", threshold = ", designPlan$threshold)
        }
        header <- .concatenateSummaryText(header, typeOfSelectionText)
        header <- .concatenateSummaryText(
            header,
            paste0("effect measure based on ", .formatCamelCase(designPlan$effectMeasure))
        )
    }
    header <- .concatenateSummaryText(
        header,
        paste0("success criterion: ", .formatCamelCase(designPlan$successCriterion))
    )
    return(header)
}

#
# Dispatches summary creation depending on the class of 'object' (designs,
# design plans, simulation results, analysis results, performance scores).
# (The definition continues in the next chunk.)
#
.createSummary <- function(object, digits = NA_integer_, output = c("all", "title", "overview", "body")) {
    output <- match.arg(output)
    if (inherits(object, "TrialDesignCharacteristics")) {
        return(.createSummaryDesignPlan(object, digits = digits, output = output, showStageLevels = TRUE))
    }
    if (.isTrialDesign(object) || .isTrialDesignPlan(object) || inherits(object, "SimulationResults")) {
        return(.createSummaryDesignPlan(object,
            digits = digits, output = output,
            showStageLevels = !.isTrialDesignPlan(object)
        ))
    }
    if (inherits(object, "AnalysisResults")) {
        return(.createSummaryAnalysisResults(object, digits = digits, output = output))
    }
    if (inherits(object, "PerformanceScore")) {
        return(.createSummaryPerformanceScore(object, digits =
digits, output = output))
    }
    stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, "function 'summary' not implemented yet for class ", .getClassName(object))
}

#
# Creates the summary for a PerformanceScore object via its underlying
# simulation results.
#
.createSummaryPerformanceScore <- function(object, digits = NA_integer_,
        output = c("all", "title", "overview", "body")) {
    .createSummaryDesignPlan(object$.simulationResults,
        digits = digits, output = output, showStageLevels = TRUE,
        performanceScore = object
    )
}

# Returns the summary caption for the efficacy boundary: Fisher designs use the
# p product scale, others the z-value scale; delayed information designs use
# "upper bounds of continuation" wording instead.
.getSummaryParameterCaptionCriticalValues <- function(design) {
    parameterCaption <- ifelse(.isTrialDesignFisher(design),
        "Efficacy boundary (p product scale)",
        "Efficacy boundary (z-value scale)"
    )
    parameterCaption <- ifelse(.isDelayedInformationEnabled(design = design),
        "Upper bounds of continuation",
        parameterCaption
    )
    return(parameterCaption)
}

# Returns the summary caption for the futility bounds, reflecting whether
# futility is binding and whether delayed information is enabled.
.getSummaryParameterCaptionFutilityBounds <- function(design) {
    bindingInfo <- ifelse(design$bindingFutility, "binding", "non-binding")
    parameterCaption <- ifelse(.isDelayedInformationEnabled(design = design),
        paste0("Lower bounds of continuation (", bindingInfo, ")"),
        paste0("Futility boundary (z-value scale)")
    )
    return(parameterCaption)
}

#'
#' Main function for creating a summary of an analysis result
#'
#' @noRd
#'
.createSummaryAnalysisResults <- function(object, digits = NA_integer_,
        output = c("all", "title", "overview", "body")) {
    output <- match.arg(output)
    if (!inherits(object, "AnalysisResults")) {
        stop(
            C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT,
            "'object' must be a valid analysis result object (is class ", .getClassName(object), ")"
        )
    }
    # unpack the configured rounding digits per value category
    digitSettings <- .getSummaryDigits(digits)
    digits <- digitSettings$digits
    digitsSampleSize <- digitSettings$digitsSampleSize
    digitsGeneral <- digitSettings$digitsGeneral
    digitsProbabilities <- digitSettings$digitsProbabilities
    outputSize <- getOption("rpact.summary.output.size", C_SUMMARY_OUTPUT_SIZE_DEFAULT)
    intervalFormat <- getOption("rpact.summary.intervalFormat", "[%s; %s]")
    .assertIsValidSummaryIntervalFormat(intervalFormat)
    multiArmEnabled <- .isMultiArmAnalysisResults(object)
enrichmentEnabled <- .isEnrichmentAnalysisResults(object) multiHypothesesEnabled <- .isMultiHypothesesAnalysisResults(object) analysisResults <- object design <- analysisResults$.design stageResults <- analysisResults$.stageResults dataInput <- analysisResults$.dataInput closedTestResults <- NULL conditionalPowerResults <- NULL if (multiHypothesesEnabled) { closedTestResults <- analysisResults$.closedTestResults if (length(analysisResults$nPlanned) > 0 && !all(is.na(analysisResults$nPlanned))) { conditionalPowerResults <- analysisResults$.conditionalPowerResults } } summaryFactory <- SummaryFactory(object = object, intervalFormat = intervalFormat, output = output) .addDesignInformationToSummary(design, object, summaryFactory, output = output) if (!.isTrialDesignConditionalDunnett(design)) { summaryFactory$addParameter(design, parameterName = "criticalValues", parameterCaption = .getSummaryParameterCaptionCriticalValues(design), roundDigits = digitsProbabilities - ifelse(.isTrialDesignFisher(design) || digitsProbabilities <= 1, 0, 1), smoothedZeroFormat = !.isTrialDesignFisher(design) ) } if (.isTrialDesignFisher(design)) { if (any(design$alpha0Vec < 1)) { summaryFactory$addParameter(design, parameterName = "alpha0Vec", parameterCaption = "Futility boundary (separate p-value scale)", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } } else if (!.isTrialDesignConditionalDunnett(design)) { if (any(design$futilityBounds > -6)) { summaryFactory$addParameter(design, parameterName = "futilityBounds", parameterCaption = .getSummaryParameterCaptionFutilityBounds(design), roundDigits = ifelse(digitsProbabilities > 1, digitsProbabilities - 1, digitsProbabilities), smoothedZeroFormat = TRUE ) } } if (design$kMax > 1 && !.isTrialDesignConditionalDunnett(design)) { summaryFactory$addParameter(design, parameterName = "alphaSpent", parameterCaption = "Cumulative alpha spent", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } if 
(!.isTrialDesignConditionalDunnett(design)) { summaryFactory$addParameter(design, parameterName = "stageLevels", parameterCaption = "Stage level", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } summaryFactory$addParameter(stageResults, parameterName = "effectSizes", parameterCaption = ifelse(stageResults$isDatasetRates() && dataInput$getNumberOfGroups() == 1, "Cumulative treatment rate", "Cumulative effect size" ), roundDigits = digitsGeneral ) if (stageResults$isDatasetMeans()) { parameterCaption <- ifelse(stageResults$isOneSampleDataset(), "Cumulative standard deviation", "Cumulative (pooled) standard deviation" ) parameterName <- ifelse(inherits(stageResults, "StageResultsMultiArmMeans") && !inherits(stageResults, "StageResultsEnrichmentMeans"), "overallPooledStDevs", "overallStDevs" ) summaryFactory$addParameter(stageResults, parameterName = parameterName, parameterCaption = parameterCaption, roundDigits = digitsGeneral, enforceFirstCase = (parameterName == "overallPooledStDevs") ) } else if (stageResults$isDatasetRates()) { if (outputSize != "small" && dataInput$getNumberOfGroups() > 1) { treatmentRateParamName <- "overallPi1" controlRateParamName <- "overallPi2" if (.isEnrichmentStageResults(stageResults)) { treatmentRateParamName <- "overallPisTreatment" controlRateParamName <- "overallPisControl" } else if (.isMultiArmStageResults(stageResults)) { treatmentRateParamName <- "overallPiTreatments" controlRateParamName <- "overallPiControl" } summaryFactory$addParameter(stageResults, parameterName = treatmentRateParamName, parameterCaption = "Cumulative treatment rate", roundDigits = digitsGeneral ) summaryFactory$addParameter(stageResults, parameterName = controlRateParamName, parameterCaption = "Cumulative control rate", roundDigits = digitsGeneral, enforceFirstCase = TRUE ) } } if (.isTrialDesignGroupSequential(design)) { summaryFactory$addParameter(stageResults, parameterName = "overallTestStatistics", parameterCaption = "Overall test 
statistic", roundDigits = ifelse(digitsProbabilities > 1, digitsProbabilities - 1, digitsProbabilities), smoothedZeroFormat = TRUE ) summaryFactory$addParameter(stageResults, parameterName = ifelse(multiHypothesesEnabled, "separatePValues", "overallPValues"), parameterCaption = "Overall p-value", roundDigits = digitsProbabilities ) } else { summaryFactory$addParameter(stageResults, parameterName = "testStatistics", parameterCaption = "Stage-wise test statistic", roundDigits = ifelse(digitsProbabilities > 1, digitsProbabilities - 1, digitsProbabilities), smoothedZeroFormat = TRUE ) summaryFactory$addParameter(stageResults, parameterName = ifelse(multiHypothesesEnabled, "separatePValues", "pValues"), parameterCaption = "Stage-wise p-value", roundDigits = digitsProbabilities ) } if (!is.null(closedTestResults)) { if (outputSize == "large") { if (.isTrialDesignConditionalDunnett(design)) { summaryFactory$addParameter(closedTestResults, parameterName = "conditionalErrorRate", parameterCaption = "Conditional error rate", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) summaryFactory$addParameter(closedTestResults, parameterName = "secondStagePValues", parameterCaption = "Second stage p-value", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } else { summaryFactory$addParameter(closedTestResults, parameterName = "adjustedStageWisePValues", parameterCaption = "Adjusted stage-wise p-value", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) summaryFactory$addParameter(closedTestResults, parameterName = "overallAdjustedTestStatistics", parameterCaption = "Overall adjusted test statistic", roundDigits = digitsProbabilities - ifelse(.isTrialDesignFisher(design) || digitsProbabilities <= 1, 0, 1), smoothedZeroFormat = !.isTrialDesignFisher(design) ) } } else if (outputSize == "medium") { legendEntry <- list("(i, j, ...)" = "comparison of treatment arms 'i, j, ...' vs. 
control arm") gMax <- stageResults$getGMax() if (.isTrialDesignConditionalDunnett(design)) { summaryFactory$addParameter(closedTestResults, parameterName = "adjustedStageWisePValues", values = closedTestResults$conditionalErrorRate[1, ], parameterCaption = paste0( "Conditional error rate (", paste0(1:gMax, collapse = ", "), ")" ), roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE, legendEntry = legendEntry ) summaryFactory$addParameter(closedTestResults, parameterName = "overallAdjustedTestStatistics", values = closedTestResults$secondStagePValues[1, ], parameterCaption = paste0( "Second stage p-value (", paste0(1:gMax, collapse = ", "), ")" ), roundDigits = digitsProbabilities + ifelse(.isTrialDesignFisher(design), 1, 0), smoothedZeroFormat = !.isTrialDesignFisher(design), legendEntry = legendEntry ) } else { summaryFactory$addParameter(closedTestResults, parameterName = "adjustedStageWisePValues", values = closedTestResults$adjustedStageWisePValues[1, ], parameterCaption = paste0( "Adjusted stage-wise p-value (", paste0(1:gMax, collapse = ", "), ")" ), roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE, legendEntry = legendEntry ) summaryFactory$addParameter(closedTestResults, parameterName = "overallAdjustedTestStatistics", values = closedTestResults$overallAdjustedTestStatistics[1, ], parameterCaption = paste0( "Overall adjusted test statistic (", paste0(1:gMax, collapse = ", "), ")" ), roundDigits = digitsProbabilities - ifelse(.isTrialDesignFisher(design) || digitsProbabilities <= 1, 0, 1), smoothedZeroFormat = !.isTrialDesignFisher(design), legendEntry = legendEntry ) } } } if (multiHypothesesEnabled) { summaryFactory$addParameter(closedTestResults, parameterName = "rejected", parameterCaption = "Test action: reject", roundDigits = digitsGeneral ) } else { if (.isTrialDesignFisher(design)) { summaryFactory$addParameter(stageResults, parameterName = "combFisher", parameterCaption = "Fisher combination", roundDigits = 0 ) } else if 
(.isTrialDesignInverseNormal(design)) { summaryFactory$addParameter(stageResults, parameterName = "combInverseNormal", parameterCaption = "Inverse normal combination", roundDigits = ifelse(digitsProbabilities > 1, digitsProbabilities - 1, digitsProbabilities), smoothedZeroFormat = TRUE ) } summaryFactory$addParameter(analysisResults, parameterName = "testActions", parameterCaption = "Test action", roundDigits = digitsGeneral ) } if (design$kMax > 1 && !.isTrialDesignConditionalDunnett(design)) { summaryFactory$addParameter(analysisResults, parameterName = "conditionalRejectionProbabilities", parameterCaption = "Conditional rejection probability", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } if (design$kMax > 1) { if (!is.null(conditionalPowerResults)) { summaryFactory$addParameter(conditionalPowerResults, parameterName = "nPlanned", parameterCaption = "Planned sample size", roundDigits = -1 ) } else if (analysisResults$.getParameterType("nPlanned") != C_PARAM_NOT_APPLICABLE) { summaryFactory$addParameter(analysisResults, parameterName = "nPlanned", parameterCaption = "Planned sample size", roundDigits = -1 ) } } if (design$kMax > 1) { if (!is.null(conditionalPowerResults)) { summaryFactory$addParameter(conditionalPowerResults, parameterName = "conditionalPower", parameterCaption = "Conditional power", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } else if (!multiHypothesesEnabled && analysisResults$.getParameterType("nPlanned") != C_PARAM_NOT_APPLICABLE) { parameterName <- "conditionalPower" if (!is.null(analysisResults[["conditionalPowerSimulated"]]) && length(analysisResults[["conditionalPowerSimulated"]]) > 0) { parameterName <- "conditionalPowerSimulated" } summaryFactory$addParameter(analysisResults, parameterName = parameterName, parameterCaption = "Conditional power", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } } ciLevel <- round((1 - design$alpha * (3 - design$sided)) * 100, 2) if 
(.isTrialDesignConditionalDunnett(design)) { parameterCaptionRepeatedPValues <- "Overall p-value" parameterCaptionRepeatedCI <- paste0(ciLevel, "% overall confidence interval") } else { parameterCaptionRepeatedPValues <- ifelse(design$kMax == 1, ifelse(design$sided == 1, "One-sided p-value", "Two-sided p-value"), "Repeated p-value" ) parameterCaptionRepeatedCI <- paste0( ciLevel, "% ", ifelse(design$kMax == 1, "confidence interval", "repeated confidence interval") ) } summaryFactory$addParameter(analysisResults, parameterName = c("repeatedConfidenceIntervalLowerBounds", "repeatedConfidenceIntervalUpperBounds"), parameterCaption = parameterCaptionRepeatedCI, roundDigits = digitsGeneral ) summaryFactory$addParameter(analysisResults, parameterName = "repeatedPValues", parameterCaption = parameterCaptionRepeatedPValues, roundDigits = digitsProbabilities, formatRepeatedPValues = TRUE ) if (!multiHypothesesEnabled && !is.null(analysisResults[["finalStage"]]) && !all(is.na(analysisResults$finalStage))) { summaryFactory$addParameter(analysisResults, parameterName = "finalPValues", parameterCaption = "Final p-value", roundDigits = digitsProbabilities ) summaryFactory$addParameter(analysisResults, parameterName = c("finalConfidenceIntervalLowerBounds", "finalConfidenceIntervalUpperBounds"), parameterCaption = "Final confidence interval", roundDigits = digitsGeneral ) summaryFactory$addParameter(analysisResults, parameterName = "medianUnbiasedEstimates", parameterCaption = "Median unbiased estimate", roundDigits = digitsGeneral ) } return(summaryFactory) } .getSummaryDigits <- function(digits = NA_integer_) { if (is.na(digits)) { digits <- as.integer(getOption("rpact.summary.digits", 3)) } .assertIsSingleInteger(digits, "digits", validateType = FALSE, naAllowed = TRUE) .assertIsInClosedInterval(digits, "digits", lower = -1, upper = 12, naAllowed = TRUE) digitsSampleSize <- 1 if (digits > 0) { digitsGeneral <- digits digitsProbabilities <- NA_integer_ tryCatch( { 
digitsProbabilities <- as.integer(getOption("rpact.summary.digits.probs", digits + 1)) }, warning = function(e) { } ) if (is.na(digitsProbabilities)) { digitsProbabilities <- digits + 1 } .assertIsSingleInteger(digitsProbabilities, "digitsProbabilities", validateType = FALSE, naAllowed = FALSE) .assertIsInClosedInterval(digitsProbabilities, "digitsProbabilities", lower = -1, upper = 12, naAllowed = FALSE) } else { digitsSampleSize <- digits digitsGeneral <- digits digitsProbabilities <- digits } return(list( digits = digits, digitsSampleSize = digitsSampleSize, digitsGeneral = digitsGeneral, digitsProbabilities = digitsProbabilities )) } .getSummaryValuesInPercent <- function(values, percentFormatEnabled = TRUE, digits = 1) { if (!percentFormatEnabled) { return(as.character(round(values, digits + 2))) } return(paste0(round(100 * values, digits), "%")) } .addDesignInformationToSummary <- function(design, designPlan, summaryFactory, output = c("all", "title", "overview", "body")) { if (!(output %in% c("all", "overview"))) { return(invisible(summaryFactory)) } if (design$kMax == 1) { summaryFactory$addItem("Stage", "Fixed") return(invisible(summaryFactory)) } summaryFactory$addItem("Stage", c(1:design$kMax)) if (.isTrialDesignConditionalDunnett(design)) { summaryFactory$addItem( "Fixed information at interim", .getSummaryValuesInPercent(design$informationAtInterim, FALSE) ) return(invisible(summaryFactory)) } informationRatesCaption <- ifelse(inherits(designPlan, "SimulationResults") || inherits(designPlan, "AnalysisResults"), "Fixed weight", "Information") if (inherits(designPlan, "SimulationResults") || inherits(designPlan, "AnalysisResults")) { if (.isTrialDesignFisher(design)) { weights <- .getWeightsFisher(design) } else if (.isTrialDesignInverseNormal(design)) { weights <- .getWeightsInverseNormal(design) } else { weights <- design$informationRates } summaryFactory$addItem(informationRatesCaption, .getSummaryValuesInPercent(weights, FALSE)) } else { 
summaryFactory$addItem( paste0( informationRatesCaption, ifelse(inherits(designPlan, "SimulationResults"), "", " rate") ), .getSummaryValuesInPercent(design$informationRates) ) } if (design$.isDelayedResponseDesign()) { summaryFactory$addItem("Delayed information", .getSummaryValuesInPercent(design$delayedInformation, TRUE)) } return(invisible(summaryFactory)) } .addDesignParameterToSummary <- function(design, designPlan, designCharacteristics, summaryFactory, digitsGeneral, digitsProbabilities) { if (design$kMax > 1 && !inherits(designPlan, "SimulationResults") && !.isTrialDesignConditionalDunnett(design)) { summaryFactory$addParameter(design, parameterName = "alphaSpent", parameterCaption = "Cumulative alpha spent", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) if (design$.getParameterType("betaSpent") == C_PARAM_GENERATED) { summaryFactory$addParameter(design, parameterName = "betaSpent", parameterCaption = "Cumulative beta spent", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } } if (!is.null(designPlan)) { if (!grepl("SimulationResults(MultiArm|Enrichment)", .getClassName(designPlan))) { outputSize <- getOption("rpact.summary.output.size", C_SUMMARY_OUTPUT_SIZE_DEFAULT) if (outputSize == "large" && inherits(designPlan, "SimulationResults")) { summaryFactory$addParameter(designPlan, parameterName = "conditionalPowerAchieved", parameterCaption = "Conditional power (achieved)", roundDigits = digitsProbabilities ) } } } else { powerObject <- NULL if (!is.null(designCharacteristics)) { powerObject <- designCharacteristics } else if (design$.getParameterType("power") == C_PARAM_GENERATED) { powerObject <- design } if (!is.null(powerObject)) { summaryFactory$addParameter(powerObject, parameterName = "power", parameterCaption = ifelse(design$kMax == 1, "Power", "Overall power"), roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } if (design$kMax > 1 && .isTrialDesignInverseNormalOrGroupSequential(design)) { tryCatch( { 
designCharacteristics <- getDesignCharacteristics(design) }, error = function(e) { designCharacteristics <- NULL } ) if (!is.null(designCharacteristics) && !any(is.na(designCharacteristics$futilityProbabilities)) && any(designCharacteristics$futilityProbabilities > 0)) { summaryFactory$addParameter(designCharacteristics, parameterName = "futilityProbabilities", parameterCaption = "Futility probabilities under H1", roundDigits = digitsGeneral, smoothedZeroFormat = TRUE ) } } } if (design$.isDelayedResponseDesign()) { summaryFactory$addParameter(design, parameterName = "decisionCriticalValues", parameterCaption = "Decision critical values", roundDigits = digitsGeneral, smoothedZeroFormat = TRUE ) outputSize <- getOption("rpact.summary.output.size", C_SUMMARY_OUTPUT_SIZE_DEFAULT) if (outputSize == "large") { summaryFactory$addParameter(design, parameterName = "reversalProbabilities", parameterCaption = "Reversal probabilities", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } } if (.isTrialDesignConditionalDunnett(design)) { summaryFactory$addParameter(design, parameterName = "alpha", parameterCaption = "Significance level", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } else if (!is.null(designPlan) && !inherits(designPlan, "SimulationResults")) { summaryFactory$addParameter(design, parameterName = "stageLevels", twoSided = design$sided == 2, parameterCaption = paste0(ifelse(design$sided == 2, "Two", "One"), "-sided local significance level"), roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } return(summaryFactory) } #' #' Main function for creating a summary of a design or design plan #' #' @noRd #' .createSummaryDesignPlan <- function(object, digits = NA_integer_, output = c("all", "title", "overview", "body"), showStageLevels = FALSE, performanceScore = NULL) { output <- match.arg(output) designPlan <- NULL if (.isTrialDesignPlan(object) || inherits(object, "SimulationResults")) { design <- object$.design designPlan 
<- object } else if (inherits(object, "TrialDesignCharacteristics")) { design <- object$.design # designPlan <- object } else if (.isTrialDesign(object)) { design <- object } else { stop( C_EXCEPTION_TYPE_ILLEGAL_ARGUMENT, "'object' must be a valid design, design plan, ", "or simulation result object (is class ", .getClassName(object), ")" ) } digitSettings <- .getSummaryDigits(digits) digits <- digitSettings$digits digitsSampleSize <- digitSettings$digitsSampleSize digitsGeneral <- digitSettings$digitsGeneral digitsProbabilities <- digitSettings$digitsProbabilities outputSize <- getOption("rpact.summary.output.size", C_SUMMARY_OUTPUT_SIZE_DEFAULT) intervalFormat <- getOption("rpact.summary.intervalFormat", "[%s; %s]") .assertIsValidSummaryIntervalFormat(intervalFormat) summaryFactory <- SummaryFactory(object = object, intervalFormat = intervalFormat, output = output) if (output %in% c("all", "title", "overview")) { .addDesignInformationToSummary(design, designPlan, summaryFactory, output = output) } if (!(output %in% c("all", "body"))) { return(summaryFactory) } if (!.isTrialDesignConditionalDunnett(design)) { summaryFactory$addParameter(design, parameterName = "criticalValues", parameterCaption = .getSummaryParameterCaptionCriticalValues(design), roundDigits = digitsGeneral ) if (showStageLevels) { summaryFactory$addParameter(design, parameterName = "stageLevels", parameterCaption = "Stage levels (one-sided)", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } } if (.isTrialDesignFisher(design)) { if (any(design$alpha0Vec < 1)) { summaryFactory$addParameter(design, parameterName = "alpha0Vec", parameterCaption = "Futility boundary (separate p-value scale)", roundDigits = digitsGeneral ) } } else if (!.isTrialDesignConditionalDunnett(design)) { if (any(design$futilityBounds > C_FUTILITY_BOUNDS_DEFAULT, na.rm = TRUE)) { summaryFactory$addParameter(design, parameterName = "futilityBounds", parameterCaption = 
.getSummaryParameterCaptionFutilityBounds(design), roundDigits = digitsGeneral ) } } designCharacteristics <- NULL if (design$kMax > 1 && .isTrialDesignInverseNormalOrGroupSequential(design)) { tryCatch( { designCharacteristics <- getDesignCharacteristics(design) }, error = function(e) { designCharacteristics <- NULL } ) } if (is.null(designPlan)) { return(.addDesignParameterToSummary( design, designPlan, designCharacteristics, summaryFactory, digitsGeneral, digitsProbabilities )) } simulationEnabled <- grepl("SimulationResults", .getClassName(designPlan)) multiArmEnabled <- grepl("MultiArm", .getClassName(designPlan)) enrichmentEnabled <- grepl("Enrichment", .getClassName(designPlan)) baseEnabled <- grepl("(TrialDesignPlan|SimulationResults)(Means|Rates|Survival)", .getClassName(designPlan)) planningEnabled <- .isTrialDesignPlan(designPlan) simulationEnabled <- .isSimulationResults(designPlan) survivalEnabled <- grepl("Survival", .getClassName(designPlan)) probsH0 <- NULL probsH1 <- NULL if (design$kMax > 1) { if (!is.null(designCharacteristics) && .isTrialDesignInverseNormalOrGroupSequential(design) && length(designCharacteristics$shift) == 1 && !is.na(designCharacteristics$shift) && designCharacteristics$shift >= 1) { probsH0 <- getPowerAndAverageSampleNumber(design, theta = 0, nMax = designCharacteristics$shift) probsH1 <- getPowerAndAverageSampleNumber(design, theta = 1, nMax = designCharacteristics$shift) } if (!is.null(designPlan[["rejectPerStage"]])) { probsH1 <- list( earlyStop = designPlan$rejectPerStage[1:(design$kMax - 1), ] + as.vector(designPlan$futilityPerStage), rejectPerStage = designPlan$rejectPerStage, futilityPerStage = designPlan$futilityPerStage ) numberOfVariants <- 1 if (inherits(designPlan, "ParameterSet")) { parameterNames <- designPlan$.getVisibleFieldNamesOrdered() numberOfVariants <- .getMultidimensionalNumberOfVariants(designPlan, parameterNames) } if (numberOfVariants > 1 && is.matrix(probsH1$earlyStop) && ncol(probsH1$earlyStop) == 
1) { probsH1$earlyStop <- matrix(rep(probsH1$earlyStop, numberOfVariants), ncol = numberOfVariants) probsH1$rejectPerStage <- matrix(rep(probsH1$rejectPerStage, numberOfVariants), ncol = numberOfVariants) probsH1$futilityPerStage <- matrix(rep(probsH1$futilityPerStage, numberOfVariants), ncol = numberOfVariants) } } } if (simulationEnabled && (multiArmEnabled || enrichmentEnabled)) { # simulation multi-arm #1:rejectAtLeastOne per mu_max summaryFactory$addParameter(designPlan, parameterName = "rejectAtLeastOne", parameterCaption = "Reject at least one", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE, transpose = TRUE, legendEntry = { if (multiArmEnabled) list("(i)" = "treatment arm i") else list() } ) # simulation multi-arm #2: rejectedArmsPerStage if (outputSize == "large" && multiArmEnabled) { .addSimulationMultiArmArrayParameter(designPlan, parameterName = "rejectedArmsPerStage", parameterCaption = ifelse(design$kMax == 1, "Rejected arms", "Rejected arms per stage"), summaryFactory, roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } # simulation enrichment #2: rejectedPopulationsPerStage if (outputSize == "large" && enrichmentEnabled) { .addSimulationArrayToSummary(designPlan, parameterName = "rejectedPopulationsPerStage", parameterCaption = ifelse(design$kMax == 1, "Rejected populations", "Rejected populations per stage"), summaryFactory, digitsSampleSize = digitsProbabilities, smoothedZeroFormat = TRUE ) } # simulation multi-arm #3: successPerStage summaryFactory$addParameter(designPlan, parameterName = "successPerStage", parameterCaption = "Success per stage", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE, transpose = TRUE ) # simulation multi-arm #4: futilityPerStage if (!planningEnabled && !baseEnabled && any(designPlan$futilityPerStage != 0)) { summaryFactory$addParameter(designPlan, parameterName = "futilityPerStage", parameterCaption = "Exit probability for futility", # (under H1) roundDigits = 
digitsProbabilities, smoothedZeroFormat = TRUE, transpose = TRUE ) } if (survivalEnabled) { summaryFactory$addParameter(designPlan, parameterName = "expectedNumberOfEvents", parameterCaption = "Expected number of events", roundDigits = digitsSampleSize, transpose = TRUE ) } else { summaryFactory$addParameter(designPlan, parameterName = "expectedNumberOfSubjects", parameterCaption = "Expected number of subjects", roundDigits = digitsSampleSize, transpose = TRUE ) } # simulation multi-arm #5: earlyStop per mu_max if (outputSize %in% c("medium", "large")) { summaryFactory$addParameter(designPlan, parameterName = "earlyStop", parameterCaption = "Overall exit probability", # (under H1) roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE, transpose = TRUE ) } # simulation multi-arm / enrichment #6: sampleSizes if (outputSize %in% c("medium", "large")) { if (survivalEnabled) { if (enrichmentEnabled) { parameterName <- "singleNumberOfEventsPerStage" parameterCaption <- "Single number of events" } else { parameterName <- "eventsPerStage" parameterCaption <- "Cumulative number of events" } } else { parameterName <- "sampleSizes" parameterCaption <- "Stagewise number of subjects" } .addSimulationArrayToSummary( designPlan, parameterName, parameterCaption, summaryFactory, digitsSampleSize, smoothedZeroFormat = TRUE ) } # simulation multi-arm #7: selectedArms if (multiArmEnabled && outputSize %in% c("medium", "large")) { .addSimulationMultiArmArrayParameter( designPlan = designPlan, parameterName = "selectedArms", parameterCaption = "Selected arms", summaryFactory = summaryFactory, roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } # simulation enrichment #7: selectedPopulations if (enrichmentEnabled && outputSize %in% c("medium", "large")) { .addSimulationArrayToSummary( designPlan = designPlan, parameterName = "selectedPopulations", parameterCaption = "Selected populations", summaryFactory = summaryFactory, digitsSampleSize = digitsProbabilities, 
smoothedZeroFormat = TRUE ) } # simulation multi-arm #8: numberOfActiveArms if (multiArmEnabled && outputSize %in% c("medium", "large")) { summaryFactory$addParameter(designPlan, parameterName = "numberOfActiveArms", parameterCaption = "Number of active arms", roundDigits = digitsGeneral, transpose = TRUE ) } # simulation enrichment #8: numberOfPopulations if (enrichmentEnabled && outputSize %in% c("medium", "large")) { summaryFactory$addParameter(designPlan, parameterName = "numberOfPopulations", parameterCaption = "Number of populations", roundDigits = digitsGeneral, transpose = TRUE ) } if (outputSize == "large") { summaryFactory$addParameter(designPlan, parameterName = "conditionalPowerAchieved", parameterCaption = "Conditional power (achieved)", roundDigits = digitsProbabilities, transpose = TRUE ) } } if (baseEnabled) { parameterName <- "rejectPerStage" if (design$kMax == 1) { parameterName <- "overallReject" } if (any(!is.na(designPlan[[parameterName]]))) { summaryFactory$addParameter(designPlan, parameterName = parameterName, parameterCaption = ifelse(design$kMax == 1, "Power", "Overall power"), roundDigits = digitsProbabilities, cumsumEnabled = TRUE, smoothedZeroFormat = TRUE ) } if (inherits(designPlan, "SimulationResults")) { parameterName1 <- ifelse(survivalEnabled, "numberOfSubjects", "sampleSizes") parameterName2 <- "eventsPerStage" } else { if (design$kMax == 1 && (designPlan$.isSampleSizeObject() || .isTrialDesignPlanMeans(designPlan) || .isTrialDesignPlanRates(designPlan))) { parameterName1 <- "nFixed" parameterName2 <- "eventsFixed" } else if (design$kMax == 1 && designPlan$.isPowerObject()) { parameterName1 <- "expectedNumberOfSubjects" parameterName2 <- "expectedNumberOfEvents" } else { parameterName1 <- "numberOfSubjects" parameterName2 <- "eventsPerStage" } } if (design$kMax > 1) { summaryFactory$addParameter(designPlan, parameterName = ifelse(inherits(designPlan, "TrialDesignPlan") && designPlan$.isSampleSizeObject(), 
"expectedNumberOfSubjectsH1", "expectedNumberOfSubjects" ), parameterCaption = "Expected number of subjects", roundDigits = digitsSampleSize, transpose = TRUE ) } if (outputSize %in% c("medium", "large")) { subjectsCaption <- ifelse(design$kMax > 1 && inherits(designPlan, "SimulationResults") && !survivalEnabled, "Stagewise number of subjects", "Number of subjects") summaryFactory$addParameter(designPlan, parameterName = parameterName1, parameterCaption = subjectsCaption, roundDigits = digitsSampleSize ) } if (survivalEnabled) { if (design$kMax > 1 && !(inherits(designPlan, "TrialDesignPlanSurvival") && designPlan$.isSampleSizeObject())) { summaryFactory$addParameter(designPlan, parameterName = "expectedNumberOfEvents", parameterCaption = "Expected number of events", roundDigits = digitsSampleSize, transpose = TRUE ) } if (outputSize %in% c("medium", "large")) { summaryFactory$addParameter(designPlan, parameterName = parameterName2, parameterCaption = ifelse(design$kMax == 1, "Number of events", "Cumulative number of events" ), roundDigits = digitsSampleSize, cumsumEnabled = FALSE ) } if (outputSize == "large") { summaryFactory$addParameter(designPlan, parameterName = "analysisTime", parameterCaption = "Analysis time", roundDigits = digitsSampleSize ) } summaryFactory$addParameter(designPlan, parameterName = "studyDuration", parameterCaption = "Expected study duration", roundDigits = digitsSampleSize, smoothedZeroFormat = TRUE, transpose = TRUE ) } } if (!is.null(designPlan[["allocationRatioPlanned"]]) && length(unique(designPlan$allocationRatioPlanned)) > 1) { summaryFactory$addParameter(designPlan, parameterName = "allocationRatioPlanned", parameterCaption = "Optimum allocation ratio", roundDigits = digitsGeneral ) } .addDesignParameterToSummary( design, designPlan, designCharacteristics, summaryFactory, digitsGeneral, digitsProbabilities ) if (baseEnabled && !planningEnabled && !is.null(designPlan[["futilityPerStage"]]) && 
!any(is.na(designPlan[["futilityPerStage"]])) && any(designPlan$futilityPerStage != 0) && any(designPlan$futilityPerStage > 1e-08)) { summaryFactory$addParameter(designPlan, parameterName = "futilityPerStage", parameterCaption = "Exit probability for futility", # (under H1) roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } if (baseEnabled && simulationEnabled && design$kMax > 1) { values <- NULL if (!is.null(probsH1)) { values <- probsH1$rejectPerStage } summaryFactory$addParameter(designPlan, parameterName = "rejectPerStage", values = values, parameterCaption = "Exit probability for efficacy", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } # sample size and power only if (planningEnabled) { legendEntry <- list("(t)" = "treatment effect scale") if (ncol(designPlan$criticalValuesEffectScale) > 0) { summaryFactory$addParameter(designPlan, parameterName = "criticalValuesEffectScale", parameterCaption = ifelse(.isDelayedInformationEnabled(design = design), "Upper bounds of continuation (t)", "Efficacy boundary (t)" ), roundDigits = digitsGeneral, legendEntry = legendEntry ) } else if (ncol(designPlan$criticalValuesEffectScaleUpper) > 0) { summaryFactory$addParameter(designPlan, parameterName = "criticalValuesEffectScaleLower", parameterCaption = "Lower efficacy boundary (t)", roundDigits = digitsGeneral, legendEntry = legendEntry ) summaryFactory$addParameter(designPlan, parameterName = "criticalValuesEffectScaleUpper", parameterCaption = "Upper efficacy boundary (t)", roundDigits = digitsGeneral, legendEntry = legendEntry ) } if (ncol(designPlan$futilityBoundsEffectScale) > 0 && !all(is.na(designPlan$futilityBoundsEffectScale))) { summaryFactory$addParameter(designPlan, parameterName = "futilityBoundsEffectScale", parameterCaption = ifelse(.isDelayedInformationEnabled(design = design), "Lower bounds of continuation (t)", "Futility boundary (t)" ), roundDigits = digitsGeneral, legendEntry = legendEntry ) } else if 
(ncol(designPlan$futilityBoundsEffectScaleUpper) > 0 && (any(!is.na(designPlan$futilityBoundsEffectScaleLower)) || any(!is.na(designPlan$futilityBoundsEffectScaleUpper)))) { summaryFactory$addParameter(designPlan, parameterName = "futilityBoundsEffectScaleLower", parameterCaption = "Lower futility boundary (t)", roundDigits = digitsGeneral, legendEntry = legendEntry ) summaryFactory$addParameter(designPlan, parameterName = "futilityBoundsEffectScaleUpper", parameterCaption = "Upper futility boundary (t)", roundDigits = digitsGeneral, legendEntry = legendEntry ) } if (!is.null(probsH1) && !is.null(probsH0) && design$kMax > 1) { probsH0$earlyStop <- matrix(probsH0$earlyStop[1:(design$kMax - 1), 1], ncol = 1) probsH0$rejectPerStage <- matrix(probsH0$rejectPerStage[1:(design$kMax - 1), 1], ncol = 1) if (is.matrix(probsH1$rejectPerStage)) { if (design$kMax > 1 && designPlan$.isSampleSizeObject()) { probsH1$rejectPerStage <- probsH1$rejectPerStage[1:(design$kMax - 1), 1] } else { probsH1$rejectPerStage <- matrix(probsH1$rejectPerStage[1:(design$kMax - 1), ], ncol = ncol(probsH1$rejectPerStage) ) } } else { probsH1$rejectPerStage <- probsH1$rejectPerStage[1:(design$kMax - 1)] } if (any(design$futilityBounds > -6)) { if (is.matrix(probsH1$earlyStop)) { probsH1$earlyStop <- matrix(probsH1$earlyStop[1:(design$kMax - 1), ], ncol = ncol(probsH1$earlyStop) ) } else { probsH1$earlyStop <- probsH1$earlyStop[1:(design$kMax - 1)] } summaryFactory$addParameter(probsH0, parameterName = "earlyStop", parameterCaption = "Overall exit probability (under H0)", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) x <- designPlan if (is.null(x)) { x <- design } summaryFactory$addParameter(x, parameterName = "earlyStop", values = probsH1$earlyStop, parameterCaption = "Overall exit probability (under H1)", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } summaryFactory$addParameter(probsH0, parameterName = "rejectPerStage", parameterCaption = "Exit probability for 
efficacy (under H0)", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) if (designPlan$.isPowerObject()) { summaryFactory$addParameter(designPlan, parameterName = "rejectPerStage", values = probsH1$rejectPerStage, parameterCaption = "Exit probability for efficacy (under H1)", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } else { summaryFactory$addParameter(probsH1, parameterName = "rejectPerStage", parameterCaption = "Exit probability for efficacy (under H1)", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } if (any(design$futilityBounds > -6)) { summaryFactory$addParameter(probsH0, parameterName = "futilityPerStage", parameterCaption = "Exit probability for futility (under H0)", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) x <- designPlan if (is.null(x)) { x <- design } futilityPerStage <- probsH1$futilityPerStage if (.isTrialDesignPlan(x) && x$.isSampleSizeObject() && ncol(futilityPerStage) > 1) { futilityPerStage <- futilityPerStage[, 1] } summaryFactory$addParameter(x, parameterName = "futilityPerStage", values = futilityPerStage, parameterCaption = "Exit probability for futility (under H1)", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } } } if (!is.null(performanceScore)) { print(performanceScore) summaryFactory$addParameter(performanceScore, parameterName = "performanceScore", parameterCaption = "Performance score", roundDigits = digitsProbabilities, smoothedZeroFormat = TRUE ) } return(summaryFactory) } .getSummaryVariedParameterNameEnrichment <- function(designPlan) { if (grepl("Rates", .getClassName(designPlan))) { return("piTreatments") } if (grepl("Survival", .getClassName(designPlan))) { return("hazardRatios") } return("effects") } .getSummaryGroup <- function(parameterCaption, numberOfVariedParams, variedParamNumber, designPlan) { if (numberOfVariedParams <= 1) { return(list( groupCaption = parameterCaption, legendEntry = list() )) } enrichmentEnabled <- 
grepl("SimulationResultsEnrichment", .getClassName(designPlan)) if (enrichmentEnabled) { variedParameterName <- .getSummaryVariedParameterNameEnrichment(designPlan) variedParameterValues <- designPlan$effectList[[variedParameterName]] if (variedParameterName == "piTreatments") { variedParameterCaption <- "pi(treatment)" } else { variedParameterCaption <- C_PARAMETER_NAMES[[variedParameterName]] } if (is.matrix(variedParameterValues) && ncol(variedParameterValues) == 1) { variedParameterCaption <- sub("s$", "", variedParameterCaption) } } else { variedParameterName <- .getVariedParameterSimulationMultiArm(designPlan) variedParameterValues <- designPlan[[variedParameterName]] variedParameterCaption <- C_PARAMETER_NAMES[[variedParameterName]] } userDefinedEffectMatrix <- !enrichmentEnabled && designPlan$.getParameterType("effectMatrix") == C_PARAM_USER_DEFINED if (userDefinedEffectMatrix) { return(list( groupCaption = paste0(parameterCaption, " [", variedParamNumber, "]"), legendEntry = list("[j]" = "effect matrix row j (situation to consider)") )) } if (is.matrix(variedParameterValues)) { values <- variedParameterValues[variedParamNumber, ] if (length(values) > 1) { values <- .arrayToString(values, vectorLookAndFeelEnabled = TRUE) } } else { values <- variedParameterValues[variedParamNumber] } if (is.numeric(values)) { values <- round(values, 2) } return(list( groupCaption = paste0( parameterCaption, ", ", tolower(variedParameterCaption), " = ", values ), legendEntry = list() )) } .getSummaryGroupCaption <- function(designPlan, parameterName, numberOfGroups, groupNumber) { listItemPrefix <- getOption("rpact.summary.list.item.prefix", C_SUMMARY_LIST_ITEM_PREFIX_DEFAULT) if (grepl("Enrichment", .getClassName(designPlan))) { categoryCaption <- .getCategoryCaptionEnrichment(designPlan, parameterName, groupNumber) categoryCaption <- sub("^F$", "Full population F", categoryCaption) categoryCaption <- sub("^R$", "Remaining population R", categoryCaption) categoryCaption <- 
sub("^S", "Subset S", categoryCaption) return(paste0(listItemPrefix, categoryCaption)) } treatmentCaption <- ifelse(numberOfGroups > 2, paste0("Treatment arm ", groupNumber), "Treatment arm") if (!grepl("Survival", .getClassName(designPlan)) || (inherits(designPlan, "SimulationResultsMultiArmSurvival") && parameterName == "singleNumberOfEventsPerStage")) { return(ifelse(groupNumber == numberOfGroups, paste0(listItemPrefix, "Control arm"), paste0(listItemPrefix, treatmentCaption) )) } return(paste0(listItemPrefix, treatmentCaption, " vs. control")) } .addSimulationArrayToSummary <- function(designPlan, parameterName, parameterCaption, summaryFactory, digitsSampleSize, smoothedZeroFormat = FALSE) { arrayData <- designPlan[[parameterName]] if (is.null(arrayData)) { stop(C_EXCEPTION_TYPE_RUNTIME_ISSUE, class(designPlan)[1], " does not contain the field ", sQuote(parameterName)) } numberOfVariedParams <- dim(arrayData)[2] numberOfGroups <- dim(arrayData)[3] for (variedParamNumber in 1:numberOfVariedParams) { summaryGroup <- .getSummaryGroup( parameterCaption, numberOfVariedParams, variedParamNumber, designPlan ) groupCaption <- summaryGroup$groupCaption legendEntry <- summaryGroup$legendEntry if (numberOfGroups > 1) { summaryFactory$addItem(groupCaption, "", legendEntry = legendEntry) } for (groupNumber in 1:numberOfGroups) { dataPerGroupAndStage <- arrayData[, variedParamNumber, groupNumber] if (numberOfGroups > 1) { groupCaption <- .getSummaryGroupCaption( designPlan, parameterName, numberOfGroups, groupNumber ) } summaryFactory$addParameter(designPlan, parameterName = parameterName, values = dataPerGroupAndStage, parameterCaption = groupCaption, roundDigits = digitsSampleSize, smoothedZeroFormat = smoothedZeroFormat, enforceFirstCase = TRUE ) } } } .addSimulationMultiArmArrayParameter <- function(designPlan, parameterName, parameterCaption, summaryFactory, roundDigits, smoothedZeroFormat = FALSE) { arrayData <- designPlan[[parameterName]] if (is.array(arrayData) && 
length(dim(arrayData)) == 3) { totalNumberOfGroups <- dim(designPlan[[ifelse(grepl("Survival", .getClassName(designPlan)), "eventsPerStage", "sampleSizes" )]])[3] numberOfGroups <- dim(arrayData)[3] if (parameterName == "selectedArms" && !grepl("Survival", .getClassName(designPlan))) { # remove control group numberOfGroups <- numberOfGroups - 1 } numberOfVariedParams <- dim(arrayData)[2] for (variedParamNumber in 1:numberOfVariedParams) { summaryGroup <- .getSummaryGroup( parameterCaption, numberOfVariedParams, variedParamNumber, designPlan ) groupCaption <- summaryGroup$groupCaption legendEntry <- summaryGroup$legendEntry if (numberOfGroups > 1) { summaryFactory$addItem(groupCaption, "", legendEntry = legendEntry) } for (groupNumber in 1:numberOfGroups) { dataPerGroupAndStage <- arrayData[, variedParamNumber, groupNumber] if (numberOfGroups > 1) { groupCaption <- .getSummaryGroupCaption( designPlan, parameterName, totalNumberOfGroups, groupNumber ) } summaryFactory$addParameter(designPlan, parameterName = parameterName, values = dataPerGroupAndStage, parameterCaption = groupCaption, roundDigits = roundDigits, smoothedZeroFormat = smoothedZeroFormat, enforceFirstCase = TRUE ) } } } else { data <- designPlan[[parameterName]] numberOfGroups <- ncol(data) for (groupNumber in 1:numberOfGroups) { dataPerGroupAndStage <- data[, groupNumber] summaryFactory$addParameter(designPlan, parameterName = parameterName, values = dataPerGroupAndStage, parameterCaption = ifelse(groupNumber == numberOfGroups, paste0(parameterCaption, ", control"), paste0(parameterCaption, ", treatment ", groupNumber) ), roundDigits = roundDigits, smoothedZeroFormat = smoothedZeroFormat ) } } } rpact/NEWS.md0000644000176200001440000005542614450500657012503 0ustar liggesusers # rpact 3.4.0 ## New features * The new function `getPerformanceScore()` calculates the conditional performance score, its sub-scores and components according to Herrmann et al. 
(2020) for a given simulation result from a two-stage design * `allocationRatioPlanned` for simulating multi-arm and enrichment designs can be a vector of length kMax, the number of stages * `getObjectRCode()` (short: `rcmd()`): with the new arguments `pipeOperator` and `output` many new output variants can be specified, e.g., the native R pipe operator or the magrittr pipe operator can be used * Generic function `knitr::knit_print` for all result objects implemented and automatic code chunk option `results = 'asis'` activated ## Improvements, issues, and changes * Improved speed of numerical computation of group sequential designs and test characteristics * Multivariate t distribution restricted to `df <= 500` because of erroneous results in `mnormt` package otherwise. For `df > 500`, multivariate normal distribution is used * Performance of cumulative distribution function and survival function plot improved * Test coverage extended and improved * Descriptions for all class fields added * Renamed field `omega` to `chi` in class `TrialDesignPlanSurvival` * Several minor improvements # rpact 3.3.4 * Rcpp sugar function `sapply` removed from C++ code to stop deprecated warnings on r-devel-linux-x86_64-fedora-clang * Minor improvements # rpact 3.3.3 * `allocationRatioPlanned` for simulating means and rates for a two treatment groups design can be a vector of length kMax, the number of stages * `calcSubjectsFunction` can be used in C++ version for simulating means and rates * `calcEventsFunction` added in getSimulationSurvival() * `getPerformanceScore()` added: calculates the performance score for simulation means results (1 and 2 groups; 2 stages) * Performance of simulation rates improved for 1 and 2 groups (by translating from R to C++) * Performance of simulation means improved for 1 and 2 groups * Two-sided O'Brien and Fleming beta-spending function corrected * Issue in plot type 5 for sample size means and rates fixed * Added dependency on R >= 3.6.0 * Minor 
improvements # rpact 3.3.2 * Design objects can be piped into `getDataset()` to enable pipe syntax for analysis, e.g., `getDesignGroupSequential() |> getDataset(dataMeans) |> getAnalysisResults()` * Performance of simulation means improved for 1 and 2 groups (by translating from R to C++) * Total test time was cut in half by improving simulation performance and enabling parallel testing * `SystemRequirements: C++11` added to DESCRIPTION to enable C++ 11 compilation on R 3.x * Minor improvements # rpact 3.3.1 * Help pages improved * Parameter `betaAdjustment` can also be used in `getDesignInverseNormal()` * `subsets` removed from result of `getWideFormat()` for non-enrichment datasets * Summary of enrichment survival simulation results improved * Parameter `populations` in `getSimulationEnrichmentMeans()`, `getSimulationEnrichmentRates()`, and `getSimulationEnrichmentSurvival()` has been removed since it is always derived from `effectList` * Bug fixed in `getSimulationEnrichmentRates()` for calculated non-integer number of subjects * Futility probabilities and futility bounds corrected for two-sided beta-spending function approach * `getRawData()`: the resulting `data.frame` now contains the correct `stopStage` and `lastObservationTime` (formerly `observationTime`) * `deltaWT` is provided with three decimal points for typeOfDesign = "WToptimum" * Generic `as.data.frame` functions improved * testthat version changed to edition 3 * The rpact source code has been published on GitHub and the bug report link has been changed to https://github.com/rpact-com/rpact/issues * Minor improvements # rpact 3.3.0 ## New features * Two-sided beta-spending approach with binding and non-binding futility bounds * Delayed response utility added in design specification ## Improvements, issues, and changes * `getSimulationMultiArmSurvival()`: single stage treatment arm specific event numbers account for selection procedure * User defined selection function can be used in 
`getSimulationEnrichmentRates()` and `getSimulationEnrichmentSurvival()` * Design summary extended by information of `getDesignCharacteristics()` * `getSimulationSurvival()`: the result object now contains the new parameter `overallEventsPerStage`, which contains the values previously given in `eventsPerStage` (it was "cumulative" by mistake); `eventsPerStage` contains now the non-cumulative values as expected * Minor improvements # rpact 3.2.3 * Performance of group sequential and Fisher's combination test designs improved * 'register' storage class specifier removed from C++ sources * Minor improvements # rpact 3.2.2 * Performance of group sequential and Fisher's combination test designs improved (by translating from R to C++) * Numerical issue in analysis time calculation for survival design in specific cases resolved * The internally used minimum quantile function value was changed from `stats::qnorm(1e-323)` to `stats::qnorm(1e-100)` * Unit tests extended * Minor improvements # rpact 3.2.1 * C++ warning "using integer absolute value function 'abs' when argument is of floating point type" under r-devel-linux-x86_64-debian-clang removed * getDataset: support of emmeans result objects as input improved * `getAnalysisResults()`: issue with zero values in the argument 'userAlphaSpending' fixed * Minor improvements # rpact 3.2.0 ## New features * Simulation tools for enrichment design testing means, rates, and hazard ratios: function `getSimulationEnrichmentMeans()`, `getSimulationEnrichmentRates()`, `getSimulationEnrichmentSurvival()` available for simulation of enrichment designs; note that this is a novel implementation, hence experimental * `getDesignGroupSequential()` / `getDesignInverseNormal()`: new typeOfDesign = "noEarlyEfficacy" added ## Improvements, issues, and changes * `getSimulationSurvival()`: bug fixed for accruallIntensity = 0 at some accrual intervals * For observed conditional power, standardized theta not truncated to 0 any more in 
`getSimulationMultiArmMeans()`, `getSimulationMultiArmRates()`, and `getSimulationMultiArmSurvival()` * Conditional power calculation for analysis rates takes into account differently the null value of condErrorRate * Function `testPackage()`: a problem with downloading full set of unit tests under Debian/Linux has been fixed * Generic function `kable()` improved: optional knitr::kable arguments enabled, e.g., format * In print and summary output, "overall" renamed to "cumulative" if means, stDevs, or rate are calculated over stages rather than stage-wise * getDataset: support of emmeans result objects as input improved * Numerical accuracy of `qnorm()` calculations improved * Analysis enrichment results now support the generic function `as.data.frame()` * Naming of the stage results parameters in the print output improved * New example data added: "rawDataTwoArmNormal" * Issue in summary fixed: earlyStop and rejectPerStage were no longer displayed * Minor improvements # rpact 3.1.1 * Performance of two-sided Pampallona & Tsiatis design improved * 12 example datasets added * Sample sizes in plots now have the same format as in print output; format can be changed using setOutputFormat() * getDataset supports emmeans result objects as input * Print output of simulation results improved * Added dependency on R >= 3.5.0 because serialized objects in serialize/load version 3 cannot be read in older versions of R * Plot label interface for configuration via the rpact Shiny app implemented * Minor improvements # rpact 3.1.0 ## New features * Analysis tools for enrichment design testing means, rates, and hazard ratios: function `getAnalysisResults()` generalized for enrichment designs; function `getDataset()` generalized for entering stratified data; manual extended for enrichment designs * Automatic boundary recalculations during the trial for analysis with alpha spending approach, including under- and over-running: setup via the optional parameters 'maxInformation' and 
'informationEpsilon' in function `getAnalysisResults()` * The new function `getObjectRCode()` (short: `rcmd()`) returns the original R command which produced any rpact result object, including all dependencies * `getWideFormat()` and `getLongFormat()` return a dataset object in wide format (unstacked) or long format (narrow, stacked) * Generic function `kable()` returns the output of an rpact result object formatted in Markdown. * Generic function `t()` returns the transpose of an rpact result object ## Improvements, issues, and changes * New argument 'plotSettings' added to all plot functions * Summary for design, simulation, and analysis unified and extended * Issue in `getDesignFisher()` fixed: `getDesignFisher(method = "noInteraction", kMax = 3)` and `getDesignFisher(method = "noInteraction")` produced different results * 'normalApproximation' default value changed to TRUE for multi-arm analysis of rates * Repeated p-values: in search algorithm, upper bound of significance level corrected when considering binding futility bounds * `testPackage()`: the default call is now running only a small subset of all available unit tests; with the new argument 'connection' the owners of the rpact validation documentation can enter a 'token' and a 'secret' to get full access to all unit tests * Scaling of grid plots improved * Minor improvements # rpact 3.0.4 * Beta-spending function approach with binding futility bounds * Pampallona & Tsiatis design with binding and non-binding futility bounds * Argument 'accrualIntensityType' added to `getSampleSizeSurvival()`, `getSimulationSurvival()`, `getNumberOfSubjects()`, and `getEventProbabilities()` * Specification of Weibull survival times possible through definition of hazard rates or medians in simulation tool * Minor improvements # rpact 3.0.3 * New utility functions `getParameterCaption()` and `getParameterName()` implemented * Design parameters added to simulation print output * Generic function `as.matrix()` improved for 
several result objects * Issue in `getAvailablePlotTypes()` for sample size and power results fixed * Issue for `getDesignFisher(kMax = 1)` in `getSimulationMultiArm...()` fixed * `getSimulationMultiArmSurvival()`: correlation of log-rank statistics revised and improved * `getSimulationMultiArmMeans()`: name of the first effectMeasure option "effectDifference" changed to "effectEstimate" * `getSimulation[MultiArm][Means/Rates/Survival]()`: argument 'showStatistics' now works correctly and is consistently FALSE by default for multi-arm and non-multi-arm * `getSimulation[MultiArm]Survival()`: generic function `summary()` improved * `getAnalysisResults()`: generic function `summary()` improved * `getAccrualTime()`: improved and new argument 'accrualIntensityType' added * Header text added to design summaries * `getSampleSizeSurvival()`: field 'studyDurationH1' in result object was replaced by 'studyDuration', i.e., 'studyDurationH1' is deprecated and will be removed in future versions * Minor changes in the inline help and manual * Minor improvements # rpact 3.0.2 * `getSimulationMultiArmSurvival()`: plannedEvents redefined as overall events over treatment arms * `getStageResults()`: element overallPooledStDevs added; print output improved * Unit tests improved: test coverage and references to the functional specification optimized * plot type 13 of `getSampleSizeSurvival()` with user defined lambdas with different lengths: issue fixed * Minor improvements # rpact 3.0.1 * Vignette "rpact: Getting Started" included into the package * New summary output option "rpact.summary.width" added * Generic function `summary()` improved for several result objects * Result output of function `testPackage()` improved * `getSimulationMultiArm[Means/Rates/Survival]()`: stage index corrected for user defined calcSubjectsFunction or calcEventsFunction * `getSimulationMultiArmRates()`: adjustment for identical simulated rates to account for ties * `getSimulationMultiArmSurvival()`: 
corrected correlation of test statistics * Output formatting improved * Minor improvements # rpact 3.0.0 ## New features * Simulation tools for multi-arm design testing means, rates, and hazard ratios * Analysis tools for multi-arm design testing means, rates, and hazard ratios * `getSimulationRates()`: exact versions for testing a rate (one-sample case) and equality of rates (two-sample case) * getDataset: multi-arm datasets for means, rates, and survival data * Analysis of fixed designs * Summary for analysis and simulation result objects newly implemented * Summary for most rpact result objects substantially improved and enhanced * `getEventProbabilities()`: plot of result object * `getNumberOfSubjects()`: plot of result object * Visual comparison of two designs: `plot(design1, design2)` * Functions setOutputFormat and getOutputFormat implemented: definition of user defined output formats * `getSimulationMeans()`: thetaH1 and stDevH1 can be specified for assessment of sample size recalculation (replaces thetaStandardized) * `getSimulationSurvival()`: separate p-values added to the aggregated simulation data for Fisher designs * `getSimulationMeans()`, `getSimulationRates()`: Cumulated number of subjects integrated in getData object * `getSimulation[MultiArm][Means/Rates/Survival]()`: new logical argument 'showStatistics' added * Example datasets (csv files) added to the package * plot type "all": plot all available plots of an object in one step using `plot(x, type = "all")` * plot type improved: 'type' now can be a vector, e.g., `plot(x, type = c(1, 3))` * `plot(x, grid = 1)`: new plot argument 'grid' enables the plotting of 2 or more plots in one graphic ## Improvements, issues, and changes * `getAnalysisResults()`: list output implemented analogous to the output of all other rpact objects * `getAnalysisResults()`: the following stage result arguments were removed from result object because they were redundant: effectSizes, testStatistics, and pValues. 
Please use the '.stageResults' object to access them, e.g., results\$.stageResults\$effectSizes * `getAnalysisResults()`: the following design arguments were removed from result object because they were redundant: stages, informationRates, criticalValues, futilityBounds, alphaSpent, and stageLevels. Please use the '.design' object to access them, e.g., results\$.design\$informationRates * Optional argument 'stage' removed from functions getConditionalPower, getConditionalRejectionProbabilities, getFinalPValue, getRepeatedPValues, and getTestActions * Function testPackage improved, e.g., results will be displayed now on screen * Help system renewed and approved, e.g., help for corresponding generic functions (e.g., plot) linked where applicable * Function getPiecewiseSurvivalTime improved: pi1 and pi2 will not be calculated any longer for lambda- or median-based definitions; eventTime only required for pi-based definitions * `plot(x, showSource = TRUE)` improved for all rpact result objects x * Performance of plotting analysis results of Fisher designs improved * `getSimulationRates()`: issue for futility stopping for Fisher's combination test fixed * `getSimulationSurvival()`: issue for expected number of events fixed * `getSimulationSurvival()`: if eventsNotAchieved > 0, rejection/futility rate and analysis time is estimated for valid simulation runs * `getSimulationSurvival()`: output improved for lambda1/median1/hazardRatio with length > 1 * `getSampleSizeSurvival()`: calculation of the maximum number of subjects given the provided argument 'followUpTime' improved * `getPiecewiseSurvivalTime()`: delayed response via list-based piecewiseSurvivalTime definition enabled * `getAccrualTime()` / `getSimulationSurvival()`: issue with the calculation of absolute accrual intensity by given relative accrual intensity fixed * `getRawData()`: issue for multiple pi1 solved * Implementation of the generic function 'names' improved * Test coverage improved: lots of new unit 
tests added * License information in the DESCRIPTION file corrected: changed from GPL-3 to LGPL-3 * Minor improvements # rpact 2.0.6 * Boundaries on effect scale for testing means now accounts for the unknown variance case * `getAnalysisSurvival()`: calculation of stage wise results not more in getStageResults * `getStageResults()`: the calculation of 'effectSizes' for survival data and thetaH0 != 1 was corrected * `getDataset()` of survival data: issue with the internal storage of log ranks fixed * Sample size plot: issue for kMax = 1 fixed * `getSampleSizeSurvival()` with piecewise survival time: issue with calculation of 'maxNumberOfSubjects' for given 'followUpTime' fixed * Internal Shiny app interface improved * Minor improvements # rpact 2.0.5 * Assumed median survival time: get[SampleSize/Power/Simulation]Survival now support direct input of arguments 'median1' and 'median2' * Output of generic function `summary()` improved * Plot type 5 of getPower[...] and getSimulation[...] objects improved * Output of `getSampleSizeSurvival()` with given maxNumberOfSubjects improved * Output of `get[SampleSize/Power]Survival()` for Kappa != 1 improved * Assert function for minNumberOfSubjectsPerStage corrected for undefined conditionalPower * Two-sided boundaries on effect scale in survival design improved * Error in `summary()` for `getDesign[...]()` fixed * Other minor improvements # rpact 2.0.4 * Incorrect output of function `summary()` fixed for `getSampleSize[...]()` and `getPower[...]()` * as.data.frame: default value of argument 'niceColumnNamesEnabled' changed from TRUE to FALSE # rpact 2.0.3 ## New features * Plot function for Fisher design implemented * Generic function `summary()` implemented for `getDesign[...]()`, `getSampleSize[...]()`, `getPower[...]()`, and `getSimulation[...]()` results: a simple boundary summary will be displayed ## Improvements, issues, and changes * Generic function as.data.frame improved for `getDesign[...]()`, 
`getSampleSize[...]()`, `getPower[...]()`, and `getSimulation[...]()` results * Output of `getStageResults()` improved * Improvements for Shiny app compatibility and better Shiny app performance * Repeated p-values are no longer calculated for typeOfDesign = "WToptimum" * Piecewise survival time improved for numeric definition: median and pi will not be calculated and displayed any longer * Plot: legend title and tick mark positioning improved; optional arguments xlim and ylim implemented * Sample size/power: usage of argument 'twoSidedPower' optimized * Performance of function rpwexp/getPiecewiseExponentialRandomNumbers improved (special thanks to Marcel Wolbers for his example code) * For group sequential designs a warning will be displayed if information rates from design not according to data information * Format for output of standard deviation optimized # rpact 2.0.2 * Minor corrections in the inline help * Labeling of lower and upper critical values (effect scale) reverted * Simulation for Fisher's combination test corrected * Parameter minNumberOfAdditionalEventsPerStage renamed to minNumberOfEventsPerStage * Parameter maxNumberOfAdditionalEventsPerStage renamed to maxNumberOfEventsPerStage * Parameter minNumberOfAdditionalSubjectsPerStage renamed to minNumberOfSubjectsPerStage * Parameter maxNumberOfAdditionalSubjectsPerStage renamed to maxNumberOfSubjectsPerStage * Output of function `getAccrualTime()` improved * Validation of arguments maxNumberOfIterations, allocation1, and allocation2 added: check for positive integer * Function `getSampleSizeSurvival()` improved: numeric search for accrualTime if followUpTime is given * Default value improved for analysis tools: if no effect was specified for conditional power calculation, the observed effect is selected * Fixed: function getDataset produced an error if only one log-rank value and one event was defined * Number of subjects per treatment arm are provided in output of simulation survival if allocation 
ratio != 1 * Function getSimulationSurvival improved: first value of minNumberOfEventsPerStage and maxNumberOfEventsPerStage must be NA or equal to first value of plannedSubjects # rpact 2.0.1 * Function base::isFALSE replaced to guarantee R 3.4.x compatibility * C++ compiler warning on r-devel-linux-x86_64-debian-clang system removed * C++ compiler error on r-patched-solaris-x86 system fixed # rpact 2.0.0 ## New features * Power calculation at given or adapted sample size for means, rates and survival data * Sample size and power calculation for survival trials with piecewise accrual time and intensity * Sample size and power calculation for survival trials with exponential survival time, piecewise exponential survival time and survival times that follow a Weibull distribution * Simulation tool for survival trials; our simulator is very fast because it was implemented with C++. Adaptive event number recalculations based on conditional power can be assessed * Simulation tool for designs with continuous and binary endpoints. Adaptive sample size recalculations based on conditional power can be assessed * Comprehensive and unified tool for performing sample size calculation for fixed sample size design * Enhanced plot functionalities ## Improvements, issues, and changes * Fisher design, analysis of means or rates, conditional rejection probabilities (CRP): calculation issue fixed for stage > 2 * Call of getSampleSize[Means/Rates/Survival] without design argument implemented * For all `set.seed()` calls 'kind' and 'normal.kind' were specified as follows: kind = "Mersenne-Twister", normal.kind = "Inversion" * Minor code optimizations, e.g. 'return()' replaced by 'return(invisible())' if reasonable * Bug in `readDatasets()` fixed: variable names 'group' and 'groups' are now accepted * "Overall reject per stage" and "Overall futility per stage" renamed to "Overall reject" and "Overall futility", respectively (also variable names) * Labels "events.." and "..patients.." 
consistently changed to "# events.." and "# patients...", respectively * Output format for 'allocationRatioPlanned' specified * Method 'show' of class 'ParameterSet' expanded: R Markdown output features implemented * `getSampleSizeSurvival()`: argument 'maxNumberOfPatients' was renamed in 'maxNumberOfSubjects' * Result output, inline help and documentation: the word 'patient' was replaced by 'subject' * Variables 'numberOfSubjectsGroup1' and 'numberOfSubjectsGroup2' were renamed to 'numberOfSubjects1' and 'numberOfSubjects1' * Final p-values for two-sided test (group sequential, inverse normal, and Fisher combination test) available * Upper and lower boundaries on effect scale for testing rates in two samples # rpact 1.0.0 * First release of rpact rpact/MD50000644000176200001440000006636714450555553011730 0ustar liggesuserse20e9928f3ed33b2bcb2f3e62c50b43b *DESCRIPTION 8331e4deba26eef226753b64f4946b1e *NAMESPACE 0ce851423d6dcf044c2a138ad59a3de2 *NEWS.md 3a813d5048af01f086bf6afb675acabb *R/RcppExports.R 3c13caa011972abab215330fc3fd47ee *R/class_analysis_dataset.R 05d19f34fee0419af8ca2661c75842e5 *R/class_analysis_results.R 76fc935c575795836b1be2c1db060196 *R/class_analysis_stage_results.R 2bbe578272db7d6abd673f02c9458ef6 *R/class_core_parameter_set.R 0cc5f14c854b82e27bebc8f40e0324dc *R/class_core_plot_settings.R 2e7138d3e8a89822a38e0d11649851eb *R/class_design.R 73307b3d8f44e46c6c9044b70a370d18 *R/class_design_plan.R 76d901b84793b2a3d59ec69e1e396d47 *R/class_design_power_and_asn.R 9e8573ea335d840466465eb7b0848289 *R/class_design_set.R 04a3f290ce74b33ea9e0bd1814bb3770 *R/class_event_probabilities.R b9cc8437c0e9e152c91b09ad32d9cba8 *R/class_performance_score.R a3aefa139cdeecda1a92d0d7948b0a51 *R/class_simulation_results.R 3ff31a61e7b38f0cf8f4df1bea43b15c *R/class_summary.R 1375322e9f6289cff71a4a443be8173d *R/class_time.R cd60cb7ed6adfa14d043008afbdb42a3 *R/data.R 5d32ec1b865d4abf889b4876dd993f15 *R/f_analysis_base.R 8f1582105d678bb4b9549888c36f2bc0 
*R/f_analysis_base_means.R 81ad95e3c296eeaf80de3e1d9238ec04 *R/f_analysis_base_rates.R fe0f38804b1a9de661a9a3a7c88aac1c *R/f_analysis_base_survival.R ea435349d36912c15718d0b8294bfc5b *R/f_analysis_enrichment.R a12ec740208a286d78442b10a48d28ec *R/f_analysis_enrichment_means.R c883605f91b24b91984f9696cb5c1224 *R/f_analysis_enrichment_rates.R 883342af6a726c49256281bcb2092463 *R/f_analysis_enrichment_survival.R 0a661fa61ff52edc1912feef439f7592 *R/f_analysis_multiarm.R f9896a205901ed359700a35cb54591cd *R/f_analysis_multiarm_means.R 2f1fc5e34db0cd39fed8813ee94754d2 *R/f_analysis_multiarm_rates.R 6772908232bdef124e3f40e743f5f852 *R/f_analysis_multiarm_survival.R 6a528071beb9053e80cb1cc00d50a44e *R/f_analysis_utilities.R ff9a3c6d6c422e533ae93e7d7ed63583 *R/f_core_assertions.R 9f93ef619a13c23d40e41d14c9087f6b *R/f_core_constants.R a8bf4792645f994f49dd805aca1464b0 *R/f_core_output_formats.R be95412aae1d50ba4bb595e1f8825072 *R/f_core_plot.R f22da73c17b643329e99e1f65aa6b3e9 *R/f_core_utilities.R 9d1aa3dfb7e2adb8acd2c047f898ce72 *R/f_design_fisher_combination_test.R 5fa2713b2d43214bab057f79e408ef09 *R/f_design_group_sequential.R 4cc137684ca9ffa063d182baa94d58a7 *R/f_design_sample_size_calculator.R 8c4412a2d7031cb7b74df11ecf00e949 *R/f_design_utilities.R 1716679532dffa14597331e48769c1b6 *R/f_logger.R f06f960d100ed56d00e65ad9340b71a1 *R/f_object_r_code.R c803706733e2d15b54121f1dc00df359 *R/f_parameter_set_utilities.R 7523b3c8981997f0ca972642f317db5b *R/f_quality_assurance.R a1e120f02ae20772d22cec30f2352f9c *R/f_simulation_base_means.R 456afe1df145ff6f1a08ca2afdb08099 *R/f_simulation_base_rates.R 2d9f72aeac548a31aebb452dede0f16b *R/f_simulation_base_survival.R d913419c7abd540acb0a8a10c4206a45 *R/f_simulation_calc_subjects_function.R 3bfdc56a8101084e12ae47944d695e40 *R/f_simulation_enrichment.R f0d0eb2fdac815a2ab45843767ccd0fc *R/f_simulation_enrichment_means.R 98762e00aa6008252a97bc12e8ad5913 *R/f_simulation_enrichment_rates.R 1e6d777b5e8e00fe67fa26d7ad7b97b1 
*R/f_simulation_enrichment_survival.R 95291f30f1a54933aa0b0efe4c776c86 *R/f_simulation_multiarm.R 1a347110ea2466abf2f31ccb8a21f349 *R/f_simulation_multiarm_means.R 1239db1c58ad3d25e233b09f2a99ecf3 *R/f_simulation_multiarm_rates.R 7b10e1a390a2ecaa9209c872b805ff90 *R/f_simulation_multiarm_survival.R 30164a9b7c6cea97571c10663fc766d4 *R/f_simulation_performance_score.R 46db7a010da9dcdcd4d0eefc633aa59f *R/f_simulation_utilities.R a9ba5c1d7bc9c63b09f3a50d3663d269 *R/parameter_descriptions.R 7fa3089bab9beb66ae855f773330cf68 *R/pkgname.R fc6b2e1b115d3bff735d462fe0170ef9 *README.md cb4924f3573383fca290a0a8e2ca5294 *build/partial.rdb 7a45ade3021712f54e9a8eeca0221ed9 *build/vignette.rds dc7855a151ab31082c85ae76c9a0f157 *data/dataEnrichmentMeans.RData 091c815e87ffe6c3cc1b3e2c31ca9a24 *data/dataEnrichmentMeansStratified.RData a68878fdff455c4aae090fae7c9c3f42 *data/dataEnrichmentRates.RData e772bad688953c1e5d5967059017d0d4 *data/dataEnrichmentRatesStratified.RData 2b7449e819cb7b7b8f122c06b149757d *data/dataEnrichmentSurvival.RData 8f86376898435574a9b33a5e834e711b *data/dataEnrichmentSurvivalStratified.RData 7469929d7200eb6ddbd4706a78d67926 *data/dataMeans.RData a7a6740ce9e50b4be4c53b268b1543c3 *data/dataMultiArmMeans.RData a8129d3503aaa50ba3d6659b073d52b1 *data/dataMultiArmRates.RData 17e228009d1b3183f08f7e1e0dd5e42f *data/dataMultiArmSurvival.RData 76351e2eae97fa26ec7e9d17fbd69250 *data/dataRates.RData af8575d390eff372b1188b700fac94f1 *data/dataSurvival.RData 3fdf35331c87cddde527e749d45be4e7 *data/rawDataTwoArmNormal.RData 6339831cac1c31b103a9e5b35f6e7535 *inst/WORDLIST ea8eab93de05207476b4c5f91642b478 *inst/doc/rpact_getting_started.R bef4ec3209bb566eeae27be30e7c6bc9 *inst/doc/rpact_getting_started.Rmd 983697bc351bd0daaef44d27aa0764d2 *inst/doc/rpact_getting_started.html 5a13510f7c31005bd79fa81d18398eb3 *inst/extdata/dataset_means_multi-arm.csv 122a37915d9b2e43c0211fb1c54b9f8a *inst/extdata/dataset_rates.csv af1d8773b64fbf36102bbe6b3ad0a3a5 
*inst/extdata/dataset_rates_multi-arm.csv b9e82327e5d3d37b8341576d453e2f5a *inst/extdata/dataset_survival_multi-arm.csv 84fb9399bd4da792bb7757c0f23e9ec7 *inst/extdata/datasets_rates.csv d2ea13b6edd5fe8985bbd0c2171be172 *inst/tests/testthat.R dce5b4cb1c189010991df7deb50a0033 *inst/tests/testthat/test-rpact.R 6bc5afb8178fdb9f1a459752dedf8098 *man/AccrualTime.Rd b3c067857db72e63bfa63bbdfd998e84 *man/AnalysisResults.Rd f2913a89483b79b54074bf5b94684293 *man/AnalysisResultsConditionalDunnett.Rd 0e6d8fb1edb8f860fd93c9450d6e9168 *man/AnalysisResultsEnrichment.Rd d93a57f1661bceef36a5e4a23e75ab48 *man/AnalysisResultsEnrichmentFisher.Rd 778a966800fe660b361226eb3f393e02 *man/AnalysisResultsEnrichmentInverseNormal.Rd 5ae49d436f42e33ec6ce357bdd84c203 *man/AnalysisResultsFisher.Rd 88c0d5eb1dac1881596dc8a03f8a1b3d *man/AnalysisResultsGroupSequential.Rd 83388c964b4cfd641bf9d2122db2825c *man/AnalysisResultsInverseNormal.Rd 8fcce1501ee96cb3367617ec2f37b1bf *man/AnalysisResultsMultiArm.Rd d8bddc8ad89fdfc0993939437196fc0f *man/AnalysisResultsMultiArmFisher-class.Rd b8fe5ba5dfb479316cd57daffbdcc8de *man/AnalysisResultsMultiArmInverseNormal.Rd 4430fd75d1d0d312ba53572abfe2549e *man/AnalysisResultsMultiHypotheses.Rd a9019125ce2edc7650ad255542151723 *man/ClosedCombinationTestResults.Rd 7c6d12465968ace038f11c3264d394fe *man/ConditionalPowerResults.Rd 6dd4d310be5874a0775a5eb9320d7924 *man/ConditionalPowerResultsEnrichmentMeans.Rd d1c86c37525273a73adab738b1fb30be *man/ConditionalPowerResultsEnrichmentRates.Rd 8995b41e077a7caa3b400430e78038d8 *man/ConditionalPowerResultsMeans.Rd 81805d80096b3d58260ecd9689cde6fa *man/ConditionalPowerResultsRates.Rd a4a91f2e16ea283f7077779254e99f7c *man/ConditionalPowerResultsSurvival.Rd 4d6f74dff1a22ceeecee2bffa81c819e *man/Dataset.Rd 73701a91f57336082399700d7ffd2dd9 *man/DatasetMeans.Rd e46875af19b0c358c02d84c838bd3618 *man/DatasetRates.Rd 5096efed62e31953db963efb471cbaea *man/DatasetSurvival.Rd e600012a3fa856a249127976f4dbe834 *man/EventProbabilities.Rd 
0e7d382fe366efadbf0330c5de6fb077 *man/FieldSet.Rd 7eb37e05f995007c78ae538ffcd85252 *man/NumberOfSubjects.Rd 76cd72190ec138b77183f2350163a0b3 *man/ParameterSet.Rd d5d80c75e4f78c552ea7fca68f94ab5f *man/PerformanceScore.Rd a4d1c6d2f0dbb9ac1c9a095941134c18 *man/PiecewiseSurvivalTime.Rd 88d38ec4639172382077fe9e39ef97c1 *man/PlotSettings.Rd 07dfb1b29e595267d80d573a57ba733b *man/PowerAndAverageSampleNumberResult.Rd 5d235bbd884aa52e309dbc1af9706325 *man/SimulationResults.Rd b94963f898e26e11ec66f0cd7df31c22 *man/SimulationResultsEnrichmentMeans.Rd b861b503e21c39e314be4f0b0f49c834 *man/SimulationResultsEnrichmentRates.Rd 17343a65344e514f9da2b87e42b08b2d *man/SimulationResultsEnrichmentSurvival.Rd 624008e3a55d677d96a67c2da22a543a *man/SimulationResultsMeans.Rd 81934893ef69dd79d2db1c6c8611e7a5 *man/SimulationResultsMultiArmMeans.Rd 8a9547df97150234ef1cc868a08c4254 *man/SimulationResultsMultiArmRates.Rd fa18e4d09f89ec4d7b70c71d32a34d5c *man/SimulationResultsMultiArmSurvival.Rd 9f7e236bcd9c6116ef3089b66a74dcac *man/SimulationResultsRates.Rd 54a4232d426e2ea824749b4128d990c2 *man/SimulationResultsSurvival.Rd 70e649693f831aeb6b7f37c597a5b0a1 *man/StageResults.Rd 3374317e0db9d432b358b365b96aeecf *man/StageResultsEnrichmentMeans.Rd fb1a8742cf59ac35a808203dcfbfc195 *man/StageResultsEnrichmentRates.Rd 72ca31d40e9dcb8827d29689ce8bcbd6 *man/StageResultsEnrichmentSurvival.Rd 4add18af63486bc25871cd43903442a4 *man/StageResultsMeans.Rd ed3638770650bf0d9818fe74d9ebc83c *man/StageResultsMultiArmMeans.Rd 30995fc39d4d00aaff3b3aaa2efcff7a *man/StageResultsMultiArmRates.Rd f6b1b7fab2c4deb9132f9090f7d268c5 *man/StageResultsMultiArmSurvival.Rd ecce136a89e270f75944436d621f211b *man/StageResultsRates.Rd 802ce6b973ed75ed1a2d6ca7478d5cda *man/StageResultsSurvival.Rd 37060ef3405dd944da1c6b1ae9b7b40b *man/SummaryFactory.Rd 781801b75a6a88bf027de76bd11db3ef *man/TrialDesign.Rd efdbe4c1ad07d4f999c0e5a3beb6ff2d *man/TrialDesignCharacteristics.Rd 91f247efedd686fecc138d953ee575b2 
*man/TrialDesignConditionalDunnett.Rd a0e66b56fb6698a9b34e790399a66d45 *man/TrialDesignFisher.Rd bc4e1bffe3095d2a220284c2a468fe41 *man/TrialDesignGroupSequential.Rd 1c10384388dd204be13018e63f653c02 *man/TrialDesignInverseNormal.Rd b51ef81d439de687082aa2fe4a27224a *man/TrialDesignPlan.Rd 63799f278247de6e95384721b538bf31 *man/TrialDesignPlanMeans.Rd d9642c6b41c8eb06074405c671c38fa3 *man/TrialDesignPlanRates.Rd ca047727178ae2da24f7236701fe6ce4 *man/TrialDesignPlanSurvival.Rd 305d91f3f5a6665816aaefde0d429a7b *man/TrialDesignSet.Rd 885586501bb6d0c25019b53356ddc1b7 *man/as.data.frame.AnalysisResults.Rd 3ef536b3ae2392a36a3122a43a4ed4cb *man/as.data.frame.ParameterSet.Rd b08540894557b08c8d37007a5badd977 *man/as.data.frame.PowerAndAverageSampleNumberResult.Rd 9da17eea9e3b97a905c8aa9068710e69 *man/as.data.frame.StageResults.Rd e0717d982872b7145c24c77db0c061f4 *man/as.data.frame.TrialDesign.Rd 4d2c321509e87fd726e20e15316a844c *man/as.data.frame.TrialDesignCharacteristics.Rd 4a59773788f56fa95d080377dafcc563 *man/as.data.frame.TrialDesignPlan.Rd 22f7d0657bd0d57e43ded9fd647f361e *man/as.data.frame.TrialDesignSet.Rd 0203305bcebfc3a5b43c96a4e41a23ba *man/as.matrix.FieldSet.Rd 99e4e1c911d82d96c2a5fa5b14ff4c5c *man/dataEnrichmentMeans.Rd 599d18ccbe0c5358e7e93c2a319c6f51 *man/dataEnrichmentMeansStratified.Rd f068b0d8bbab5fb72b5fc11c36739c77 *man/dataEnrichmentRates.Rd ebde73db2c4c21db843bb451df861e99 *man/dataEnrichmentRatesStratified.Rd e65eb48720770798832704ba6331bdcd *man/dataEnrichmentSurvival.Rd 9c99224c1ce075faca74f31317368e3c *man/dataEnrichmentSurvivalStratified.Rd d9b31dd8178afe65b6a1b03a087a7e7b *man/dataMeans.Rd a070d09c762ac67fde159df001ccf313 *man/dataMultiArmMeans.Rd 491b16f585b16eb19a220440baec58e8 *man/dataMultiArmRates.Rd 16c4ea767f48661c6040b50fc9264859 *man/dataMultiArmSurvival.Rd fd12134fd97505f5315a5f3ca2cd72e5 *man/dataRates.Rd d4334070d9f19b2734b621c68b9afb31 *man/dataSurvival.Rd 90d6d2325046e7630464fea0f7c43db4 *man/getAccrualTime.Rd 
766a6bfa9a8df3f2aa0afa180ec1f76b *man/getAnalysisResults.Rd 3ab62f445cdb61435317111ff2934971 *man/getAvailablePlotTypes.Rd 18dc8b3c42dd99977715b8af4e22a215 *man/getClosedCombinationTestResults.Rd 37ac023423f58e3c7bdcc84760a78605 *man/getClosedConditionalDunnettTestResults.Rd 7fb172235455baf72a9a8a8bebd2a49d *man/getConditionalPower.Rd d5465ab978a4b6dc9a7e816367db8975 *man/getConditionalRejectionProbabilities.Rd b0722c65ba779f31a8d01e14ed02f6e5 *man/getData.Rd a5a4219645096292701f97ab03af7718 *man/getDataset.Rd a6eb09ee64c2be97756203019eddd054 *man/getDesignCharacteristics.Rd 4d8251d788431c64bc713e4fab557039 *man/getDesignConditionalDunnett.Rd c9e4037078ae762defaf8ad59d0040a7 *man/getDesignFisher.Rd a681057bba443e78cbe23e7fe40fcb87 *man/getDesignGroupSequential.Rd f922976034943e2a515035bf8fbeb4c4 *man/getDesignInverseNormal.Rd 185010a1a195183a2c19ad204c5305f9 *man/getDesignSet.Rd ff154f25a58afec01b509521920b2dfa *man/getEventProbabilities.Rd 29bd2b43b2bc828b9e8875b513e80047 *man/getFinalConfidenceInterval.Rd 3fd9ad38c672a9c520a7fa4b750daea8 *man/getFinalPValue.Rd 9625ac1b56df07c2c4a5dc4f92fbdf2e *man/getGroupSequentialProbabilities.Rd f19a0d0a4dae759aa75f6083ced28aaa *man/getLambdaStepFunction.Rd aff0b30dc5dfe808a6ce669958dbfe35 *man/getLogLevel.Rd caaf05994a4d24fa053b4f2001be3130 *man/getLongFormat.Rd e1b07b0aefceeb19eae68e44a87fe80f *man/getNumberOfSubjects.Rd e70e8187fdbd94dafdc6defe765f2058 *man/getObjectRCode.Rd 51aad6d8c881592669245e27b3a037d9 *man/getObservedInformationRates.Rd 628a4b6830e57867843aef89c5400d10 *man/getOutputFormat.Rd 01cfdc69d10c723d039067f2a76f72ec *man/getParameterCaption.Rd 4e9770b1ebb4b3a83ebbf16cacdcab71 *man/getParameterName.Rd 709f1fd3063eb86f2897a30f514f6d84 *man/getPerformanceScore.Rd efb685b78f3c7262be920ea3ad094a3c *man/getPiecewiseSurvivalTime.Rd 3e58940c91208a14c44b0ad399d40e07 *man/getPlotSettings.Rd 8bf5b85a4cb2551b90d9505dfec28992 *man/getPowerAndAverageSampleNumber.Rd 8bbd4afb234f2e97f92870328041dd4b *man/getPowerMeans.Rd 
594ffa0c68990ed8b11b6d83985a9bb4 *man/getPowerRates.Rd 7a45883c3155afaf325df0900310d219 *man/getPowerSurvival.Rd 48c3a0a347fd550cf5e25c8c3e159f6b *man/getRawData.Rd a8b71cbf6d8c66b9653750f4a611d106 *man/getRepeatedConfidenceIntervals.Rd 81d4ecf08627cd4435e2930396646acd *man/getRepeatedPValues.Rd 4325b612ad90f85b23419bff16f6d3dd *man/getSampleSizeMeans.Rd 3bbbae28b5b5393b1212b89c77c7f38a *man/getSampleSizeRates.Rd 20dcd4ef14e61324d72c90791c08e615 *man/getSampleSizeSurvival.Rd 1d809353ec01d8a6977bc1ac112884c0 *man/getSimulationEnrichmentMeans.Rd 4cd475f02ed39021bd5df1b173a64bc3 *man/getSimulationEnrichmentRates.Rd 393fd776e8475021781a77eb15d18ede *man/getSimulationEnrichmentSurvival.Rd 53ba3b93c6f7716f44e6f570adf23ad4 *man/getSimulationMeans.Rd 278afd563a7e5d781a9356e128a8e5c2 *man/getSimulationMultiArmMeans.Rd c82501b74d25ddec9bddf79ea0e446c3 *man/getSimulationMultiArmRates.Rd 1b7f7f341685c3ae39b67d1f5c707580 *man/getSimulationMultiArmSurvival.Rd 3f30308a3195b948dbaa8413459dffa2 *man/getSimulationRates.Rd 0031557ac606f64b345fd38654be9959 *man/getSimulationSurvival.Rd 38b6a242971b1253407d226d9b0ca498 *man/getStageResults.Rd 258096690b4bf24f1540279ea975c0fa *man/getTestActions.Rd 35f2ca843be3fd4145afeb97b0b048a0 *man/getWideFormat.Rd 6a7a7f4615171e90566248fb1348e3ee *man/kable.ParameterSet.Rd 6c739f907ea2c06340f431ab9df37986 *man/kable.Rd fc8b7c2367ce27a0170d454c3243fd37 *man/knit_print.ParameterSet.Rd 91e5a3674cfb2097b67237ab67b4e808 *man/length.TrialDesignSet.Rd 4513ec5ff3ed8f111c95839cc868ca7b *man/names.AnalysisResults.Rd 2afad87ad03cb090b0ddcd05b95ea478 *man/names.FieldSet.Rd 1d0eb034bf333a7395b71e4f5bc8a262 *man/names.SimulationResults.Rd a293dc3708eeed113cd82bb59266a7f4 *man/names.StageResults.Rd 2d4c481f9e8bd8fdcf19d09953833d7a *man/names.TrialDesignSet.Rd 11f0420dbe5201f0f2c6c8055882ad43 *man/param_accrualIntensity.Rd bd5cca8ce10525d7a2fb0d9c370efe4c *man/param_accrualIntensityType.Rd 2c635830e56baa461cf320a4c73c71f6 *man/param_accrualTime.Rd 
c460295095732d20f3962e72df4b6c4a *man/param_activeArms.Rd 4ab14fb6c80e11644745cf40fe45c748 *man/param_adaptations.Rd 202e8939f469ec25b2e8cbec30a2fdb5 *man/param_allocationRatioPlanned.Rd b645603ae52603cf7b5739ea8ff3ac87 *man/param_allocationRatioPlanned_sampleSize.Rd 0763b021fe1cf8cfb4675165da9dea2c *man/param_alpha.Rd 589c56a62f4d752f2047692e501ea2e2 *man/param_alternative.Rd 1b0bc73d5d88db4bf172fda4623d73eb *man/param_alternative_simulation.Rd c4420fc4e9619be6c479384eccc122b2 *man/param_beta.Rd a8177aa759a9f0e5747b3d66262ef2c3 *man/param_bindingFutility.Rd 4b58027e75f6f1fed97143098e2a82de *man/param_calcEventsFunction.Rd d480199871ba9506f08ca46d26eac69e *man/param_calcSubjectsFunction.Rd 10927b51b3f097a4ba617ce7d4b0cd2d *man/param_conditionalPower.Rd c39140187e79a10e6b4756243f957fd1 *man/param_conditionalPowerSimulation.Rd 9e5037f402bea27e36087e9ce6e72cc7 *man/param_dataInput.Rd aff0e361ce130658bb3d1cfab5f01269 *man/param_design.Rd 3cf562e9f8e6125f0a1cd1c47d43b322 *man/param_design_with_default.Rd ecec9814c99f0669ef60125b24313ba6 *man/param_digits.Rd a7141b9a481a1ac98d09e38bb3777651 *man/param_directionUpper.Rd 1998f8e96e3e794b8dcb0ff8670e4960 *man/param_dropoutRate1.Rd 872d754626b1957695602c9b9cfee69b *man/param_dropoutRate2.Rd 8a36f0b7d2195ab9733b3187ddd0c803 *man/param_dropoutTime.Rd 61bc4621425cde5b2e96aacc1da9c60a *man/param_effectList.Rd e781d838bda29920c3988e969b5a6767 *man/param_effectMatrix.Rd 7423bb513eb6a5b8588827a1c76a6fb9 *man/param_effectMeasure.Rd 5c68f780306e9b9829e93af48dd471d3 *man/param_epsilonValue.Rd 256091f212b435e016060db29f75b161 *man/param_eventTime.Rd 277e81f4207ce6dc6c37f384f7fd6264 *man/param_gED50.Rd b6b4c6f573de4794b50597eace3765f6 *man/param_grid.Rd a76a45988a339c142b44d77ead737a91 *man/param_groups.Rd 9f6e7527a3bd3caebcdb4475abfd6486 *man/param_hazardRatio.Rd d33b893f7a6f2c0b475d76657a4d3bee *man/param_includeAllParameters.Rd 8607ad8934aae0d868dd13ff62fab099 *man/param_informationEpsilon.Rd 562388d3ee93933432105b3f857d8eb8 
*man/param_informationRates.Rd 946c186c3b836aea808f859ac0f3a642 *man/param_intersectionTest_Enrichment.Rd 338009db7be273e5971b6cb59e7d7870 *man/param_intersectionTest_MultiArm.Rd b29d84820a9ccd724738bcf30893ecbe *man/param_kMax.Rd 09f937415a0df841ab04f4de47372043 *man/param_kappa.Rd 1b5b34901b5fc0016a96de2db8bfcbf1 *man/param_lambda1.Rd fd2db24faacac2ac94141a85a29d64f6 *man/param_lambda2.Rd 00905ccdc5266b9194930b558a89bf17 *man/param_legendPosition.Rd d3cd3b0facf2bf0322eac00ee7c30837 *man/param_maxInformation.Rd ea6da87da19e62c0fc284f851487ff00 *man/param_maxNumberOfEventsPerStage.Rd f63216afcdb54efd4bd31d9671175367 *man/param_maxNumberOfIterations.Rd 66c96ac2c9d48c7698034798ec59f1cc *man/param_maxNumberOfSubjects.Rd 71d57414a5e6de1cbc979019d3e755e8 *man/param_maxNumberOfSubjectsPerStage.Rd 707b5318bbd39b60a6240d53bc823048 *man/param_maxNumberOfSubjects_survival.Rd 048e2dd6495b98f9536a6d8267018c51 *man/param_median1.Rd 6ed2ad486c98ea49068fefbd0c51681b *man/param_median2.Rd 343f3394cc684978efe7f4ef1d9716e0 *man/param_minNumberOfEventsPerStage.Rd 843fa3c496d96ca5bd33f4603f4ced42 *man/param_minNumberOfSubjectsPerStage.Rd 7bab9bb792a175b64de0bc5ff98974a8 *man/param_nMax.Rd f63a5826b495e11c583cdd7c4367b508 *man/param_nPlanned.Rd 3c6dc40623c973117922f06861f07d14 *man/param_niceColumnNamesEnabled.Rd 67e5bf3db86158c8ef9e1956949516d8 *man/param_normalApproximation.Rd a26e2726a4a8b4f665d7bcbd75000f40 *man/param_palette.Rd cdf5581ff03f66c184b2f1cf22281785 *man/param_pi1_rates.Rd 510577eb6844590884d1410ce62d12f4 *man/param_pi1_survival.Rd ede4b048ac2c270e6492eb2c51b551f5 *man/param_pi2_rates.Rd 80ee370eda9051ba80f6af1c89cb6db6 *man/param_pi2_survival.Rd 11168a682124ab1a85c8ce5d4d5aafac *man/param_piecewiseSurvivalTime.Rd 8712010e58f30688cb0257e2a06f1feb *man/param_plannedEvents.Rd d8e749e11d22a79fcc31010747f15ae5 *man/param_plannedSubjects.Rd 6fdcf288638656e9c02488bf66070070 *man/param_plotPointsEnabled.Rd 39226e043e02acd68de4014234974856 *man/param_plotSettings.Rd 
ff6d729004d9928988a2829dd0f22698 *man/param_populations.Rd 8f51d540ec89c9964e1c4e05523c0fdc *man/param_rValue.Rd f53f00dd03f01245cb33d5ac44ec6208 *man/param_seed.Rd d42668b703a20ed5aee995b8179aa454 *man/param_selectArmsFunction.Rd 1149ac27644d7f1939a7ad654c486b03 *man/param_selectPopulationsFunction.Rd 404d9fc183bd28dfce7c7933cf2e8b0f *man/param_showSource.Rd fe8e1dfc5cc4933e20d6cdda2b8cd574 *man/param_showStatistics.Rd d615cc8589478b5bc791857a24efd836 *man/param_sided.Rd f62ca94aea4107c12ee36f0a5944f24a *man/param_slope.Rd 7dc2f3e58087b796a26ccad5ef291f7f *man/param_stDev.Rd f020dd1aea067b2e93a768a627c58afc *man/param_stDevH1.Rd bf7f69d494bd8befc050d611de6119df *man/param_stDevSimulation.Rd 7e03013b17ab5ce9f442971b9040d565 *man/param_stage.Rd e4061163acc592e1c8a1db492ba5c1b4 *man/param_stageResults.Rd 4847ce0b3aa3c9489a5396669225876f *man/param_stratifiedAnalysis.Rd 9a35bcfd6dab3e8806cb76e2a7f24d59 *man/param_successCriterion.Rd dd4caafcb25884be5c2121f6b674bc41 *man/param_theta.Rd 532d5244adb1f5e3fb5268b316668421 *man/param_thetaH0.Rd 511b49439de873e347fa844c721fb2d4 *man/param_thetaH1.Rd 215194c8c57b9f06ec84c6cef9e8f987 *man/param_three_dots.Rd 3da3359d32ae64d345dce80888e78f86 *man/param_three_dots_plot.Rd 6e187f0223b3b9272c556f5e39034c6b *man/param_threshold.Rd e3f7e8eb249ba21ad94e28f4bb3598e2 *man/param_tolerance.Rd 5188421527b848d9d7811cacc9fc667c *man/param_typeOfComputation.Rd b34b5fe8adc2b3586d7bc76cd656bca1 *man/param_typeOfDesign.Rd dd36d6d9e3c718ff372e6c19115e86be *man/param_typeOfSelection.Rd 716c56a48fdb423436558e4dec2ec5b6 *man/param_typeOfShape.Rd aa8ca8eaa50831945cdfb3ad3e79d069 *man/param_userAlphaSpending.Rd 5f1ad8c3d0980362ff6c1308aa1976e7 *man/param_varianceOption.Rd ce7567fa8b3eaec601605f7612bebd02 *man/plot.AnalysisResults.Rd fc6fe0eab6056d9e2af46e6266a2b5be *man/plot.Dataset.Rd 3a88d8fea14f5ac82b1a22d7d9a747d6 *man/plot.EventProbabilities.Rd 7c024f1bd4cb677aab1518d5f0b9de3e *man/plot.NumberOfSubjects.Rd a26dc375022166a42c00568d3db6e975 
*man/plot.ParameterSet.Rd 1db1aecb47ca0307dae1eb26f3a5795a *man/plot.SimulationResults.Rd adff4d30bb3577ae9082214ea3695e94 *man/plot.StageResults.Rd ea210f2d78ecbd3a0c04270737a7951d *man/plot.SummaryFactory.Rd f2ab0bed0eb0bf167fe7f8373c803aef *man/plot.TrialDesign.Rd ba939bdd7d5920004850303e70b5ef5b *man/plot.TrialDesignPlan.Rd ae39d0ca41fd797b797c9bf75f653cb3 *man/plot.TrialDesignSet.Rd 6deb01ae0f092e5f04a085b28d85e41d *man/print.Dataset.Rd 0bfaf4547d2ded2824254aee1fb394c1 *man/print.FieldSet.Rd 72609fa182dd5c1cb6299c3d53d21ade *man/print.ParameterSet.Rd 7d9ca440dcff06412feb6e35d9565ba4 *man/print.SimulationResults.Rd 4f88a1089cd1603512e7381b89801996 *man/print.SummaryFactory.Rd 28b69019be1d384f80c9005fc90f0f6c *man/print.TrialDesignCharacteristics.Rd 5e81dc3042d3f03e3c28a7862d814787 *man/printCitation.Rd 90f0fee0844af80db1caf23fd6323602 *man/rawDataTwoArmNormal.Rd 078b11e39d05e43f413ab876c33dc4f3 *man/readDataset.Rd b4055f46013b62d713214e69b66271cd *man/readDatasets.Rd ee262ec055075e88ee9b252a4c5200fa *man/resetLogLevel.Rd 4be3b2a3063fc18f625796b931de094d *man/roxygen/meta.R dc6d42160b8e79d355f533577f8296f1 *man/rpact.Rd bcd12a21f391ac7858031b6983499235 *man/setLogLevel.Rd 1131b555308848777863b2ff5ba1d93f *man/setOutputFormat.Rd 67e47c59ef377f3f132cfbec7f6cbb5a *man/sub-TrialDesignSet-method.Rd 6042c58d3a21930eda80de9c1c0858fe *man/summary.AnalysisResults.Rd eb49a69d31601f5c9b52e43d968decad *man/summary.Dataset.Rd aa90cca8955cfec96e88b96888326a27 *man/summary.ParameterSet.Rd c11067af17661b875f5d8df28bf8753f *man/summary.TrialDesignSet.Rd 29c9e9e51ba7e8690102792d2b96ae88 *man/t-FieldSet-method.Rd 585f52dc5c3e77632300338ba663067e *man/testPackage.Rd c0365411f14c33b3c24416da385bdcdb *man/test_plan_section.Rd cbcbe474787761a429c22810ebe0b46a *man/utilitiesForPiecewiseExponentialDistribution.Rd e3dd39277bb07bf84662bf9acf337cc7 *man/utilitiesForSurvivalTrials.Rd 8c29e7c21e9cc380bbeac42b2d1da2cd *man/writeDataset.Rd 3d77b886be18debf313870f62bdf7f68 *man/writeDatasets.Rd 
d5fe9006535a74f204bb244d5137a10d *src/RcppExports.cpp fa4ebcc5c3edfaf98aafae932ccab5cb *src/f_assertions.cpp 2c8516a98c8789c240146ae479a84f26 *src/f_assertions.h 178d152b27cc18cfcd82236ad6fba9c4 *src/f_design_fisher_combination_test.cpp 83b7163f8e68a4bb6038eda5ca5c9eb8 *src/f_design_group_sequential.cpp 4a244812a46feff3dd52f9d1b10f1093 *src/f_simulation_base_means.cpp 56458f0341e698ecb95fe2ac313f3e56 *src/f_simulation_base_rates.cpp ca892356c3352c24053823e15a7e8243 *src/f_simulation_base_survival.cpp f326f18316a1879a34c52d653a83b054 *src/f_simulation_survival_utilities.cpp 88977325c52d09490d8d9799b7dc4d5f *src/f_simulation_survival_utilities.h 95e0f96302ba28bb6d296b73955452b5 *src/f_utilities.cpp b91dd75de86027eaaf209ade42455337 *src/f_utilities.h 19e1e0708c68b94a1f0593816e53e4d0 *src/rpact_types.h d2ea13b6edd5fe8985bbd0c2171be172 *tests/testthat.R 0a9fcbfdf8049ad466445692abad3080 *tests/testthat/helper-class_analysis_dataset.R 63e2625cc6d93880d38355b5124c12e2 *tests/testthat/helper-f_analysis_base_means.R e37ad6127f137cf2addd6dbdb4bc1393 *tests/testthat/helper-f_analysis_base_rates.R e37ad6127f137cf2addd6dbdb4bc1393 *tests/testthat/helper-f_analysis_base_survival.R a80f155d3712d98553571b6ecda0e2d8 *tests/testthat/helper-f_core_assertions.R 2593839d9fedfb2b76d0aff246163e8f *tests/testthat/helper-f_core_output_formats.R 6b528cb598735554de4ac0d462247cb1 *tests/testthat/helper-f_core_utilities.R edddebc5a42a6388527637c8889da111 *tests/testthat/test-class_analysis_dataset.R fc6d0dcc69474a4a369827282bf81a1e *tests/testthat/test-class_core_plot_settings.R 411b9233d776b106966b3d204e122a1a *tests/testthat/test-class_design_plan.R 483e49f5f0a3f123909d47b873628555 *tests/testthat/test-class_design_set.R d83454b259d2e583c268cf36ce31a0ed *tests/testthat/test-class_simulation_results.R 93d655b41a5ffe6bd1bb0483aeb3c288 *tests/testthat/test-class_summary.R d4b9afb2a92f8c14b939758617fbd1b8 *tests/testthat/test-class_time.R c84e76ad7f8995119bfe8e3f9fab4fc1 
*tests/testthat/test-f_analysis_base_means.R 82d73d61443755fc1a4db0ddefb68bec *tests/testthat/test-f_analysis_base_rates.R aa4925632ce50aae6594652df5e3f2e9 *tests/testthat/test-f_analysis_base_survival.R a538486de0a4fcaa326407d46a5f1592 *tests/testthat/test-f_analysis_enrichment_means.R a3c7bda100ea864c5b3bbf76d3e5348e *tests/testthat/test-f_analysis_enrichment_rates.R 71b1c5d0ea4a3e94f9436f62236ca643 *tests/testthat/test-f_analysis_enrichment_survival.R b451140b62926dff7a0a11b3d09ae612 *tests/testthat/test-f_analysis_input_validation.R 50601383b653362f6eddf49a9039274e *tests/testthat/test-f_analysis_multiarm_means.R 8bb40503b77c0315c8beead316f9a74e *tests/testthat/test-f_analysis_multiarm_rates.R 135dc209e87c140a7b11e935d8e3aa6c *tests/testthat/test-f_analysis_multiarm_survival.R bf266cb3c29db2f289aa662a17182d32 *tests/testthat/test-f_analysis_utilities.R 686004b2ed1e593c8d466e842f67e97c *tests/testthat/test-f_core_assertions.R 0bfffb4519a842ee373287547f33e7a5 *tests/testthat/test-f_core_output_formats.R 74347154afb1e4c63d1fec3d2e3abf6d *tests/testthat/test-f_core_plot.R 9f7a96a86c67be2b7a4aaffc6b4bf5ca *tests/testthat/test-f_core_utilities.R bf9e83e3633f526c12af2a6c3945d6c6 *tests/testthat/test-f_design_fisher_combination_test.R 26360a64fdc49375135b32be5914f713 *tests/testthat/test-f_design_group_sequential.R 7cdc9807db8d2532f3794bd2ccf1b1ec *tests/testthat/test-f_design_group_sequential_beta_spending.R c407cd7681996ef902d3c31aba2f8b73 *tests/testthat/test-f_design_power_calculator.R 7d52fd14f28bec9f1fa82b68318d8c66 *tests/testthat/test-f_design_sample_size_calculator.R b8b3860a48539385e6123b6692ddbfec *tests/testthat/test-f_design_utilities.R 4050fa22f4edb1922bdaa813f6bb700a *tests/testthat/test-f_logger.R a3d5b786a29e1716dd45e0ccd6893a9b *tests/testthat/test-f_parameter_set_utilities.R 0dea3a3e26b3fdff428c3ef3a74c56f7 *tests/testthat/test-f_quality_assurance.R 3a9f6b9c7d3e0a2eae93a8bf97c68672 *tests/testthat/test-f_simulation_base_means.R 
e8ce2ef1e962180a6e6942808d73c57c *tests/testthat/test-f_simulation_base_rates.R 6a3d6a10913687882de1d6ee625df1c1 *tests/testthat/test-f_simulation_base_survival.R e9abd7f7d5817dfe80534b8759ec6bc5 *tests/testthat/test-f_simulation_enrichment_means.R 21affae4f080869b5b9742b07e1582d1 *tests/testthat/test-f_simulation_enrichment_rates.R fc61058a9887f8d449e6abd557724e17 *tests/testthat/test-f_simulation_enrichment_survival.R a01b50f81e080b50e2af50fc9b2bd2b2 *tests/testthat/test-f_simulation_multiarm_means.R a43706a6c10db6e089ab1c254aa8b310 *tests/testthat/test-f_simulation_multiarm_rates.R 27383ef530f57d03eb49cc449cf7c531 *tests/testthat/test-f_simulation_multiarm_survival.R 04c7d5547109624ba971f7442f952516 *tests/testthat/test-f_simulation_performance_score.R 4c56c1251683848aa4ca30f4d76e4d88 *tests/testthat/test-generic_functions.R 38dd547a2f3c8437c47e210280359053 *tests/testthat/test-pkgname.R bef4ec3209bb566eeae27be30e7c6bc9 *vignettes/rpact_getting_started.Rmd rpact/inst/0000755000176200001440000000000014450551404012342 5ustar liggesusersrpact/inst/doc/0000755000176200001440000000000014450551401013104 5ustar liggesusersrpact/inst/doc/rpact_getting_started.R0000644000176200001440000000022514450551401017606 0ustar liggesusers## ----setup, include = FALSE--------------------------------------------------- knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) rpact/inst/doc/rpact_getting_started.html0000644000176200001440000003357014450551401020362 0ustar liggesusers Getting started with rpact

Getting started with rpact

Friedrich Pahlke and Gernot Wassmer

2023-07-03

Confirmatory Adaptive Clinical Trial Design, Simulation, and Analysis.

Functional Range

  • Sample size and power calculation for
    • means (continuous endpoint)
    • rates (binary endpoint)
    • survival trials with
      • piecewise accrual time and intensity
      • piecewise exponential survival time
      • survival times that follow a Weibull distribution
  • Fixed sample design and designs with interim analysis stages
  • Simulation tool for means, rates, and survival data
    • Assessment of adaptive sample size/event number recalculations based on conditional power
    • Assessment of treatment selection strategies in multi-arm trials
  • Adaptive analysis of means, rates, and survival data
  • Adaptive designs and analysis for multi-arm trials
  • Simulation and analysis for enrichment designs testing means, rates, and hazard ratios

Learn to use rpact

We recommend three ways to learn how to use rpact:

  1. Use the Shiny app: shiny.rpact.com
  2. Use the Vignettes: www.rpact.org/vignettes
  3. Book a training: www.rpact.com

Vignettes

The vignettes are hosted at www.rpact.org/vignettes and cover the following topics:

  1. Defining Group Sequential Boundaries with rpact
  2. Designing Group Sequential Trials with Two Groups and a Continuous Endpoint with rpact
  3. Designing Group Sequential Trials with a Binary Endpoint with rpact
  4. Designing Group Sequential Trials with Two Groups and a Survival Endpoint with rpact
  5. Simulation-Based Design of Group Sequential Trials with a Survival Endpoint with rpact
  6. An Example to Illustrate Boundary Re-Calculations during the Trial with rpact
  7. Analysis of a Group Sequential Trial with a Survival Endpoint using rpact
  8. Defining Accrual Time and Accrual Intensity with rpact
  9. How to use R Generics with rpact
  10. How to Create Admirable Plots with rpact
  11. Comparing Sample Size and Power Calculation Results for a Group Sequential Trial with a Survival Endpoint: rpact vs. gsDesign
  12. Supplementing and Enhancing rpact’s Graphical Capabilities with ggplot2
  13. Using the Inverse Normal Combination Test for Analyzing a Trial with Continuous Endpoint and Potential Sample Size Re-Assessment with rpact
  14. Planning a Trial with Binary Endpoints with rpact
  15. Planning a Survival Trial with rpact
  16. Simulation of a Trial with a Binary Endpoint and Unblinded Sample Size Re-Calculation with rpact
  17. How to Create Summaries with rpact
  18. How to Create One- and Multi-Arm Analysis Result Plots with rpact
  19. How to Create One- and Multi-Arm Simulation Result Plots with rpact
  20. Simulating Multi-Arm Designs with a Continuous Endpoint using rpact
  21. Analysis of a Multi-Arm Design with a Binary Endpoint using rpact
  22. Step-by-Step rpact Tutorial
  23. Planning and Analyzing a Group-Sequential Multi-Arm Multi-Stage Design with Binary Endpoint using rpact
  24. Two-arm analysis for continuous data with covariates from raw data (exclusive)
  25. How to install the latest developer version (exclusive)
  26. Delayed Response Designs with rpact

User Concept

Workflow

  • Everything is starting with a design, e.g.: design <- getDesignGroupSequential()
  • Find the optimal design parameters with help of rpact comparison tools: getDesignSet
  • Calculate the required sample size, e.g.: getSampleSizeMeans(), getPowerMeans()
  • Simulate specific characteristics of an adaptive design, e.g.: getSimulationMeans()
  • Collect your data, import it into R and create a dataset: data <- getDataset()
  • Analyze your data: getAnalysisResults(design, data)

Focus on Usability

The most important rpact functions have intuitive names:

  • getDesign[GroupSequential/InverseNormal/Fisher]()
  • getDesignCharacteristics()
  • getSampleSize[Means/Rates/Survival]()
  • getPower[Means/Rates/Survival]()
  • getSimulation[MultiArm/Enrichment][Means/Rates/Survival]()
  • getDataSet()
  • getAnalysisResults()
  • getStageResults()

RStudio/Eclipse: auto code completion makes it easy to use these functions.

R generics

In general, everything runs with the R standard functions which are always present in R: so-called R generics, e.g., print, summary, plot, as.data.frame, names, length

Utilities

Several utility functions are available, e.g.

  • getAccrualTime()
  • getPiecewiseSurvivalTime()
  • getNumberOfSubjects()
  • getEventProbabilities()
  • getPiecewiseExponentialDistribution()
  • survival helper functions for conversion of pi, lambda and median, e.g., getLambdaByMedian()
  • testPackage(): installation qualification on a client computer or company server (via unit tests)

Validation

Please contact us to learn how to use rpact on FDA/GxP-compliant validated corporate computer systems and how to get a copy of the formal validation documentation that is customized and licensed for exclusive use by your company, e.g., to fulfill regulatory requirements.

About

  • rpact is a comprehensive validated1 R package for clinical research which
    • enables the design and analysis of confirmatory adaptive group sequential designs
    • is a powerful sample size calculator
    • is a free of charge open-source software licensed under LGPL-3
    • particularly, implements the methods described in the recent monograph by Wassmer and Brannath (2016)

For more information please visit www.rpact.org

  • RPACT is a company which offers
    • enterprise software development services
    • technical support for the rpact package
    • consultancy and user training for clinical research using R
    • validated software solutions and R package development for clinical research

For more information please visit www.rpact.com


  1. The rpact validation documentation is available exclusively for our customers and supporting companies. For more information visit www.rpact.com/services/sla↩︎

rpact/inst/doc/rpact_getting_started.Rmd0000644000176200001440000001563714450500430020140 0ustar liggesusers--- title: "Getting started with rpact" author: "Friedrich Pahlke and Gernot Wassmer" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Getting started with rpact} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} --- ```{r setup, include = FALSE} knitr::opts_chunk$set( collapse = TRUE, comment = "#>" ) ``` Confirmatory Adaptive Clinical Trial Design, Simulation, and Analysis. ## Functional Range * Sample size and power calculation for + means (continuous endpoint) + rates (binary endpoint) + survival trials with - piecewise accrual time and intensity - piecewise exponential survival time - survival times that follow a Weibull distribution * Fixed sample design and designs with interim analysis stages * Simulation tool for means, rates, and survival data + Assessment of adaptive sample size/event number recalculations based on conditional power + Assessment of treatment selection strategies in multi-arm trials * Adaptive analysis of means, rates, and survival data * Adaptive designs and analysis for multi-arm trials * Simulation and analysis for enrichment designs testing means, rates, and hazard ratios ## Learn to use rpact We recommend three ways to learn how to use `rpact`: > 1. Use the Shiny app: [shiny.rpact.com](https://www.rpact.com/products#public-rpact-shiny-app) > 2. Use the Vignettes: > [www.rpact.org/vignettes](https://www.rpact.org/vignettes/) > 3. Book a training: > [www.rpact.com](https://www.rpact.com/services#learning-and-training) ### Vignettes The vignettes are hosted at [www.rpact.org/vignettes](https://www.rpact.org/vignettes/) and cover the following topics: 1. Defining Group Sequential Boundaries with rpact 2. Designing Group Sequential Trials with Two Groups and a Continuous Endpoint with rpact 3. Designing Group Sequential Trials with a Binary Endpoint with rpact 4. 
Designing Group Sequential Trials with Two Groups and a Survival Endpoint with rpact 5. Simulation-Based Design of Group Sequential Trials with a Survival Endpoint with rpact 6. An Example to Illustrate Boundary Re-Calculations during the Trial with rpact 7. Analysis of a Group Sequential Trial with a Survival Endpoint using rpact 8. Defining Accrual Time and Accrual Intensity with rpact 9. How to use R Generics with rpact 10. How to Create Admirable Plots with rpact 11. Comparing Sample Size and Power Calculation Results for a Group Sequential Trial with a Survival Endpoint: rpact vs. gsDesign 12. Supplementing and Enhancing rpact’s Graphical Capabilities with ggplot2 13. Using the Inverse Normal Combination Test for Analyzing a Trial with Continuous Endpoint and Potential Sample Size Re-Assessment with rpact 14. Planning a Trial with Binary Endpoints with rpact 15. Planning a Survival Trial with rpact 16. Simulation of a Trial with a Binary Endpoint and Unblinded Sample Size Re-Calculation with rpact 17. How to Create Summaries with rpact 18. How to Create One- and Multi-Arm Analysis Result Plots with rpact 19. How to Create One- and Multi-Arm Simulation Result Plots with rpact 20. Simulating Multi-Arm Designs with a Continuous Endpoint using rpact 21. Analysis of a Multi-Arm Design with a Binary Endpoint using rpact 22. Step-by-Step rpact Tutorial 23. Planning and Analyzing a Group-Sequential Multi-Arm Multi-Stage Design with Binary Endpoint using rpact 24. Two-arm analysis for continuous data with covariates from raw data (*exclusive*) 25. How to install the latest developer version (*exclusive*) 26. 
Delayed Response Designs with rpact ## User Concept ### Workflow * Everything is starting with a design, e.g.: `design <- getDesignGroupSequential()` * Find the optimal design parameters with help of `rpact` comparison tools: `getDesignSet` * Calculate the required sample size, e.g.: `getSampleSizeMeans()`, `getPowerMeans()` * Simulate specific characteristics of an adaptive design, e.g.: `getSimulationMeans()` * Collect your data, import it into R and create a dataset: `data <- getDataset()` * Analyze your data: `getAnalysisResults(design, data)` ### Focus on Usability The most important `rpact` functions have intuitive names: * `getDesign`[`GroupSequential`/`InverseNormal`/`Fisher`]`()` * `getDesignCharacteristics()` * `getSampleSize`[`Means`/`Rates`/`Survival`]`()` * `getPower`[`Means`/`Rates`/`Survival`]`()` * `getSimulation`[`MultiArm`/`Enrichment`]``[`Means`/`Rates`/`Survival`]`()` * `getDataSet()` * `getAnalysisResults()` * `getStageResults()` RStudio/Eclipse: auto code completion makes it easy to use these functions. ### R generics In general, everything runs with the R standard functions which are always present in R: so-called R generics, e.g., `print`, `summary`, `plot`, `as.data.frame`, `names`, `length` ### Utilities Several utility functions are available, e.g. * `getAccrualTime()` * `getPiecewiseSurvivalTime()` * `getNumberOfSubjects()` * `getEventProbabilities()` * `getPiecewiseExponentialDistribution()` * survival helper functions for conversion of `pi`, `lambda` and `median`, e.g., `getLambdaByMedian()` * `testPackage()`: installation qualification on a client computer or company server (via unit tests) ## Validation Please [contact](https://www.rpact.com/contact) us to learn how to use `rpact` on FDA/GxP-compliant validated corporate computer systems and how to get a copy of the formal validation documentation that is customized and licensed for exclusive use by your company, e.g., to fulfill regulatory requirements. 
## About * **rpact** is a comprehensive validated^[The rpact validation documentation is available exclusively for our customers and supporting companies. For more information visit [www.rpact.com/services/sla](https://www.rpact.com/services/sla)] R package for clinical research which + enables the design and analysis of confirmatory adaptive group sequential designs + is a powerful sample size calculator + is a free of charge open-source software licensed under [LGPL-3](https://cran.r-project.org/web/licenses/LGPL-3) + particularly, implements the methods described in the recent monograph by [Wassmer and Brannath (2016)](https://doi.org/10.1007%2F978-3-319-32562-0) > For more information please visit [www.rpact.org](https://www.rpact.org) * **RPACT** is a company which offers + enterprise software development services + technical support for the `rpact` package + consultancy and user training for clinical research using R + validated software solutions and R package development for clinical research > For more information please visit [www.rpact.com](https://www.rpact.com) ## Contact * [info@rpact.com](mailto:info@rpact.com) * [www.rpact.com/contact](https://www.rpact.com/contact) rpact/inst/extdata/0000755000176200001440000000000014070776016014002 5ustar liggesusersrpact/inst/extdata/dataset_survival_multi-arm.csv0000644000176200001440000000043714017174147022070 0ustar liggesusers"stages","groups","overallEvents","overallAllocationRatios","overallLogRanks","events","allocationRatios","logRanks" 1,1,25,1,2.2,25,1,2.2 1,2,18,1,1.99,18,1,1.99 1,3,22,1,2.32,22,1,2.32 2,1,57,1,2.80566916144919,32,1,1.8 2,2,NA,NA,NA,NA,NA,NA 2,3,58,1,3.09118512796343,36,1,2.11 rpact/inst/extdata/dataset_rates.csv0000644000176200001440000000031614017174147017340 0ustar liggesusers"stages","groups","sampleSizes","events","overallSampleSizes","overallEvents" 1,1,11,10,11,10 1,2,8,3,8,3 2,1,13,10,24,20 2,2,10,5,18,8 3,1,12,12,36,32 3,2,9,5,27,13 4,1,13,12,49,44 4,2,11,6,38,19 
rpact/inst/extdata/dataset_means_multi-arm.csv0000644000176200001440000000060514017174147021315 0ustar liggesusers"stages","groups","sampleSizes","means","stDevs","overallSampleSizes","overallMeans","overallStDevs" 1,1,13,242,244,13,242,244 1,2,15,188,212,15,188,212 1,3,14,267,256,14,267,256 1,4,12,92,215,12,92,215 2,1,25,222,221,38,228.842105263158,226.013456465663 2,2,NA,NA,NA,NA,NA,NA 2,3,27,277,232,41,273.585365853659,237.292749110646 2,4,29,122,227,41,113.219512195122,221.29878131105 rpact/inst/extdata/dataset_rates_multi-arm.csv0000644000176200001440000000024114017174147021324 0ustar liggesusers"stages","groups","sampleSizes","events" 1,1,11,10 1,2,8,3 1,3,7,2 2,1,13,10 2,2,10,5 2,3,10,4 3,1,12,12 3,2,9,5 3,3,8,3 4,1,13,12 4,2,11,6 4,3,9,5 rpact/inst/extdata/datasets_rates.csv0000644000176200001440000000056714017174147017533 0ustar liggesusers"datasetId","stages","groups","sampleSizes","events","overallSampleSizes","overallEvents" 1,1,1,11,10,11,10 1,1,2,8,3,8,3 1,2,1,13,10,24,20 1,2,2,10,5,18,8 1,3,1,12,12,36,32 1,3,2,9,5,27,13 1,4,1,13,12,49,44 1,4,2,11,6,38,19 2,1,1,9,10,9,10 2,1,2,6,4,6,4 2,2,1,13,10,22,20 2,2,2,10,5,16,9 2,3,1,12,12,34,32 2,3,2,9,5,25,14 2,4,1,13,12,47,44 2,4,2,11,6,36,20 rpact/inst/tests/0000755000176200001440000000000014070776016013512 5ustar liggesusersrpact/inst/tests/testthat/0000755000176200001440000000000014450555552015354 5ustar liggesusersrpact/inst/tests/testthat/test-rpact.R0000644000176200001440000002770414375425166017602 0ustar liggesusers## | ## | *Unit tests* ## | ## | This file is part of the R package rpact: ## | Confirmatory Adaptive Clinical Trial Design and Analysis ## | ## | Author: Gernot Wassmer, PhD, and Friedrich Pahlke, PhD ## | Licensed under "GNU Lesser General Public License" version 3 ## | License text can be found here: https://www.r-project.org/Licenses/LGPL-3 ## | ## | RPACT company website: https://www.rpact.com ## | RPACT package website: https://www.rpact.org ## | ## | Contact us for information about 
our services: info@rpact.com ## | ## | File name: test-rpact.R ## | Creation date: 21 April 2021, 15:04:49 ## | File version: $Revision: 6825 $ ## | Last changed: $Date: 2023-02-22 15:45:12 +0100 (Mi, 22 Feb 2023) $ ## | Last changed by: $Author: pahlke $ ## | context("Testing the rpact package") test_that("'getDesignInverseNormal' with default parameters: parameters and results are as expected", { designInverseNormal <- getDesignInverseNormal() expect_equal(designInverseNormal$alphaSpent, c(0.00025917372, 0.0071600594, 0.02499999), tolerance = 1e-07) expect_equal(designInverseNormal$criticalValues, c(3.4710914, 2.4544323, 2.0040356), tolerance = 1e-07) expect_equal(designInverseNormal$stageLevels, c(0.00025917372, 0.0070553616, 0.022533125), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designInverseNormal), NA))) expect_output(print(designInverseNormal)$show()) invisible(capture.output(expect_error(summary(designInverseNormal), NA))) expect_output(summary(designInverseNormal)$show()) } }) test_that("'getDesignFisher' with default parameters: parameters and results are as expected", { designFisher <- getDesignFisher() expect_equal(designFisher$alphaSpent, c(0.012308547, 0.01962413, 0.025), tolerance = 1e-07) expect_equal(designFisher$criticalValues, c(0.012308547, 0.0016635923, 0.00029106687), tolerance = 1e-07) expect_equal(designFisher$stageLevels, c(0.012308547, 0.012308547, 0.012308547), tolerance = 1e-07) expect_equal(designFisher$scale, c(1, 1)) expect_equal(designFisher$nonStochasticCurtailment, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designFisher), NA))) expect_output(print(designFisher)$show()) invisible(capture.output(expect_error(summary(designFisher), NA))) expect_output(summary(designFisher)$show()) } }) test_that("Testing 'getPiecewiseSurvivalTime': simple vector based definition", { pwSurvivalTime1 <- 
getPiecewiseSurvivalTime(lambda2 = 0.5, hazardRatio = 0.8) expect_equal(pwSurvivalTime1$piecewiseSurvivalTime, 0) expect_equal(pwSurvivalTime1$lambda1, 0.4, tolerance = 1e-07) expect_equal(pwSurvivalTime1$lambda2, 0.5, tolerance = 1e-07) expect_equal(pwSurvivalTime1$hazardRatio, 0.8, tolerance = 1e-07) expect_equal(pwSurvivalTime1$pi1, NA_real_) expect_equal(pwSurvivalTime1$pi2, NA_real_) expect_equal(pwSurvivalTime1$median1, 1.732868, tolerance = 1e-07) expect_equal(pwSurvivalTime1$median2, 1.3862944, tolerance = 1e-07) expect_equal(pwSurvivalTime1$eventTime, NA_real_) expect_equal(pwSurvivalTime1$kappa, 1) expect_equal(pwSurvivalTime1$piecewiseSurvivalEnabled, FALSE) expect_equal(pwSurvivalTime1$delayedResponseAllowed, FALSE) expect_equal(pwSurvivalTime1$delayedResponseEnabled, FALSE) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(pwSurvivalTime1), NA))) expect_output(print(pwSurvivalTime1)$show()) invisible(capture.output(expect_error(summary(pwSurvivalTime1), NA))) expect_output(summary(pwSurvivalTime1)$show()) } }) test_that("'getSampleSizeMeans': Sample size calculation of testing means for one sided group sequential design", { designGS1pretest <- getDesignGroupSequential( informationRates = c(0.2, 0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) expect_equal(designGS1pretest$alphaSpent, c(0.0020595603, 0.0098772988, 0.02499999), tolerance = 1e-07) expect_equal(designGS1pretest$criticalValues, c(2.8688923, 2.3885055, 2.0793148), tolerance = 1e-07) expect_equal(designGS1pretest$stageLevels, c(0.0020595603, 0.0084585282, 0.018794214), tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(designGS1pretest), NA))) expect_output(print(designGS1pretest)$show()) invisible(capture.output(expect_error(summary(designGS1pretest), NA))) expect_output(summary(designGS1pretest)$show()) } designGS1 <- getDesignGroupSequential( informationRates = c(0.2, 
0.5, 1), sided = 1, beta = 0.1, typeOfDesign = "WT", deltaWT = 0.3 ) sampleSizeResult <- getSampleSizeMeans(designGS1, groups = 1, thetaH0 = 0.5, stDev = 2, normalApproximation = FALSE, alternative = 0.8 ) expect_equal(sampleSizeResult$maxNumberOfSubjects, 494.6455, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[1, ], 98.929099, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[2, ], 247.32275, tolerance = 1e-07) expect_equal(sampleSizeResult$numberOfSubjects[3, ], 494.6455, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH0, 491.89699, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH01, 462.87248, tolerance = 1e-07) expect_equal(sampleSizeResult$expectedNumberOfSubjectsH1, 360.24062, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[1, ], 0.08379162, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[2, ], 0.40937259, tolerance = 1e-07) expect_equal(sampleSizeResult$rejectPerStage[3, ], 0.40683579, tolerance = 1e-07) expect_equal(sampleSizeResult$earlyStop, 0.49316421, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[1, ], 1.090771, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[2, ], 0.80583608, tolerance = 1e-07) expect_equal(sampleSizeResult$criticalValuesEffectScale[3, ], 0.68748891, tolerance = 1e-07) if (isTRUE(.isCompleteUnitTestSetEnabled())) { invisible(capture.output(expect_error(print(sampleSizeResult), NA))) expect_output(print(sampleSizeResult)$show()) invisible(capture.output(expect_error(summary(sampleSizeResult), NA))) expect_output(summary(sampleSizeResult)$show()) } }) test_that("Testing generic functions: no errors occur", { .skipTestIfDisabled() design <- getDesignGroupSequential( alpha = 0.05, kMax = 4, sided = 1, typeOfDesign = "WT", deltaWT = 0.1 ) designFisher <- getDesignFisher( kMax = 4, alpha = 0.025, informationRates = c(0.2, 0.5, 0.8, 1), alpha0Vec = rep(0.4, 3) ) 
designCharacteristics <- getDesignCharacteristics(design) powerAndASN <- getPowerAndAverageSampleNumber(design, theta = 1, nMax = 100) designSet <- getDesignSet(design = design, deltaWT = c(0.3, 0.4)) dataset <- getDataset( n1 = c(22, 11, 22, 11), n2 = c(22, 13, 22, 13), means1 = c(1, 1.1, 1, 1), means2 = c(1.4, 1.5, 3, 2.5), stDevs1 = c(1, 2, 2, 1.3), stDevs2 = c(1, 2, 2, 1.3) ) stageResults <- getStageResults(design, dataset) suppressWarnings(designPlan <- getSampleSizeMeans(design)) simulationResults <- getSimulationSurvival(design, maxNumberOfSubjects = 1200, plannedEvents = c(50, 100, 150, 200), seed = 12345 ) piecewiseSurvivalTime <- getPiecewiseSurvivalTime(list( "0 - <6" = 0.025, "6 - <9" = 0.04, "9 - <15" = 0.015, "15 - <21" = 0.01, ">=21" = 0.007 ), hazardRatio = 0.8) accrualTime <- getAccrualTime(list( "0 - <12" = 15, "12 - <13" = 21, "13 - <14" = 27, "14 - <15" = 33, "15 - <16" = 39, ">=16" = 45 ), maxNumberOfSubjects = 1400) expect_vector(names(design)) expect_vector(names(designFisher)) expect_vector(names(designCharacteristics)) expect_vector(names(powerAndASN)) expect_vector(names(designSet)) expect_vector(names(dataset)) expect_vector(names(stageResults)) expect_vector(names(designPlan)) expect_vector(names(simulationResults)) expect_vector(names(piecewiseSurvivalTime)) expect_vector(names(accrualTime)) expect_output(print(design)) expect_output(print(designFisher)) expect_output(print(designCharacteristics)) expect_output(print(powerAndASN)) expect_output(print(designSet)) expect_output(print(dataset)) expect_output(print(stageResults)) expect_output(print(designPlan)) expect_output(print(simulationResults)) expect_output(print(piecewiseSurvivalTime)) expect_output(print(accrualTime)) expect_output(summary(design)$show()) expect_output(summary(designFisher)$show()) expect_output(summary(designCharacteristics)$show()) expect_output(summary(powerAndASN)) expect_output(print(summary(designSet))) expect_output(summary(dataset)$show()) 
expect_output(summary(stageResults)) expect_output(summary(designPlan)$show()) expect_output(summary(simulationResults)$show()) expect_output(summary(piecewiseSurvivalTime)) expect_output(summary(accrualTime)) expect_named(as.data.frame(design)) expect_named(as.data.frame(designFisher)) expect_named(as.data.frame(designCharacteristics)) expect_named(as.data.frame(powerAndASN)) expect_named(as.data.frame(designSet)) expect_named(as.data.frame(dataset)) expect_named(as.data.frame(stageResults)) expect_named(as.data.frame(designPlan)) expect_named(as.data.frame(simulationResults)) expect_named(as.data.frame(piecewiseSurvivalTime)) expect_named(as.data.frame(accrualTime)) expect_is(as.data.frame(design, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(designFisher, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(designCharacteristics, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(powerAndASN, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(designSet, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(dataset, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(stageResults, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(designPlan, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(simulationResults, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(piecewiseSurvivalTime, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.data.frame(accrualTime, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.matrix(design), "matrix") expect_is(as.matrix(designFisher), "matrix") expect_is(as.matrix(designCharacteristics), "matrix") expect_is(as.matrix(powerAndASN), "matrix") expect_is(as.matrix(designSet), "matrix") expect_is(as.matrix(dataset), "matrix") expect_is(as.matrix(stageResults), "matrix") expect_is(as.matrix(designPlan), "matrix") 
expect_is(as.matrix(simulationResults), "matrix") expect_is(as.matrix(piecewiseSurvivalTime), "matrix") expect_is(as.matrix(accrualTime), "matrix") suppressWarnings(analysisResults <- getAnalysisResults(design, dataset)) expect_vector(names(analysisResults)) expect_output(print(analysisResults)) expect_output(summary(analysisResults)$show()) expect_named(as.data.frame(analysisResults)) expect_is(as.data.frame(analysisResults, niceColumnNamesEnabled = FALSE), "data.frame") expect_is(as.matrix(analysisResults), "matrix") }) rpact/inst/tests/testthat.R0000644000176200001440000000007414017174147015474 0ustar liggesusers library(testthat) library(rpact) test_check("rpact") rpact/inst/WORDLIST0000644000176200001440000000441314370462345013544 0ustar liggesusersaccrualIntensityType accruallIntensity accrualTime allocationRatioPlanned alphaSpent Brannath calcEventsFunction calcSubjectsFunction condErrorRate conditionalPower criticalValues CRP csv debian doi earlyStop effectDifference effectEstimate effectMeasure effectSizes emmeans eventsNotAchieved eventTime followUpTime futilityBounds getConditionalPower getConditionalRejectionProbabilities getData getDataset getFinalPValue getOutputFormat getPiecewiseExponentialRandomNumbers getPiecewiseSurvivalTime getPower getRepeatedPValues getSampleSize getSimulation getSimulationSurvival getStageResults getTestActions hazardRatio informationEpsilon informationRates isFALSE kable kMax knitr linux maxInformation maxNumberOfAdditionalEventsPerStage maxNumberOfAdditionalSubjectsPerStage maxNumberOfEventsPerStage maxNumberOfIterations maxNumberOfPatients maxNumberOfSubjects maxNumberOfSubjectsPerStage Mersenne minNumberOfAdditionalEventsPerStage minNumberOfAdditionalSubjectsPerStage minNumberOfEventsPerStage minNumberOfSubjectsPerStage niceColumnNamesEnabled noEarlyEfficacy normalApproximation numberOfSubjects numberOfSubjectsGroup overallPooledStDevs Pampallona ParameterSet piecewiseSurvivalTime plannedEvents plannedSubjects 
plotSettings pValues rawDataTwoArmNormal rejectPerStage rpwexp SampleSize setOutputFormat showStatistics solaris stageLevels stageResults stDevH stDevs studyDuration studyDurationH testPackage testStatistics thetaH thetaStandardized Tsiatis twoSidedPower typeOfDesign unstacked userAlphaSpending Wolbers WToptimum xlim ylim Wassmer amongst AnalysisResults ASN Biometrics cowplot CROs cumulativeEventProbabilities DeCani decisionMatrix deltaWT DeMets dropoutEvent dropoutTime Dunnett Emax EMMs Farrington getOption ggplot ggpubr gridExtra gsDesign GxP Haybittle Hsieh https Hwang Koehne Koenig Lakatos Lan logrank mnormt observationTime Peto Pharma piecewiseLambda Pocock prevalences RCI RCIs Roehmel rpact's RStudio Schoenfeld's sd selectable sep Shi sigmoid signif sla Satterthwaite Schüürhuis Springer survivalTime testthat timeUnderObservation TrialDesign TrialDesignCharacteristics trunc unblinded unstratified Welch www